title stringlengths 1 185 | diff stringlengths 0 32.2M | body stringlengths 0 123k ⌀ | url stringlengths 57 58 | created_at stringlengths 20 20 | closed_at stringlengths 20 20 | merged_at stringlengths 20 20 ⌀ | updated_at stringlengths 20 20 |
|---|---|---|---|---|---|---|---|
TST: Test sorting levels not aligned with index (#25775) | diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py
index 246ba943a4509..96aeb608ba3b8 100644
--- a/pandas/tests/frame/test_sorting.py
+++ b/pandas/tests/frame/test_sorting.py
@@ -227,6 +227,18 @@ def test_stable_descending_multicolumn_sort(self):
kind='mergesort')
assert_frame_equal(sorted_df, expected)
+ def test_sort_multi_index(self):
+ # GH 25775, testing that sorting by index works with a multi-index.
+ df = DataFrame({'a': [3, 1, 2], 'b': [0, 0, 0],
+ 'c': [0, 1, 2], 'd': list('abc')})
+ result = df.set_index(list('abc')).sort_index(level=list('ba'))
+
+ expected = DataFrame({'a': [1, 2, 3], 'b': [0, 0, 0],
+ 'c': [1, 2, 0], 'd': list('bca')})
+ expected = expected.set_index(list('abc'))
+
+ tm.assert_frame_equal(result, expected)
+
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
| - [x] closes #25775
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry (empty since no user-facing changs)
With `master` at 6d2398a58fda68e40f116f199439504558c7774c, issue #25775 seems resolved. Added validation tests. | https://api.github.com/repos/pandas-dev/pandas/pulls/26492 | 2019-05-22T19:04:53Z | 2019-06-05T12:54:35Z | 2019-06-05T12:54:35Z | 2019-06-05T12:54:40Z |
Fixed typo mutiplication -> multiplication | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index afe37bf198ab7..6bfa63012689d 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -918,7 +918,7 @@ def __len__(self):
def dot(self, other):
"""
- Compute the matrix mutiplication between the DataFrame and other.
+ Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
| Docstring typo fix only. | https://api.github.com/repos/pandas-dev/pandas/pulls/26489 | 2019-05-22T13:18:29Z | 2019-05-22T13:58:19Z | 2019-05-22T13:58:19Z | 2019-05-22T14:02:33Z |
DOC/CLN: Change API reference section title | diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst
index 680cb7e3dac91..42ebf648f299f 100644
--- a/doc/source/reference/indexing.rst
+++ b/doc/source/reference/indexing.rst
@@ -2,9 +2,9 @@
.. _api.indexing:
-========
-Indexing
-========
+=============
+Index Objects
+=============
Index
-----
| https://api.github.com/repos/pandas-dev/pandas/pulls/26486 | 2019-05-22T10:51:41Z | 2019-05-24T15:11:26Z | 2019-05-24T15:11:26Z | 2019-05-29T10:55:19Z | |
DOC/CLN: wil -> will | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 76910f425836e..623e2b4863029 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -926,7 +926,7 @@ def squeeze(self, axis=None):
a 1
Name: 0, dtype: int64
- Squeezing all axes wil project directly into a scalar:
+ Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
| https://api.github.com/repos/pandas-dev/pandas/pulls/26484 | 2019-05-21T20:38:52Z | 2019-05-21T22:33:42Z | 2019-05-21T22:33:42Z | 2019-05-22T14:09:46Z | |
Better error message for DataFrame.hist() without numerical columns (… | diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index 90297ecfa3415..fed4b0d90983c 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -2426,6 +2426,10 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
data = data._get_numeric_data()
naxes = len(data.columns)
+ if naxes == 0:
+ raise ValueError("hist method requires numerical columns, "
+ "nothing to plot.")
+
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py
index c62ed21c2fb17..f3f6c9c7fc2d4 100644
--- a/pandas/tests/plotting/test_hist_method.py
+++ b/pandas/tests/plotting/test_hist_method.py
@@ -209,6 +209,16 @@ def test_hist_df_legacy(self):
with pytest.raises(AttributeError):
ser.hist(foo='bar')
+ @pytest.mark.slow
+ def test_hist_non_numerical_raises(self):
+ # gh-10444
+ df = DataFrame(np.random.rand(10, 2))
+ df_o = df.astype(np.object)
+
+ msg = "hist method requires numerical columns, nothing to plot."
+ with pytest.raises(ValueError, match=msg):
+ df_o.hist()
+
@pytest.mark.slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
| Closes #10444
Added simple check for non-zero number of numeric columns plus suggested error message in case the check fails.
Happy to make any adjustments this if desired.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26483 | 2019-05-21T19:16:14Z | 2019-05-24T15:47:02Z | 2019-05-24T15:47:02Z | 2019-05-25T12:57:38Z |
CLN: pd.TimeGrouper | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 30bc332f8a04b..f9b356110055b 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -270,7 +270,7 @@ Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Removed ``Panel`` (:issue:`25047`, :issue:`25191`, :issue:`25231`)
-
--
+- Removed previously deprecated ``TimeGrouper`` (:issue:`16942`)
-
.. _whatsnew_0250.performance:
diff --git a/pandas/__init__.py b/pandas/__init__.py
index bd367bbe27d5e..6af6f3093c120 100644
--- a/pandas/__init__.py
+++ b/pandas/__init__.py
@@ -65,7 +65,7 @@
to_numeric, to_datetime, to_timedelta,
# misc
- np, TimeGrouper, Grouper, factorize, unique, value_counts,
+ np, Grouper, factorize, unique, value_counts,
array, Categorical, set_eng_float_format, Series, DataFrame,
Panel)
diff --git a/pandas/core/api.py b/pandas/core/api.py
index 96f623bda9a8a..b7398e433f28f 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -45,15 +45,3 @@
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
-
-
-# Deprecation: xref gh-16747
-class TimeGrouper:
-
- def __new__(cls, *args, **kwargs):
- from pandas.core.resample import TimeGrouper
- import warnings
- warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
- "Please use pd.Grouper(freq=...)",
- FutureWarning, stacklevel=2)
- return TimeGrouper(*args, **kwargs)
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 7ee0225723675..c92808200ebea 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -50,7 +50,7 @@ class TestPDApi(Base):
]
# these are already deprecated; awaiting removal
- deprecated_classes = ['TimeGrouper', 'Panel']
+ deprecated_classes = ['Panel']
# these should be deprecated in the future
deprecated_classes_in_future = []
@@ -132,17 +132,6 @@ def test_testing(self):
self.check(testing, self.funcs)
-class TestTopLevelDeprecations:
-
- # top-level API deprecations
- # GH 13790
-
- def test_TimeGrouper(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- pd.TimeGrouper(freq='D')
-
-
class TestCDateRange:
def test_deprecation_cdaterange(self):
diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py
index 21c71154c95ef..ef05e6ada4890 100644
--- a/pandas/tests/groupby/test_timegrouper.py
+++ b/pandas/tests/groupby/test_timegrouper.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range
+from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.ops import BinGrouper
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -365,10 +366,8 @@ def sumfunc_value(x):
return x.value.sum()
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value)
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
- .apply(sumfunc_value))
+ result = (df_dt.groupby(Grouper(freq='M', key='date'))
+ .apply(sumfunc_value))
assert_series_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py
index c3c908f4b0d1b..63fa2007e401d 100644
--- a/pandas/tests/resample/test_base.py
+++ b/pandas/tests/resample/test_base.py
@@ -6,10 +6,10 @@
import pandas as pd
from pandas import DataFrame, Series
from pandas.core.groupby.groupby import DataError
+from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import PeriodIndex, period_range
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
-from pandas.core.resample import TimeGrouper
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_index_equal,
@@ -214,7 +214,7 @@ def test_apply_to_empty_series(empty_series):
def test_resampler_is_iterable(series):
# GH 15314
freq = 'H'
- tg = TimeGrouper(freq, convention='start')
+ tg = Grouper(freq=freq, convention='start')
grouped = series.groupby(tg)
resampled = series.resample(freq)
for (rk, rv), (gk, gv) in zip(resampled, grouped):
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index c2868979e9d8d..5711174ef0c9f 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -10,10 +10,10 @@
import pandas as pd
from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna
+from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, period_range
-from pandas.core.resample import (
- DatetimeIndex, TimeGrouper, _get_timestamp_range_edges)
+from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
@@ -42,7 +42,7 @@ def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
- b = TimeGrouper(Minute(5))
+ b = Grouper(freq=Minute(5))
g = s.groupby(b)
# check all cython functions work
@@ -50,7 +50,7 @@ def test_custom_grouper(index):
for f in funcs:
g._cython_agg_general(f)
- b = TimeGrouper(Minute(5), closed='right', label='right')
+ b = Grouper(freq=Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
@@ -116,7 +116,7 @@ def test_resample_integerarray():
def test_resample_basic_grouper(series):
s = series
result = s.resample('5Min').last()
- grouper = TimeGrouper(Minute(5), closed='left', label='left')
+ grouper = Grouper(freq=Minute(5), closed='left', label='left')
expected = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expected)
@@ -373,7 +373,7 @@ def test_resample_upsampling_picked_but_not_correct():
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
- b = TimeGrouper('M')
+ b = Grouper(freq='M')
g = df.groupby(b)
# check all cython functions work
@@ -521,7 +521,7 @@ def test_nearest_upsample_with_limit():
def test_resample_ohlc(series):
s = series
- grouper = TimeGrouper(Minute(5))
+ grouper = Grouper(freq=Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample('5Min').ohlc()
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index 2f330d1f2484b..3f767f8e7100f 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -6,8 +6,8 @@
import pandas as pd
from pandas import DataFrame, Series
+from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
-from pandas.core.resample import TimeGrouper
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
@@ -16,9 +16,7 @@
def test_apply():
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
+ grouper = Grouper(freq='A', label='right', closed='right')
grouped = test_series.groupby(grouper)
@@ -38,9 +36,7 @@ def test_count():
expected = test_series.groupby(lambda x: x.year).count()
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
+ grouper = Grouper(freq='A', label='right', closed='right')
result = test_series.groupby(grouper).count()
expected.index = result.index
assert_series_equal(result, expected)
@@ -64,7 +60,7 @@ def test_apply_iteration():
N = 1000
ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
df = DataFrame({'open': 1, 'close': 2}, index=ind)
- tg = TimeGrouper('M')
+ tg = Grouper(freq='M')
_, grouper, _ = tg._get_grouper(df)
@@ -93,7 +89,7 @@ def test_fails_on_no_datetime_index(name, func):
msg = ("Only valid with DatetimeIndex, TimedeltaIndex "
"or PeriodIndex, but got an instance of '{}'".format(name))
with pytest.raises(TypeError, match=msg):
- df.groupby(TimeGrouper('D'))
+ df.groupby(Grouper(freq='D'))
def test_aaa_group_order():
@@ -105,7 +101,7 @@ def test_aaa_group_order():
df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
datetime(2013, 1, 3), datetime(2013, 1, 4),
datetime(2013, 1, 5)] * 4
- grouped = df.groupby(TimeGrouper(key='key', freq='D'))
+ grouped = df.groupby(Grouper(key='key', freq='D'))
tm.assert_frame_equal(grouped.get_group(datetime(2013, 1, 1)),
df[::5])
@@ -135,7 +131,7 @@ def test_aggregate_normal(resample_method):
datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
- dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
+ dt_grouped = dt_df.groupby(Grouper(key='key', freq='D'))
expected = getattr(normal_grouped, resample_method)()
dt_result = getattr(dt_grouped, resample_method)()
@@ -195,7 +191,7 @@ def test_aggregate_with_nat(func, fill_value):
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
- dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
+ dt_grouped = dt_df.groupby(Grouper(key='key', freq='D'))
normal_result = getattr(normal_grouped, func)()
dt_result = getattr(dt_grouped, func)()
@@ -222,7 +218,7 @@ def test_aggregate_with_nat_size():
datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4
normal_grouped = normal_df.groupby('key')
- dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))
+ dt_grouped = dt_df.groupby(Grouper(key='key', freq='D'))
normal_result = normal_grouped.size()
dt_result = dt_grouped.size()
@@ -238,7 +234,7 @@ def test_aggregate_with_nat_size():
def test_repr():
# GH18203
- result = repr(TimeGrouper(key='A', freq='H'))
+ result = repr(Grouper(key='A', freq='H'))
expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
"closed='left', label='left', how='mean', "
"convention='e', base=0)")
| - [x] xref #6581
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26477 | 2019-05-21T00:50:21Z | 2019-05-24T22:07:54Z | 2019-05-24T22:07:54Z | 2019-06-04T21:18:49Z |
Fix the output of df.describe on an empty categorical / object column | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 89a9da4a73b35..cee1778e05bb8 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -253,6 +253,34 @@ are returned. (:issue:`21521`)
df.groupby("a").ffill()
+``DataFrame`` describe on an empty categorical / object column will return top and freq
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When calling :meth:`DataFrame.describe` with an empty categorical / object
+column, the 'top' and 'freq' columns were previously omitted, which was inconsistent with
+the output for non-empty columns. Now the 'top' and 'freq' columns will always be included,
+with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397`)
+
+.. ipython:: python
+
+ df = pd.DataFrame({"empty_col": pd.Categorical([])})
+ df
+
+*Previous Behavior*:
+
+.. code-block:: python
+
+ In [3]: df.describe()
+ Out[3]:
+ empty_col
+ count 0
+ unique 0
+
+*New Behavior*:
+
+.. ipython:: python
+
+ df.describe()
``__str__`` methods now call ``__repr__`` rather than vica-versa
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index 89b86c66d7b05..65c6babff38ba 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -1478,7 +1478,7 @@ def value_counts(self, dropna=True):
if dropna or clean:
obs = code if clean else code[mask]
- count = bincount(obs, minlength=ncat or None)
+ count = bincount(obs, minlength=ncat or 0)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 0596d0ab844ec..7ca2c52e18c41 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9920,6 +9920,12 @@ def describe_categorical_1d(data):
names += ['top', 'freq']
result += [top, freq]
+ # If the DataFrame is empty, set 'top' and 'freq' to None
+ # to maintain output shape consistency
+ else:
+ names += ['top', 'freq']
+ result += [None, None]
+
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index effe7eb47323d..487ff7932ec5f 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -588,6 +588,16 @@ def test_describe_categorical(self):
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
+ def test_describe_empty_categorical_column(self):
+ # GH 26397
+ # Ensure the index of an an empty categoric DataFrame column
+ # also contains (count, unique, top, freq)
+ df = pd.DataFrame({"empty_col": Categorical([])})
+ result = df.describe()
+ expected = DataFrame({'empty_col': [0, 0, None, None]},
+ index=['count', 'unique', 'top', 'freq'])
+ tm.assert_frame_equal(result, expected)
+
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
@@ -608,6 +618,7 @@ def test_describe_categorical_columns(self):
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
+
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
| - [x] closes #26397
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26474 | 2019-05-20T18:50:35Z | 2019-06-01T14:52:36Z | 2019-06-01T14:52:36Z | 2019-06-01T14:52:41Z |
Excel Test Cleanup - ReadWriteClass | diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 112d14795d9bf..f9926cd26d8da 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -49,7 +49,6 @@ def ignore_xlrd_time_clock_warning():
yield
-@td.skip_if_no('xlrd', '1.0.0')
class SharedItems:
@pytest.fixture(autouse=True)
@@ -60,6 +59,20 @@ def setup_method(self, datapath):
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
+
+@td.skip_if_no('xlrd', '1.0.0')
+class ReadingTestsBase(SharedItems):
+ # This is based on ExcelWriterBase
+
+ @pytest.fixture(autouse=True, params=['xlrd', None])
+ def set_engine(self, request):
+ func_name = "get_exceldf"
+ old_func = getattr(self, func_name)
+ new_func = partial(old_func, engine=request.param)
+ setattr(self, func_name, new_func)
+ yield
+ setattr(self, func_name, old_func)
+
def get_csv_refdf(self, basename):
"""
Obtain the reference data from read_csv with the Python engine.
@@ -114,19 +127,6 @@ def get_exceldf(self, basename, ext, *args, **kwds):
pth = os.path.join(self.dirpath, basename + ext)
return read_excel(pth, *args, **kwds)
-
-class ReadingTestsBase(SharedItems):
- # This is based on ExcelWriterBase
-
- @pytest.fixture(autouse=True, params=['xlrd', None])
- def set_engine(self, request):
- func_name = "get_exceldf"
- old_func = getattr(self, func_name)
- new_func = partial(old_func, engine=request.param)
- setattr(self, func_name, new_func)
- yield
- setattr(self, func_name, old_func)
-
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_usecols_int(self, ext):
@@ -565,74 +565,6 @@ def test_read_excel_blank_with_header(self, ext):
actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- @pytest.mark.parametrize("header,expected", [
- (None, DataFrame([np.nan] * 4)),
- (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
- ])
- def test_read_one_empty_col_no_header(self, ext, header, expected):
- # xref gh-12292
- filename = "no_header"
- df = pd.DataFrame(
- [["", 1, 100],
- ["", 2, 200],
- ["", 3, 300],
- ["", 4, 400]]
- )
-
- with ensure_clean(ext) as path:
- df.to_excel(path, filename, index=False, header=False)
- result = read_excel(path, filename, usecols=[0], header=header)
-
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- @pytest.mark.parametrize("header,expected", [
- (None, DataFrame([0] + [np.nan] * 4)),
- (0, DataFrame([np.nan] * 4))
- ])
- def test_read_one_empty_col_with_header(self, ext, header, expected):
- filename = "with_header"
- df = pd.DataFrame(
- [["", 1, 100],
- ["", 2, 200],
- ["", 3, 300],
- ["", 4, 400]]
- )
-
- with ensure_clean(ext) as path:
- df.to_excel(path, 'with_header', index=False, header=True)
- result = read_excel(path, filename, usecols=[0], header=header)
-
- tm.assert_frame_equal(result, expected)
-
- @td.skip_if_no('openpyxl')
- @td.skip_if_no('xlwt')
- def test_set_column_names_in_parameter(self, ext):
- # GH 12870 : pass down column names associated with
- # keyword argument names
- refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
- [3, 'baz']], columns=['a', 'b'])
-
- with ensure_clean(ext) as pth:
- with ExcelWriter(pth) as writer:
- refdf.to_excel(writer, 'Data_no_head',
- header=False, index=False)
- refdf.to_excel(writer, 'Data_with_head', index=False)
-
- refdf.columns = ['A', 'B']
-
- with ExcelFile(pth) as reader:
- xlsdf_no_head = read_excel(reader, 'Data_no_head',
- header=None, names=['A', 'B'])
- xlsdf_with_head = read_excel(reader, 'Data_with_head',
- index_col=None, names=['A', 'B'])
-
- tm.assert_frame_equal(xlsdf_no_head, refdf)
- tm.assert_frame_equal(xlsdf_with_head, refdf)
-
def test_date_conversion_overflow(self, ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
@@ -741,7 +673,6 @@ def test_read_from_file_url(self, ext):
tm.assert_frame_equal(url_table, local_table)
- @td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
@@ -780,32 +711,6 @@ def test_reader_closes_file(self, ext):
assert f.closed
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- def test_creating_and_reading_multiple_sheets(self, ext):
- # see gh-9450
- #
- # Test reading multiple sheets, from a runtime
- # created Excel file with multiple sheets.
- def tdf(col_sheet_name):
- d, i = [11, 22, 33], [1, 2, 3]
- return DataFrame(d, i, columns=[col_sheet_name])
-
- sheets = ["AAA", "BBB", "CCC"]
-
- dfs = [tdf(s) for s in sheets]
- dfs = dict(zip(sheets, dfs))
-
- with ensure_clean(ext) as pth:
- with ExcelWriter(pth) as ew:
- for sheetname, df in dfs.items():
- df.to_excel(ew, sheetname)
-
- dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
-
- for s in sheets:
- tm.assert_frame_equal(dfs[s], dfs_returned[s])
-
def test_reader_seconds(self, ext):
# Test reading times with and without milliseconds. GH5945.
@@ -902,84 +807,6 @@ def test_read_excel_multiindex_header_only(self, ext):
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
- @td.skip_if_no("xlsxwriter")
- def test_read_excel_multiindex_empty_level(self, ext):
- # see gh-12453
- with ensure_clean(ext) as path:
- df = DataFrame({
- ("One", "x"): {0: 1},
- ("Two", "X"): {0: 3},
- ("Two", "Y"): {0: 7},
- ("Zero", ""): {0: 0}
- })
-
- expected = DataFrame({
- ("One", "x"): {0: 1},
- ("Two", "X"): {0: 3},
- ("Two", "Y"): {0: 7},
- ("Zero", "Unnamed: 4_level_1"): {0: 0}
- })
-
- df.to_excel(path)
- actual = pd.read_excel(path, header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- df = pd.DataFrame({
- ("Beg", ""): {0: 0},
- ("Middle", "x"): {0: 1},
- ("Tail", "X"): {0: 3},
- ("Tail", "Y"): {0: 7}
- })
-
- expected = pd.DataFrame({
- ("Beg", "Unnamed: 1_level_1"): {0: 0},
- ("Middle", "x"): {0: 1},
- ("Tail", "X"): {0: 3},
- ("Tail", "Y"): {0: 7}
- })
-
- df.to_excel(path)
- actual = pd.read_excel(path, header=[0, 1], index_col=0)
- tm.assert_frame_equal(actual, expected)
-
- @td.skip_if_no("xlsxwriter")
- @pytest.mark.parametrize("c_idx_names", [True, False])
- @pytest.mark.parametrize("r_idx_names", [True, False])
- @pytest.mark.parametrize("c_idx_levels", [1, 3])
- @pytest.mark.parametrize("r_idx_levels", [1, 3])
- def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
- c_idx_levels, r_idx_levels):
- # see gh-4679
- with ensure_clean(ext) as pth:
- if c_idx_levels == 1 and c_idx_names:
- pytest.skip("Column index name cannot be "
- "serialized unless it's a MultiIndex")
-
- # Empty name case current read in as
- # unnamed levels, not Nones.
- check_names = r_idx_names or r_idx_levels <= 1
-
- df = mkdf(5, 5, c_idx_names, r_idx_names,
- c_idx_levels, r_idx_levels)
- df.to_excel(pth)
-
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
- df.iloc[0, :] = np.nan
- df.to_excel(pth)
-
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
- df.iloc[-1, :] = np.nan
- df.to_excel(pth)
- act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
- header=list(range(c_idx_levels)))
- tm.assert_frame_equal(df, act, check_names=check_names)
-
def test_excel_old_index_format(self, ext):
# see gh-4679
filename = "test_index_name_pre17" + ext
@@ -1054,30 +881,6 @@ def test_read_excel_chunksize(self, ext):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
- @td.skip_if_no("xlwt")
- @td.skip_if_no("openpyxl")
- def test_read_excel_parse_dates(self, ext):
- # see gh-11544, gh-12051
- df = DataFrame(
- {"col": [1, 2, 3],
- "date_strings": pd.date_range("2012-01-01", periods=3)})
- df2 = df.copy()
- df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
-
- with ensure_clean(ext) as pth:
- df2.to_excel(pth)
-
- res = read_excel(pth, index_col=0)
- tm.assert_frame_equal(df2, res)
-
- res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
- tm.assert_frame_equal(df, res)
-
- date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
- res = read_excel(pth, parse_dates=["date_strings"],
- date_parser=date_parser, index_col=0)
- tm.assert_frame_equal(df, res)
-
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
@@ -1141,6 +944,208 @@ def test_read_excel_squeeze(self, ext):
tm.assert_series_equal(actual, expected)
+@td.skip_if_no('xlrd', '1.0.0')
+@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
+class TestRoundTrip:
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ @pytest.mark.parametrize("header,expected", [
+ (None, DataFrame([np.nan] * 4)),
+ (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
+ ])
+ def test_read_one_empty_col_no_header(self, ext, header, expected):
+ # xref gh-12292
+ filename = "no_header"
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+
+ with ensure_clean(ext) as path:
+ df.to_excel(path, filename, index=False, header=False)
+ result = read_excel(path, filename, usecols=[0], header=header)
+
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ @pytest.mark.parametrize("header,expected", [
+ (None, DataFrame([0] + [np.nan] * 4)),
+ (0, DataFrame([np.nan] * 4))
+ ])
+ def test_read_one_empty_col_with_header(self, ext, header, expected):
+ filename = "with_header"
+ df = pd.DataFrame(
+ [["", 1, 100],
+ ["", 2, 200],
+ ["", 3, 300],
+ ["", 4, 400]]
+ )
+
+ with ensure_clean(ext) as path:
+ df.to_excel(path, 'with_header', index=False, header=True)
+ result = read_excel(path, filename, usecols=[0], header=header)
+
+ tm.assert_frame_equal(result, expected)
+
+ @td.skip_if_no('openpyxl')
+ @td.skip_if_no('xlwt')
+ def test_set_column_names_in_parameter(self, ext):
+ # GH 12870 : pass down column names associated with
+ # keyword argument names
+ refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
+ [3, 'baz']], columns=['a', 'b'])
+
+ with ensure_clean(ext) as pth:
+ with ExcelWriter(pth) as writer:
+ refdf.to_excel(writer, 'Data_no_head',
+ header=False, index=False)
+ refdf.to_excel(writer, 'Data_with_head', index=False)
+
+ refdf.columns = ['A', 'B']
+
+ with ExcelFile(pth) as reader:
+ xlsdf_no_head = read_excel(reader, 'Data_no_head',
+ header=None, names=['A', 'B'])
+ xlsdf_with_head = read_excel(reader, 'Data_with_head',
+ index_col=None, names=['A', 'B'])
+
+ tm.assert_frame_equal(xlsdf_no_head, refdf)
+ tm.assert_frame_equal(xlsdf_with_head, refdf)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ def test_creating_and_reading_multiple_sheets(self, ext):
+ # see gh-9450
+ #
+ # Test reading multiple sheets, from a runtime
+ # created Excel file with multiple sheets.
+ def tdf(col_sheet_name):
+ d, i = [11, 22, 33], [1, 2, 3]
+ return DataFrame(d, i, columns=[col_sheet_name])
+
+ sheets = ["AAA", "BBB", "CCC"]
+
+ dfs = [tdf(s) for s in sheets]
+ dfs = dict(zip(sheets, dfs))
+
+ with ensure_clean(ext) as pth:
+ with ExcelWriter(pth) as ew:
+ for sheetname, df in dfs.items():
+ df.to_excel(ew, sheetname)
+
+ dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
+
+ for s in sheets:
+ tm.assert_frame_equal(dfs[s], dfs_returned[s])
+
+ @td.skip_if_no("xlsxwriter")
+ def test_read_excel_multiindex_empty_level(self, ext):
+ # see gh-12453
+ with ensure_clean(ext) as path:
+ df = DataFrame({
+ ("One", "x"): {0: 1},
+ ("Two", "X"): {0: 3},
+ ("Two", "Y"): {0: 7},
+ ("Zero", ""): {0: 0}
+ })
+
+ expected = DataFrame({
+ ("One", "x"): {0: 1},
+ ("Two", "X"): {0: 3},
+ ("Two", "Y"): {0: 7},
+ ("Zero", "Unnamed: 4_level_1"): {0: 0}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ df = pd.DataFrame({
+ ("Beg", ""): {0: 0},
+ ("Middle", "x"): {0: 1},
+ ("Tail", "X"): {0: 3},
+ ("Tail", "Y"): {0: 7}
+ })
+
+ expected = pd.DataFrame({
+ ("Beg", "Unnamed: 1_level_1"): {0: 0},
+ ("Middle", "x"): {0: 1},
+ ("Tail", "X"): {0: 3},
+ ("Tail", "Y"): {0: 7}
+ })
+
+ df.to_excel(path)
+ actual = pd.read_excel(path, header=[0, 1], index_col=0)
+ tm.assert_frame_equal(actual, expected)
+
+ @td.skip_if_no("xlsxwriter")
+ @pytest.mark.parametrize("c_idx_names", [True, False])
+ @pytest.mark.parametrize("r_idx_names", [True, False])
+ @pytest.mark.parametrize("c_idx_levels", [1, 3])
+ @pytest.mark.parametrize("r_idx_levels", [1, 3])
+ def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
+ c_idx_levels, r_idx_levels):
+ # see gh-4679
+ with ensure_clean(ext) as pth:
+ if c_idx_levels == 1 and c_idx_names:
+ pytest.skip("Column index name cannot be "
+ "serialized unless it's a MultiIndex")
+
+ # Empty name case current read in as
+ # unnamed levels, not Nones.
+ check_names = r_idx_names or r_idx_levels <= 1
+
+ df = mkdf(5, 5, c_idx_names, r_idx_names,
+ c_idx_levels, r_idx_levels)
+ df.to_excel(pth)
+
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[0, :] = np.nan
+ df.to_excel(pth)
+
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ df.iloc[-1, :] = np.nan
+ df.to_excel(pth)
+ act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
+ header=list(range(c_idx_levels)))
+ tm.assert_frame_equal(df, act, check_names=check_names)
+
+ @td.skip_if_no("xlwt")
+ @td.skip_if_no("openpyxl")
+ def test_read_excel_parse_dates(self, ext):
+ # see gh-11544, gh-12051
+ df = DataFrame(
+ {"col": [1, 2, 3],
+ "date_strings": pd.date_range("2012-01-01", periods=3)})
+ df2 = df.copy()
+ df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
+
+ with ensure_clean(ext) as pth:
+ df2.to_excel(pth)
+
+ res = read_excel(pth, index_col=0)
+ tm.assert_frame_equal(df2, res)
+
+ res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
+ tm.assert_frame_equal(df, res)
+
+ date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
+ res = read_excel(pth, parse_dates=["date_strings"],
+ date_parser=date_parser, index_col=0)
+ tm.assert_frame_equal(df, res)
+
+
+@td.skip_if_no('xlrd', '1.0.0')
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
"""
| The tests we have in this module need a pretty big refactor as they mix a few testing idioms together. This is slowing down contributions on items like #25092 and #25427
This is going to require quite a few PRs to get it where it needs to be. This one simply:
- Moves tests that require both reading and writing into a dedicated class
- Moves skip fixtures off of the SharedItems class onto the classes that actually use them
I'd eventually like to get rid of the SharedItems class and use parametrization on Reader/Writer classes to test the various combinations of engines and extensions, though again going to take a few PRs to get there | https://api.github.com/repos/pandas-dev/pandas/pulls/26473 | 2019-05-20T18:21:47Z | 2019-05-24T16:01:09Z | 2019-05-24T16:01:09Z | 2020-01-16T00:34:48Z |
Fix bug where list like object not returning empty Index. | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 1be7e0736f9fe..7d71cb22f49ed 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3095,7 +3095,7 @@ def _ensure_valid_index(self, value):
passed value.
"""
# GH5632, make sure that we are a Series convertible
- if not len(self.index) and is_list_like(value):
+ if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index ae14563e5952a..a78b2ab7d1c4c 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -821,6 +821,14 @@ def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
df[df > df2] = 47
assert_frame_equal(df, df2)
+ def test_setitem_with_empty_listlike(self):
+ # GH #17101
+ index = pd.Index([], name="idx")
+ result = pd.DataFrame(columns=["A"], index=index)
+ result["A"] = []
+ expected = pd.DataFrame(columns=["A"], index=index)
+ tm.assert_index_equal(result.index, expected.index)
+
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
df = DataFrame()
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index 68e93f06e43dc..c4505231932c6 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -442,10 +442,10 @@ def test_partial_set_empty_frame(self):
# these work as they don't really change
# anything but the index
# GH5632
- expected = DataFrame(columns=["foo"], index=Index([], dtype="int64"))
+ expected = DataFrame(columns=["foo"], index=Index([], dtype="object"))
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="object"))
df["foo"] = Series([], dtype="object")
return df
@@ -469,22 +469,21 @@ def f():
expected["foo"] = expected["foo"].astype("float64")
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = []
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = Series(np.arange(len(df)), dtype="float64")
return df
tm.assert_frame_equal(f(), expected)
def f():
- df = DataFrame()
- tm.assert_index_equal(df.index, Index([], dtype="object"))
+ df = DataFrame(index=Index([], dtype="int64"))
df["foo"] = range(len(df))
return df
| - [x] closes #17101
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26471 | 2019-05-20T14:12:39Z | 2019-08-22T11:40:59Z | 2019-08-22T11:40:59Z | 2019-08-22T11:41:02Z |
DOC: fix example with Timestamp/integer addition | diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst
index ca788963971ad..4ea7c656fd197 100644
--- a/doc/source/user_guide/indexing.rst
+++ b/doc/source/user_guide/indexing.rst
@@ -868,7 +868,7 @@ You can also set using these same indexers.
.. ipython:: python
- df.at[dates[-1] + 1, 0] = 7
+ df.at[dates[-1] + pd.Timedelta('1 day'), 0] = 7
df
Boolean indexing
| Remaining warning that is being raised in the examples in the docs (the others are related to pyarrow). | https://api.github.com/repos/pandas-dev/pandas/pulls/26467 | 2019-05-20T08:49:00Z | 2019-05-20T13:14:57Z | 2019-05-20T13:14:57Z | 2019-05-20T13:14:58Z |
Add defensive check for argument errors keyword in to_numeric | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index b2d1aa299a45a..9e04dcaa41416 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -41,6 +41,7 @@ Other Enhancements
- :meth:`DataFrame.query` and :meth:`DataFrame.eval` now supports quoting column names with backticks to refer to names with spaces (:issue:`6508`)
- :func:`merge_asof` now gives a more clear error message when merge keys are categoricals that are not equal (:issue:`26136`)
- :meth:`pandas.core.window.Rolling` supports exponential (or Poisson) window type (:issue:`21303`)
+- :func:`to_numeric` now gives a error message when errors argument value is not in the tuple of accepted values. (:issue:`26466`)
.. _whatsnew_0250.api_breaking:
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py
index 08ce649d8602c..d7a1b1119ce4b 100644
--- a/pandas/core/tools/numeric.py
+++ b/pandas/core/tools/numeric.py
@@ -105,6 +105,9 @@ def to_numeric(arg, errors='raise', downcast=None):
if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
raise ValueError('invalid downcasting method provided')
+ if errors not in ('ignore', 'raise', 'coerce'):
+ raise ValueError('invalid error value specified')
+
is_series = False
is_index = False
is_scalars = False
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index 5d3903cb93bd5..6e3e768f9360f 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -413,6 +413,16 @@ def test_downcast_invalid_cast():
to_numeric(data, downcast=invalid_downcast)
+def test_errors_invalid_value():
+ # see gh-26466
+ data = ["1", 2, 3]
+ invalid_error_value = "invalid"
+ msg = "invalid error value specified"
+
+ with pytest.raises(ValueError, match=msg):
+ to_numeric(data, errors=invalid_error_value)
+
+
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
| - [x] closes #26394
- [x] tests passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] Added defensive check for argument `errors` in func `to_numeric`
| https://api.github.com/repos/pandas-dev/pandas/pulls/26466 | 2019-05-20T06:59:11Z | 2019-05-20T11:11:37Z | 2019-05-20T11:11:36Z | 2019-05-20T11:11:50Z |
DOC: #25723 passing kwargs to excel document engine | diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 0abd073c7dc07..88d8ccbbe036e 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -2341,10 +2341,10 @@ round-trippable manner.
.. ipython:: python
df = pd.DataFrame({'foo': [1, 2, 3, 4],
- 'bar': ['a', 'b', 'c', 'd'],
- 'baz': pd.date_range('2018-01-01', freq='d', periods=4),
- 'qux': pd.Categorical(['a', 'b', 'c', 'c'])
- }, index=pd.Index(range(4), name='idx'))
+ 'bar': ['a', 'b', 'c', 'd'],
+ 'baz': pd.date_range('2018-01-01', freq='d', periods=4),
+ 'qux': pd.Categorical(['a', 'b', 'c', 'c'])
+ }, index=pd.Index(range(4), name='idx'))
df
df.dtypes
@@ -2864,6 +2864,19 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc
data = pd.read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'],
index_col=None, na_values=['NA'])
+``ExcelFile`` can also be called with a ``xlrd.book.Book`` object
+as a parameter. This allows the user to control how the excel file is read.
+For example, sheets can be loaded on demand by calling ``xlrd.open_workbook()``
+with ``on_demand=True``.
+
+.. code-block:: python
+
+ import xlrd
+ xlrd_book = xlrd.open_workbook('path_to_file.xls', on_demand=True)
+ with pd.ExcelFile(xlrd_book) as xls:
+ df1 = pd.read_excel(xls, 'Sheet1')
+ df2 = pd.read_excel(xls, 'Sheet2')
+
.. _io.excel.specifying_sheets:
Specifying Sheets
| - [x] closes #25723
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
This pr tries to address #25723.
Made a small addition to the ExcelFile section of user_guide/io.rst regarding the discussion in #25723.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26465 | 2019-05-20T06:31:35Z | 2019-05-20T16:31:05Z | 2019-05-20T16:31:05Z | 2019-05-20T16:31:13Z |
CLN: Remove ExcelWriter.sheetname | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index ac9857715dde4..c51441f701a45 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -312,7 +312,7 @@ Deprecations
Removal of prior version deprecations/changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Removed ``Panel`` (:issue:`25047`, :issue:`25191`, :issue:`25231`)
--
+- Removed the previously deprecated ``sheetname`` keyword in :func:`read_excel` (:issue:`16442`, :issue:`20938`)
- Removed previously deprecated ``TimeGrouper`` (:issue:`16942`)
-
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index c0678575fd6f0..a0d51e85aa4f3 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -5,7 +5,6 @@
import os
from textwrap import fill
from urllib.request import urlopen
-import warnings
from pandas._config import config
@@ -291,15 +290,10 @@ def read_excel(io,
mangle_dupe_cols=True,
**kwds):
- # Can't use _deprecate_kwarg since sheetname=None has a special meaning
- if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
- warnings.warn("The `sheetname` keyword is deprecated, use "
- "`sheet_name` instead", FutureWarning, stacklevel=2)
- sheet_name = kwds.pop("sheetname")
-
- if 'sheet' in kwds:
- raise TypeError("read_excel() got an unexpected keyword argument "
- "`sheet`")
+ for arg in ('sheet', 'sheetname'):
+ if arg in kwds:
+ raise TypeError("read_excel() got an unexpected keyword argument "
+ "`{}`".format(arg))
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
@@ -833,16 +827,6 @@ def parse(self,
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
"""
-
- # Can't use _deprecate_kwarg since sheetname=None has a special meaning
- if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
- warnings.warn("The `sheetname` keyword is deprecated, use "
- "`sheet_name` instead", FutureWarning, stacklevel=2)
- sheet_name = kwds.pop("sheetname")
- elif 'sheetname' in kwds:
- raise TypeError("Cannot specify both `sheet_name` "
- "and `sheetname`. Use just `sheet_name`")
-
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index f9926cd26d8da..44ce3111c3a1e 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -342,15 +342,15 @@ def test_excel_passes_na(self, ext):
tm.assert_frame_equal(parsed, expected)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
- def test_deprecated_sheetname(self, ext):
+ @pytest.mark.parametrize('arg', ['sheet', 'sheetname'])
+ def test_unexpected_kwargs_raises(self, ext, arg):
# gh-17964
excel = self.get_excelfile('test1', ext)
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- read_excel(excel, sheetname='Sheet1')
-
- with pytest.raises(TypeError):
- read_excel(excel, sheet='Sheet1')
+ kwarg = {arg: 'Sheet1'}
+ msg = "unexpected keyword argument `{}`".format(arg)
+ with pytest.raises(TypeError, match=msg):
+ read_excel(excel, **kwarg)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_excel_table_sheet_by_index(self, ext):
@@ -588,32 +588,20 @@ def test_sheet_name_and_sheetname(self, ext):
df_ref = self.get_csv_refdf(filename)
df1 = self.get_exceldf(filename, ext,
sheet_name=sheet_name, index_col=0) # doc
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- with ignore_xlrd_time_clock_warning():
- df2 = self.get_exceldf(filename, ext, index_col=0,
- sheetname=sheet_name) # backward compat
+ with ignore_xlrd_time_clock_warning():
+ df2 = self.get_exceldf(filename, ext, index_col=0,
+ sheet_name=sheet_name)
excel = self.get_excelfile(filename, ext)
df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- df2_parse = excel.parse(index_col=0,
- sheetname=sheet_name) # backward compat
+ df2_parse = excel.parse(index_col=0,
+ sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
- def test_sheet_name_both_raises(self, ext):
- with pytest.raises(TypeError, match="Cannot specify both"):
- self.get_exceldf('test1', ext, sheetname='Sheet1',
- sheet_name='Sheet1')
-
- excel = self.get_excelfile('test1', ext)
- with pytest.raises(TypeError, match="Cannot specify both"):
- excel.parse(sheetname='Sheet1',
- sheet_name='Sheet1')
-
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
| - [x] xref https://github.com/pandas-dev/pandas/issues/6581
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
cc @WillAyd | https://api.github.com/repos/pandas-dev/pandas/pulls/26464 | 2019-05-20T02:57:43Z | 2019-05-25T01:18:05Z | 2019-05-25T01:18:05Z | 2019-05-25T15:51:35Z |
Fix 'observed' kwarg not doing anything on SeriesGroupBy | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 2c66d3e4db321..2e9709f7bdd8f 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -512,6 +512,7 @@ Groupby/Resample/Rolling
- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying a aggregation function to timezone aware data (:issue:`23683`)
- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`)
- Bug in :func:`pandas.core.groupby.GroupBy.size` when grouping only NA values (:issue:`23050`)
+- Bug in :func:`Series.groupby` where ``observed`` kwarg was previously ignored (:issue:`24880`)
- Bug in :func:`Series.groupby` where using ``groupby`` with a :class:`MultiIndex` Series with a list of labels equal to the length of the series caused incorrect grouping (:issue:`25704`)
- Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`)
- Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`)
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 2f665975f96bd..dc414a588a2ce 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -25,7 +25,6 @@
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algorithms
-from pandas.core.arrays import Categorical
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.frame import DataFrame
@@ -33,7 +32,7 @@
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template)
-from pandas.core.index import CategoricalIndex, Index, MultiIndex
+from pandas.core.index import Index, MultiIndex
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
@@ -834,9 +833,10 @@ def _wrap_output(self, output, index, names=None):
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
- return self._wrap_output(output=output,
- index=self.grouper.result_index,
- names=names)
+ result = self._wrap_output(output=output,
+ index=self.grouper.result_index,
+ names=names)
+ return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
@@ -856,13 +856,16 @@ def _get_index():
return index
if isinstance(values[0], dict):
- # GH #823
+ # GH #823 #24880
index = _get_index()
- result = DataFrame(values, index=index).stack()
+ result = self._reindex_output(DataFrame(values, index=index))
+ # if self.observed is False,
+ # keep all-NaN rows created while re-indexing
+ result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
- if isinstance(values[0], (Series, dict)):
+ if isinstance(values[0], Series):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
@@ -870,9 +873,11 @@ def _get_index():
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
- # GH #6265
- return Series(values, index=_get_index(),
- name=self._selection_name)
+ # GH #6265 #24880
+ result = Series(data=values,
+ index=_get_index(),
+ name=self._selection_name)
+ return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = OrderedDict()
@@ -1335,7 +1340,8 @@ def _gotitem(self, key, ndim, subset=None):
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
- grouper=self.grouper)
+ grouper=self.grouper,
+ observed=self.observed)
raise AssertionError("invalid ndim for _gotitem")
@@ -1407,69 +1413,6 @@ def _wrap_agged_blocks(self, items, blocks):
return self._reindex_output(result)._convert(datetime=True)
- def _reindex_output(self, result):
- """
- If we have categorical groupers, then we want to make sure that
- we have a fully reindex-output to the levels. These may have not
- participated in the groupings (e.g. may have all been
- nan groups);
-
- This can re-expand the output space
- """
-
- # we need to re-expand the output space to accomodate all values
- # whether observed or not in the cartesian product of our groupes
- groupings = self.grouper.groupings
- if groupings is None:
- return result
- elif len(groupings) == 1:
- return result
-
- # if we only care about the observed values
- # we are done
- elif self.observed:
- return result
-
- # reindexing only applies to a Categorical grouper
- elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
- for ping in groupings):
- return result
-
- levels_list = [ping.group_index for ping in groupings]
- index, _ = MultiIndex.from_product(
- levels_list, names=self.grouper.names).sortlevel()
-
- if self.as_index:
- d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
- return result.reindex(**d)
-
- # GH 13204
- # Here, the categorical in-axis groupers, which need to be fully
- # expanded, are columns in `result`. An idea is to do:
- # result = result.set_index(self.grouper.names)
- # .reindex(index).reset_index()
- # but special care has to be taken because of possible not-in-axis
- # groupers.
- # So, we manually select and drop the in-axis grouper columns,
- # reindex `result`, and then reset the in-axis grouper columns.
-
- # Select in-axis groupers
- in_axis_grps = ((i, ping.name) for (i, ping)
- in enumerate(groupings) if ping.in_axis)
- g_nums, g_names = zip(*in_axis_grps)
-
- result = result.drop(labels=list(g_names), axis=1)
-
- # Set a temp index and reindex (possibly expanding)
- result = result.set_index(self.grouper.result_index
- ).reindex(index, copy=False)
-
- # Reset in-axis grouper columns
- # (using level numbers `g_nums` because level names may not be unique)
- result = result.reset_index(level=g_nums)
-
- return result.reset_index(drop=True)
-
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index aa04b7505afe4..91bb71a1a8af7 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -36,13 +36,14 @@ class providing the base-class of operations.
from pandas.api.types import (
is_datetime64_dtype, is_integer_dtype, is_object_dtype)
import pandas.core.algorithms as algorithms
+from pandas.core.arrays import Categorical
from pandas.core.base import (
DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError)
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
-from pandas.core.index import Index, MultiIndex
+from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
@@ -2301,6 +2302,79 @@ def tail(self, n=5):
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
+ def _reindex_output(self, output):
+ """
+ If we have categorical groupers, then we might want to make sure that
+ we have a fully re-indexed output to the levels. This means expanding
+ the output space to accommodate all values in the cartesian product of
+ our groups, regardless of whether they were observed in the data or
+ not. This will expand the output space if there are missing groups.
+
+ The method returns early without modifying the input if the number of
+ groupings is less than 2, self.observed == True or none of the groupers
+ are categorical.
+
+ Parameters
+ ----------
+ output: Series or DataFrame
+ Object resulting from grouping and applying an operation.
+
+ Returns
+ -------
+ Series or DataFrame
+ Object (potentially) re-indexed to include all possible groups.
+ """
+ groupings = self.grouper.groupings
+ if groupings is None:
+ return output
+ elif len(groupings) == 1:
+ return output
+
+ # if we only care about the observed values
+ # we are done
+ elif self.observed:
+ return output
+
+ # reindexing only applies to a Categorical grouper
+ elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
+ for ping in groupings):
+ return output
+
+ levels_list = [ping.group_index for ping in groupings]
+ index, _ = MultiIndex.from_product(
+ levels_list, names=self.grouper.names).sortlevel()
+
+ if self.as_index:
+ d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
+ return output.reindex(**d)
+
+ # GH 13204
+ # Here, the categorical in-axis groupers, which need to be fully
+ # expanded, are columns in `output`. An idea is to do:
+ # output = output.set_index(self.grouper.names)
+ # .reindex(index).reset_index()
+ # but special care has to be taken because of possible not-in-axis
+ # groupers.
+ # So, we manually select and drop the in-axis grouper columns,
+ # reindex `output`, and then reset the in-axis grouper columns.
+
+ # Select in-axis groupers
+ in_axis_grps = ((i, ping.name) for (i, ping)
+ in enumerate(groupings) if ping.in_axis)
+ g_nums, g_names = zip(*in_axis_grps)
+
+ output = output.drop(labels=list(g_names), axis=1)
+
+ # Set a temp index and reindex (possibly expanding)
+ output = output.set_index(self.grouper.result_index
+ ).reindex(index, copy=False)
+
+ # Reset in-axis grouper columns
+ # (using level numbers `g_nums` because level names may not be unique)
+ output = output.reset_index(level=g_nums)
+
+ return output.reset_index(drop=True)
+
GroupBy._add_numeric_operations()
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 112f7629d735a..f24fa0daa5b18 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1,3 +1,4 @@
+from collections import OrderedDict
from datetime import datetime
import numpy as np
@@ -25,7 +26,7 @@ def f(a):
ordered=a.ordered)
return a
- index = pd.MultiIndex.from_product(map(f, args), names=names)
+ index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index).sort_index()
@@ -189,7 +190,7 @@ def test_level_get_group(observed):
# GH15155
df = DataFrame(data=np.arange(2, 22, 2),
index=MultiIndex(
- levels=[pd.CategoricalIndex(["a", "b"]), range(10)],
+ levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"]))
g = df.groupby(level=["Index1"], observed=observed)
@@ -197,7 +198,7 @@ def test_level_get_group(observed):
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(data=np.arange(2, 12, 2),
- index=pd.MultiIndex(levels=[pd.CategoricalIndex(
+ index=MultiIndex(levels=[CategoricalIndex(
["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"]))
@@ -265,7 +266,7 @@ def test_observed(observed):
# multiple groupers with a non-cat
gb = df.groupby(['A', 'B', 'C'], observed=observed)
- exp_index = pd.MultiIndex.from_arrays(
+ exp_index = MultiIndex.from_arrays(
[cat1, cat2, ['foo', 'bar'] * 2],
names=['A', 'B', 'C'])
expected = DataFrame({'values': Series(
@@ -280,7 +281,7 @@ def test_observed(observed):
tm.assert_frame_equal(result, expected)
gb = df.groupby(['A', 'B'], observed=observed)
- exp_index = pd.MultiIndex.from_arrays(
+ exp_index = MultiIndex.from_arrays(
[cat1, cat2],
names=['A', 'B'])
expected = DataFrame({'values': [1, 2, 3, 4]},
@@ -296,25 +297,25 @@ def test_observed(observed):
# https://github.com/pandas-dev/pandas/issues/8138
d = {'cat':
- pd.Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
- ordered=True),
+ Categorical(["a", "b", "a", "b"], categories=["a", "b", "c"],
+ ordered=True),
'ints': [1, 1, 2, 2],
'val': [10, 20, 30, 40]}
- df = pd.DataFrame(d)
+ df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
- exp_index = pd.CategoricalIndex(list('ab'), name="cat",
- categories=list('abc'),
- ordered=True)
+ exp_index = CategoricalIndex(list('ab'), name="cat",
+ categories=list('abc'),
+ ordered=True)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20., 30]},
index=exp_index)
if not observed:
- index = pd.CategoricalIndex(list('abc'), name="cat",
- categories=list('abc'),
- ordered=True)
+ index = CategoricalIndex(list('abc'), name="cat",
+ categories=list('abc'),
+ ordered=True)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
@@ -324,9 +325,9 @@ def test_observed(observed):
result = groups_double_key.agg('mean')
expected = DataFrame(
{"val": [10, 30, 20, 40],
- "cat": pd.Categorical(['a', 'a', 'b', 'b'],
- categories=['a', 'b', 'c'],
- ordered=True),
+ "cat": Categorical(['a', 'a', 'b', 'b'],
+ categories=['a', 'b', 'c'],
+ ordered=True),
"ints": [1, 2, 1, 2]}).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
@@ -347,7 +348,7 @@ def test_observed(observed):
# with as_index
d = {'foo': [10, 8, 4, 8, 4, 1, 1], 'bar': [10, 20, 30, 40, 50, 60, 70],
'baz': ['d', 'c', 'e', 'a', 'a', 'd', 'c']}
- df = pd.DataFrame(d)
+ df = DataFrame(d)
cat = pd.cut(df['foo'], np.linspace(0, 10, 3))
df['range'] = cat
groups = df.groupby(['range', 'baz'], as_index=False, observed=observed)
@@ -360,7 +361,7 @@ def test_observed(observed):
def test_observed_codes_remap(observed):
d = {'C1': [3, 3, 4, 5], 'C2': [1, 2, 3, 4], 'C3': [10, 100, 200, 34]}
- df = pd.DataFrame(d)
+ df = DataFrame(d)
values = pd.cut(df['C1'], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, 'C2'], observed=observed)
@@ -401,8 +402,8 @@ def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
- cat = pd.Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c'])
- df = pd.DataFrame({'cat': cat, 'vals': [1, 2, 3]})
+ cat = Categorical(['a', 'c', 'a'], categories=['a', 'b', 'c'])
+ df = DataFrame({'cat': cat, 'vals': [1, 2, 3]})
g = df.groupby('cat', observed=observed)
result = g.groups
@@ -419,9 +420,9 @@ def test_observed_groups(observed):
def test_observed_groups_with_nan(observed):
# GH 24740
- df = pd.DataFrame({'cat': pd.Categorical(['a', np.nan, 'a'],
- categories=['a', 'b', 'd']),
- 'vals': [1, 2, 3]})
+ df = DataFrame({'cat': Categorical(['a', np.nan, 'a'],
+ categories=['a', 'b', 'd']),
+ 'vals': [1, 2, 3]})
g = df.groupby('cat', observed=observed)
result = g.groups
if observed:
@@ -435,16 +436,16 @@ def test_observed_groups_with_nan(observed):
def test_dataframe_categorical_with_nan(observed):
# GH 21151
- s1 = pd.Categorical([np.nan, 'a', np.nan, 'a'],
- categories=['a', 'b', 'c'])
- s2 = pd.Series([1, 2, 3, 4])
- df = pd.DataFrame({'s1': s1, 's2': s2})
+ s1 = Categorical([np.nan, 'a', np.nan, 'a'],
+ categories=['a', 'b', 'c'])
+ s2 = Series([1, 2, 3, 4])
+ df = DataFrame({'s1': s1, 's2': s2})
result = df.groupby('s1', observed=observed).first().reset_index()
if observed:
- expected = DataFrame({'s1': pd.Categorical(['a'],
+ expected = DataFrame({'s1': Categorical(['a'],
categories=['a', 'b', 'c']), 's2': [2]})
else:
- expected = DataFrame({'s1': pd.Categorical(['a', 'b', 'c'],
+ expected = DataFrame({'s1': Categorical(['a', 'b', 'c'],
categories=['a', 'b', 'c']),
's2': [2, np.nan, np.nan]})
tm.assert_frame_equal(result, expected)
@@ -459,11 +460,11 @@ def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
- label = pd.Categorical(['d', 'a', 'b', 'a', 'd', 'b'],
- categories=['a', 'b', 'missing', 'd'],
- ordered=ordered)
- val = pd.Series(['d', 'a', 'b', 'a', 'd', 'b'])
- df = pd.DataFrame({'label': label, 'val': val})
+ label = Categorical(['d', 'a', 'b', 'a', 'd', 'b'],
+ categories=['a', 'b', 'missing', 'd'],
+ ordered=ordered)
+ val = Series(['d', 'a', 'b', 'a', 'd', 'b'])
+ df = DataFrame({'label': label, 'val': val})
# aggregate on the Categorical
result = (df.groupby('label', observed=observed, sort=sort)['val']
@@ -471,8 +472,8 @@ def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
- label = pd.Series(result.index.array, dtype='object')
- aggr = pd.Series(result.array)
+ label = Series(result.index.array, dtype='object')
+ aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = 'missing'
if not all(label == aggr):
@@ -555,9 +556,9 @@ def test_categorical_index():
def test_describe_categorical_columns():
# GH 11558
- cats = pd.CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
- categories=['foo', 'bar', 'baz', 'qux'],
- ordered=True)
+ cats = CategoricalIndex(['qux', 'foo', 'baz', 'bar'],
+ categories=['foo', 'bar', 'baz', 'qux'],
+ ordered=True)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
@@ -567,22 +568,22 @@ def test_describe_categorical_columns():
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
- df = pd.DataFrame({'a': range(10),
- 'medium': ['A', 'B'] * 5,
- 'artist': list('XYXXY') * 2})
+ df = DataFrame({'a': range(10),
+ 'medium': ['A', 'B'] * 5,
+ 'artist': list('XYXXY') * 2})
df['medium'] = df['medium'].astype('category')
gcat = df.groupby(
['artist', 'medium'], observed=False)['a'].count().unstack()
result = gcat.describe()
- exp_columns = pd.CategoricalIndex(['A', 'B'], ordered=False,
- name='medium')
+ exp_columns = CategoricalIndex(['A', 'B'], ordered=False,
+ name='medium')
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat['A'] + gcat['B']
- expected = pd.Series([6, 4], index=pd.Index(['X', 'Y'], name='artist'))
+ expected = Series([6, 4], index=Index(['X', 'Y'], name='artist'))
tm.assert_series_equal(result, expected)
@@ -644,22 +645,22 @@ def test_preserve_categories():
categories = list('abc')
# ordered=True
- df = DataFrame({'A': pd.Categorical(list('ba'),
- categories=categories,
- ordered=True)})
- index = pd.CategoricalIndex(categories, categories, ordered=True)
+ df = DataFrame({'A': Categorical(list('ba'),
+ categories=categories,
+ ordered=True)})
+ index = CategoricalIndex(categories, categories, ordered=True)
tm.assert_index_equal(
df.groupby('A', sort=True, observed=False).first().index, index)
tm.assert_index_equal(
df.groupby('A', sort=False, observed=False).first().index, index)
# ordered=False
- df = DataFrame({'A': pd.Categorical(list('ba'),
- categories=categories,
- ordered=False)})
- sort_index = pd.CategoricalIndex(categories, categories, ordered=False)
- nosort_index = pd.CategoricalIndex(list('bac'), list('bac'),
- ordered=False)
+ df = DataFrame({'A': Categorical(list('ba'),
+ categories=categories,
+ ordered=False)})
+ sort_index = CategoricalIndex(categories, categories, ordered=False)
+ nosort_index = CategoricalIndex(list('bac'), list('bac'),
+ ordered=False)
tm.assert_index_equal(
df.groupby('A', sort=True, observed=False).first().index,
sort_index)
@@ -857,94 +858,94 @@ def test_sort_datetimelike():
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
- df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
- categories=['a', 'b', 'c']),
- 'B': [1, 2, 1]})
- expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
+ df = DataFrame({"A": Categorical(['a', 'a', 'b'],
+ categories=['a', 'b', 'c']),
+ 'B': [1, 2, 1]})
+ expected_idx = CategoricalIndex(['a', 'b', 'c'], name='A')
# 0 by default
result = df.groupby("A", observed=False).B.sum()
- expected = pd.Series([3, 1, 0], expected_idx, name='B')
+ expected = Series([3, 1, 0], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
- expected = pd.Series([3, 1, 0], expected_idx, name='B')
+ expected = Series([3, 1, 0], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
- expected = pd.Series([3, 1, np.nan], expected_idx, name='B')
+ expected = Series([3, 1, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
- expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B')
+ expected = Series([3, np.nan, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
- df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
- categories=['a', 'b', 'c']),
- 'B': [1, 2, 1]})
+ df = DataFrame({"A": Categorical(['a', 'a', 'b'],
+ categories=['a', 'b', 'c']),
+ 'B': [1, 2, 1]})
- expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A')
+ expected_idx = CategoricalIndex(['a', 'b', 'c'], name='A')
# 1 by default
result = df.groupby("A", observed=False).B.prod()
- expected = pd.Series([2, 1, 1], expected_idx, name='B')
+ expected = Series([2, 1, 1], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
- expected = pd.Series([2, 1, 1], expected_idx, name='B')
+ expected = Series([2, 1, 1], expected_idx, name='B')
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
- expected = pd.Series([2, 1, np.nan], expected_idx, name='B')
+ expected = Series([2, 1, np.nan], expected_idx, name='B')
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
- df = pd.DataFrame({
- 'key1': pd.Categorical(list('abcbabcba')),
- 'key2': pd.Categorical(
+ df = DataFrame({
+ 'key1': Categorical(list('abcbabcba')),
+ 'key2': Categorical(
list(pd.date_range('2018-06-01 00', freq='1T', periods=3)) * 3),
'values': np.arange(9),
})
result = df.groupby(['key1', 'key2']).mean()
- idx = pd.MultiIndex.from_product(
- [pd.Categorical(['a', 'b', 'c']),
- pd.Categorical(pd.date_range('2018-06-01 00', freq='1T', periods=3))],
+ idx = MultiIndex.from_product(
+ [Categorical(['a', 'b', 'c']),
+ Categorical(pd.date_range('2018-06-01 00', freq='1T', periods=3))],
names=['key1', 'key2'])
- expected = pd.DataFrame(
+ expected = DataFrame(
{'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("as_index, expected", [
- (True, pd.Series(
- index=pd.MultiIndex.from_arrays(
- [pd.Series([1, 1, 2], dtype='category'),
- [1, 2, 2]], names=['a', 'b']
+ (True, Series(
+ index=MultiIndex.from_arrays(
+ [Series([1, 1, 2], dtype='category'),
+ [1, 2, 2]], names=['a', 'b']
),
data=[1, 2, 3], name='x'
)),
- (False, pd.DataFrame({
- 'a': pd.Series([1, 1, 2], dtype='category'),
+ (False, DataFrame({
+ 'a': Series([1, 1, 2], dtype='category'),
'b': [1, 2, 2],
'x': [1, 2, 3]
}))
])
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
- df = pd.DataFrame({
- 'a': pd.Series([1, 1, 2], dtype='category'),
+ df = DataFrame({
+ 'a': Series([1, 1, 2], dtype='category'),
'b': [1, 2, 2],
'x': [1, 2, 3]
})
@@ -957,9 +958,92 @@ def test_groupby_agg_observed_true_single_column(as_index, expected):
@pytest.mark.parametrize('fill_value', [None, np.nan, pd.NaT])
def test_shift(fill_value):
- ct = pd.Categorical(['a', 'b', 'c', 'd'],
- categories=['a', 'b', 'c', 'd'], ordered=False)
- expected = pd.Categorical([None, 'a', 'b', 'c'],
- categories=['a', 'b', 'c', 'd'], ordered=False)
+ ct = Categorical(['a', 'b', 'c', 'd'],
+ categories=['a', 'b', 'c', 'd'], ordered=False)
+ expected = Categorical([None, 'a', 'b', 'c'],
+ categories=['a', 'b', 'c', 'd'], ordered=False)
res = ct.shift(1, fill_value=fill_value)
assert_equal(res, expected)
+
+
+@pytest.fixture
+def df_cat(df):
+ """
+ DataFrame with multiple categorical columns and a column of integers.
+ Shortened so as not to contain all possible combinations of categories.
+ Useful for testing `observed` kwarg functionality on GroupBy objects.
+
+ Parameters
+ ----------
+ df: DataFrame
+ Non-categorical, longer DataFrame from another fixture, used to derive
+ this one
+
+ Returns
+ -------
+ df_cat: DataFrame
+ """
+ df_cat = df.copy()[:4] # leave out some groups
+ df_cat['A'] = df_cat['A'].astype('category')
+ df_cat['B'] = df_cat['B'].astype('category')
+ df_cat['C'] = Series([1, 2, 3, 4])
+ df_cat = df_cat.drop(['D'], axis=1)
+ return df_cat
+
+
+@pytest.mark.parametrize('operation, kwargs', [
+ ('agg', dict(dtype='category')),
+ ('apply', dict())])
+def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
+ # GH 24880
+ index = MultiIndex.from_frame(
+ DataFrame({'A': ['foo', 'foo', 'bar', 'bar'],
+ 'B': ['one', 'two', 'one', 'three']
+ }, **kwargs))
+ expected = Series(data=[1, 3, 2, 4], index=index, name='C')
+ grouped = df_cat.groupby(['A', 'B'], observed=True)['C']
+ result = getattr(grouped, operation)(sum)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize('operation', ['agg', 'apply'])
+@pytest.mark.parametrize('observed', [False, None])
+def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
+ # GH 24880
+ index, _ = MultiIndex.from_product(
+ [CategoricalIndex(['bar', 'foo'], ordered=False),
+ CategoricalIndex(['one', 'three', 'two'], ordered=False)],
+ names=['A', 'B']).sortlevel()
+
+ expected = Series(data=[2, 4, np.nan, 1, np.nan, 3],
+ index=index, name='C')
+ grouped = df_cat.groupby(['A', 'B'], observed=observed)['C']
+ result = getattr(grouped, operation)(sum)
+ assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("observed, index, data", [
+ (True, MultiIndex.from_tuples(
+ [('foo', 'one', 'min'), ('foo', 'one', 'max'),
+ ('foo', 'two', 'min'), ('foo', 'two', 'max'),
+ ('bar', 'one', 'min'), ('bar', 'one', 'max'),
+ ('bar', 'three', 'min'), ('bar', 'three', 'max')],
+ names=['A', 'B', None]), [1, 1, 3, 3, 2, 2, 4, 4]),
+ (False, MultiIndex.from_product(
+ [CategoricalIndex(['bar', 'foo'], ordered=False),
+ CategoricalIndex(['one', 'three', 'two'], ordered=False),
+ Index(['min', 'max'])],
+ names=['A', 'B', None]),
+ [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3]),
+ (None, MultiIndex.from_product(
+ [CategoricalIndex(['bar', 'foo'], ordered=False),
+ CategoricalIndex(['one', 'three', 'two'], ordered=False),
+ Index(['min', 'max'])],
+ names=['A', 'B', None]),
+ [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3])])
+def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
+ # GH 24880
+ expected = Series(data=data, index=index, name='C')
+ result = df_cat.groupby(['A', 'B'], observed=observed)['C'].apply(
+ lambda x: OrderedDict([('min', x.min()), ('max', x.max())]))
+ assert_series_equal(result, expected)
| - [x] closes #24880
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26463 | 2019-05-19T21:13:24Z | 2019-05-30T01:33:50Z | 2019-05-30T01:33:50Z | 2019-06-17T14:10:26Z |
Remove py.path special handling from io.common | diff --git a/pandas/io/common.py b/pandas/io/common.py
index f9cd1806763e2..34635ebf64ad6 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -9,6 +9,7 @@
import lzma
import mmap
import os
+import pathlib
from urllib.error import URLError # noqa
from urllib.parse import ( # noqa
urlencode, urljoin, urlparse as parse_url, uses_netloc, uses_params,
@@ -115,24 +116,10 @@ def _stringify_path(filepath_or_buffer):
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
- try:
- import pathlib
- _PATHLIB_INSTALLED = True
- except ImportError:
- _PATHLIB_INSTALLED = False
-
- try:
- from py.path import local as LocalPath
- _PY_PATH_INSTALLED = True
- except ImportError:
- _PY_PATH_INSTALLED = False
-
if hasattr(filepath_or_buffer, '__fspath__'):
return filepath_or_buffer.__fspath__()
- if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
+ elif isinstance(filepath_or_buffer, pathlib.Path):
return str(filepath_or_buffer)
- if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
- return filepath_or_buffer.strpath
return _expand_user(filepath_or_buffer)
| - [x] closes #26450
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26458 | 2019-05-19T05:18:58Z | 2019-05-26T01:48:03Z | 2019-05-26T01:48:03Z | 2019-05-26T05:34:25Z |
BUG-26214 fix colors parameter in DataFrame.boxplot | diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst
index 75b705372c747..0144ec211fd03 100644
--- a/doc/source/whatsnew/v1.0.0.rst
+++ b/doc/source/whatsnew/v1.0.0.rst
@@ -229,6 +229,7 @@ Plotting
- Bug in :meth:`DataFrame.plot` producing incorrect legend markers when plotting multiple series on the same axis (:issue:`18222`)
- Bug in :meth:`DataFrame.plot` when ``kind='box'`` and data contains datetime or timedelta data. These types are now automatically dropped (:issue:`22799`)
- Bug in :meth:`DataFrame.plot.line` and :meth:`DataFrame.plot.area` produce wrong xlim in x-axis (:issue:`27686`, :issue:`25160`, :issue:`24784`)
+- Bug where :meth:`DataFrame.boxplot` would not accept a `color` parameter like `DataFrame.plot.box` (:issue:`26214`)
- :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`)
Groupby/resample/rolling
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index 8ff7441df5354..99035013092cc 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -4,6 +4,7 @@
from matplotlib.artist import setp
import numpy as np
+from pandas.core.dtypes.common import is_dict_like
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import remove_na_arraylike
@@ -250,13 +251,38 @@ def boxplot(
def _get_colors():
# num_colors=3 is required as method maybe_color_bp takes the colors
# in positions 0 and 2.
- return _get_standard_colors(color=kwds.get("color"), num_colors=3)
+ # if colors not provided, use same defaults as DataFrame.plot.box
+ result = _get_standard_colors(num_colors=3)
+ result = np.take(result, [0, 0, 2])
+ result = np.append(result, "k")
+
+ colors = kwds.pop("color", None)
+ if colors:
+ if is_dict_like(colors):
+ # replace colors in result array with user-specified colors
+ # taken from the colors dict parameter
+ # "boxes" value placed in position 0, "whiskers" in 1, etc.
+ valid_keys = ["boxes", "whiskers", "medians", "caps"]
+ key_to_index = dict(zip(valid_keys, range(4)))
+ for key, value in colors.items():
+ if key in valid_keys:
+ result[key_to_index[key]] = value
+ else:
+ raise ValueError(
+ "color dict contains invalid "
+ "key '{0}' "
+ "The key must be either {1}".format(key, valid_keys)
+ )
+ else:
+ result.fill(colors)
+
+ return result
def maybe_color_bp(bp):
- if "color" not in kwds:
- setp(bp["boxes"], color=colors[0], alpha=1)
- setp(bp["whiskers"], color=colors[0], alpha=1)
- setp(bp["medians"], color=colors[2], alpha=1)
+ setp(bp["boxes"], color=colors[0], alpha=1)
+ setp(bp["whiskers"], color=colors[1], alpha=1)
+ setp(bp["medians"], color=colors[2], alpha=1)
+ setp(bp["caps"], color=colors[3], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 5bbaff580c356..116d924f5a596 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -175,6 +175,34 @@ def test_boxplot_numeric_data(self):
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
+ @pytest.mark.parametrize(
+ "colors_kwd, expected",
+ [
+ (
+ dict(boxes="r", whiskers="b", medians="g", caps="c"),
+ dict(boxes="r", whiskers="b", medians="g", caps="c"),
+ ),
+ (dict(boxes="r"), dict(boxes="r")),
+ ("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
+ ],
+ )
+ def test_color_kwd(self, colors_kwd, expected):
+ # GH: 26214
+ df = DataFrame(random.rand(10, 2))
+ result = df.boxplot(color=colors_kwd, return_type="dict")
+ for k, v in expected.items():
+ assert result[k][0].get_color() == v
+
+ @pytest.mark.parametrize(
+ "dict_colors, msg",
+ [(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
+ )
+ def test_color_kwd_errors(self, dict_colors, msg):
+ # GH: 26214
+ df = DataFrame(random.rand(10, 2))
+ with pytest.raises(ValueError, match=msg):
+ df.boxplot(color=dict_colors, return_type="dict")
+
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
| - [X] closes #26214
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
This PR fixes issues in the handling of the `color` parameter in the implementation of `DataFrame.boxplot` that do not exist in `DataFrame.plot.box`
- the `color` parameter is now removed from `kwds` before the `matplotlib` call, preventing an error
- an alternative implementation of color extraction from `color`, as `_get_standard_colors` did not preserve the appropriate order (without looking at that function, one can see that the previous implementation would give the boxes and the medians the same color even when otherwise specified)
- checking that `color` is actually a `dict`
| https://api.github.com/repos/pandas-dev/pandas/pulls/26456 | 2019-05-19T04:30:48Z | 2019-09-20T03:36:34Z | 2019-09-20T03:36:34Z | 2019-09-20T03:36:41Z |
TST/DEPR: remove .ix from tests\indexing\multiindex\test_loc.py | diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py
index 6b6e1dbd859a2..5ea172f14f6f6 100644
--- a/pandas/tests/indexing/multiindex/test_ix.py
+++ b/pandas/tests/indexing/multiindex/test_ix.py
@@ -1,5 +1,6 @@
from warnings import catch_warnings, simplefilter
+import numpy as np
import pytest
from pandas.errors import PerformanceWarning
@@ -53,3 +54,19 @@ def test_ix_general(self):
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
+
+ def test_ix_multiindex_missing_label_raises(self):
+ # GH 21593
+ df = DataFrame(np.random.randn(3, 3),
+ columns=[[2, 2, 4], [6, 8, 10]],
+ index=[[4, 4, 8], [8, 10, 12]])
+
+ with pytest.raises(KeyError, match=r"^2$"):
+ df.ix[2]
+
+ def test_series_ix_getitem_fancy(
+ self, multiindex_year_month_day_dataframe_random_data):
+ s = multiindex_year_month_day_dataframe_random_data['A']
+ expected = s.reindex(s.index[49:51])
+ result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
+ tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 97ef78ff5ce1b..962976b8ded55 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -1,5 +1,4 @@
import itertools
-from warnings import catch_warnings
import numpy as np
import pytest
@@ -25,7 +24,6 @@ def frame_random_data_integer_multi_index():
return DataFrame(np.random.randn(6, 2), index=index)
-@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexLoc:
def test_loc_getitem_series(self):
@@ -84,54 +82,48 @@ def test_loc_getitem_array(self):
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
- def test_loc_multiindex(self):
+ def test_loc_multiindex_labels(self):
+ df = DataFrame(np.random.randn(3, 3),
+ columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
+ index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
- mi_labels = DataFrame(np.random.randn(3, 3),
- columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
- index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
-
- mi_int = DataFrame(np.random.randn(3, 3),
- columns=[[2, 2, 4], [6, 8, 10]],
- index=[[4, 4, 8], [8, 10, 12]])
-
- # the first row
- rs = mi_labels.loc['i']
- with catch_warnings(record=True):
- xp = mi_labels.ix['i']
- tm.assert_frame_equal(rs, xp)
+ # the first 2 rows
+ expected = df.iloc[[0, 1]].droplevel(0)
+ result = df.loc['i']
+ tm.assert_frame_equal(result, expected)
- # 2nd (last) columns
- rs = mi_labels.loc[:, 'j']
- with catch_warnings(record=True):
- xp = mi_labels.ix[:, 'j']
- tm.assert_frame_equal(rs, xp)
+ # 2nd (last) column
+ expected = df.iloc[:, [2]].droplevel(0, axis=1)
+ result = df.loc[:, 'j']
+ tm.assert_frame_equal(result, expected)
- # corner column
- rs = mi_labels.loc['j'].loc[:, 'j']
- with catch_warnings(record=True):
- xp = mi_labels.ix['j'].ix[:, 'j']
- tm.assert_frame_equal(rs, xp)
+ # bottom right corner
+ expected = df.iloc[[2], [2]].droplevel(0).droplevel(0, axis=1)
+ result = df.loc['j'].loc[:, 'j']
+ tm.assert_frame_equal(result, expected)
# with a tuple
- rs = mi_labels.loc[('i', 'X')]
- with catch_warnings(record=True):
- xp = mi_labels.ix[('i', 'X')]
- tm.assert_frame_equal(rs, xp)
+ expected = df.iloc[[0, 1]]
+ result = df.loc[('i', 'X')]
+ tm.assert_frame_equal(result, expected)
+
+ def test_loc_multiindex_ints(self):
+ df = DataFrame(np.random.randn(3, 3),
+ columns=[[2, 2, 4], [6, 8, 10]],
+ index=[[4, 4, 8], [8, 10, 12]])
+ expected = df.iloc[[0, 1]].droplevel(0)
+ result = df.loc[4]
+ tm.assert_frame_equal(result, expected)
- rs = mi_int.loc[4]
- with catch_warnings(record=True):
- xp = mi_int.ix[4]
- tm.assert_frame_equal(rs, xp)
+ def test_loc_multiindex_missing_label_raises(self):
+ df = DataFrame(np.random.randn(3, 3),
+ columns=[[2, 2, 4], [6, 8, 10]],
+ index=[[4, 4, 8], [8, 10, 12]])
- # missing label
with pytest.raises(KeyError, match=r"^2$"):
- mi_int.loc[2]
- with catch_warnings(record=True):
- # GH 21593
- with pytest.raises(KeyError, match=r"^2$"):
- mi_int.ix[2]
+ df.loc[2]
- def test_loc_multiindex_too_many_dims(self):
+ def test_loc_multiindex_too_many_dims_raises(self):
# GH 14885
s = Series(range(8), index=MultiIndex.from_product(
[['a', 'b'], ['c', 'd'], ['e', 'f']]))
@@ -227,7 +219,6 @@ def test_loc_getitem_int_slice(self):
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
- # expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
@@ -309,17 +300,11 @@ def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
tm.assert_series_equal(result, expected)
-@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
-@pytest.mark.parametrize('indexer', [
- lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
- lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
-])
def test_series_loc_getitem_fancy(
- multiindex_year_month_day_dataframe_random_data, indexer):
+ multiindex_year_month_day_dataframe_random_data):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[49:51])
-
- result = indexer(s)
+ result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
| follow on from #26438
i leave this as draft until #26438 is merged, to prevent this one being merged first.
not used `iloc` for the expected where dropping levels would be required, used df.values instead. | https://api.github.com/repos/pandas-dev/pandas/pulls/26451 | 2019-05-18T23:52:46Z | 2019-05-19T20:28:39Z | 2019-05-19T20:28:39Z | 2019-05-19T20:45:40Z |
CLN: remove __bytes__ | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index dc87ae8f48b8a..cf46a7f193b5a 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -250,6 +250,7 @@ Other API Changes
- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`)
- The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`)
- The ``arg`` argument in :meth:`pandas.core.window._Window.aggregate` has been renamed to ``func`` (:issue:`26372`)
+- Most Pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. This method has been removed as a part of dropping Python2 (:issue:`26447`)
.. _whatsnew_0250.deprecations:
diff --git a/pandas/core/base.py b/pandas/core/base.py
index b5b3f8118f473..f7837c60c0b82 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -48,15 +48,6 @@ def __str__(self):
"""
raise AbstractMethodError(self)
- def __bytes__(self):
- """
- Return a bytes representation for a particular object.
- """
- from pandas._config import get_option
-
- encoding = get_option("display.encoding")
- return str(self).encode(encoding, 'replace')
-
def __repr__(self):
"""
Return a string representation for a particular object.
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index f93c445b26841..32047c3fbb5e1 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -132,15 +132,6 @@ def __str__(self):
"""
return self.name
- def __bytes__(self):
- """
- Return a string representation for a particular object.
- """
- from pandas._config import get_option
-
- encoding = get_option("display.encoding")
- return str(self).encode(encoding, 'replace')
-
def __repr__(self):
"""
Return a string representation for a particular object.
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 044160ac6f5e8..32594c856a236 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -140,9 +140,12 @@ def test_unicode_string_with_unicode(self):
df = DataFrame({'A': ["\u05d0"]})
str(df)
- def test_bytestring_with_unicode(self):
- df = DataFrame({'A': ["\u05d0"]})
- bytes(df)
+ def test_str_to_bytes_raises(self):
+ # GH 26447
+ df = DataFrame({'A': ["abc"]})
+ msg = "^'str' object cannot be interpreted as an integer$"
+ with pytest.raises(TypeError, match=msg):
+ bytes(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20),
diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py
index f6edb1d722321..c320cb32b856c 100644
--- a/pandas/tests/indexes/multi/test_format.py
+++ b/pandas/tests/indexes/multi/test_format.py
@@ -88,12 +88,6 @@ def test_unicode_string_with_unicode():
str(idx)
-def test_bytestring_with_unicode():
- d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
- idx = pd.DataFrame(d).set_index(["a", "b"]).index
- bytes(idx)
-
-
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 32f3dcff0ba73..7b507a9de6b5d 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -2388,10 +2388,12 @@ def test_print_unicode_columns(self):
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
- @pytest.mark.parametrize("func", [str, bytes])
- def test_with_unicode(self, func):
- index = Index(list(range(1000)))
- func(index)
+ def test_str_to_bytes_raises(self):
+ # GH 26447
+ index = Index([str(x) for x in range(10)])
+ msg = "^'str' object cannot be interpreted as an integer$"
+ with pytest.raises(TypeError, match=msg):
+ bytes(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py
index 885d078b16f56..56efd4bbfd62a 100644
--- a/pandas/tests/indexes/test_frozen.py
+++ b/pandas/tests/indexes/test_frozen.py
@@ -1,6 +1,7 @@
import warnings
import numpy as np
+import pytest
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
@@ -49,6 +50,12 @@ def test_difference_dupe(self):
expected = FrozenList([1, 3])
self.check_result(result, expected)
+ def test_tricky_container_to_bytes_raises(self):
+ # GH 26447
+ msg = "^'str' object cannot be interpreted as an integer$"
+ with pytest.raises(TypeError, match=msg):
+ bytes(self.unicode_container)
+
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
@@ -68,6 +75,9 @@ def test_constructor_warns(self):
with tm.assert_produces_warning(FutureWarning):
FrozenNDArray([1, 2, 3])
+ def test_tricky_container_to_bytes(self):
+ bytes(self.unicode_container)
+
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index 032c730fea408..92b6fb0610979 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -1,6 +1,7 @@
from datetime import datetime, timedelta
import numpy as np
+import pytest
import pandas as pd
from pandas import (
@@ -152,9 +153,12 @@ def test_unicode_string_with_unicode(self):
df = Series(["\u05d0"], name="\u05d1")
str(df)
- def test_bytestring_with_unicode(self):
- df = Series(["\u05d0"], name="\u05d1")
- bytes(df)
+ def test_str_to_bytes_raises(self):
+ # GH 26447
+ df = Series(["abc"], name="abc")
+ msg = "^'str' object cannot be interpreted as an integer$"
+ with pytest.raises(TypeError, match=msg):
+ bytes(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 7a8ef11bafb85..3b4f85e680f6e 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -38,7 +38,6 @@ def test_tricky_container(self):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
- bytes(self.unicode_container)
class CheckImmutable:
| - [x] xref #25725
Remove ``__bytes__`` method from ``StringMixin`` and ``PandasExtensionDtype``. These are the only uses of ``__bytes__`` in the code base.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26447 | 2019-05-18T16:25:17Z | 2019-05-19T17:08:08Z | 2019-05-19T17:08:08Z | 2019-05-19T17:08:14Z |
Clean Up src/parsers | diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 2eb19ef1dd082..88b918e9cc515 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -149,9 +149,6 @@ cdef extern from "parser/tokenizer.h":
int skipinitialspace # ignore spaces following delimiter? */
int quoting # style of quoting to write */
- # hmm =/
- # int numeric_field
-
char commentchar
int allow_embedded_newline
int strict # raise exception on bad CSV */
diff --git a/pandas/_libs/src/parser/io.h b/pandas/_libs/src/parser/io.h
index 074322c7bdf78..aac418457d3b6 100644
--- a/pandas/_libs/src/parser/io.h
+++ b/pandas/_libs/src/parser/io.h
@@ -10,7 +10,8 @@ The full license is in the LICENSE file, distributed with this software.
#ifndef PANDAS__LIBS_SRC_PARSER_IO_H_
#define PANDAS__LIBS_SRC_PARSER_IO_H_
-#include "Python.h"
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#include "tokenizer.h"
typedef struct _file_source {
@@ -37,8 +38,6 @@ typedef struct _memory_map {
size_t position;
} memory_map;
-#define MM(src) ((memory_map *)src)
-
void *new_mmap(char *fname);
int del_mmap(void *src);
diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c
index fd5fc0df299ae..723bf56a79512 100644
--- a/pandas/_libs/src/parser/tokenizer.c
+++ b/pandas/_libs/src/parser/tokenizer.c
@@ -131,8 +131,6 @@ void parser_set_default_options(parser_t *self) {
self->skip_footer = 0;
}
-int get_parser_memory_footprint(parser_t *self) { return 0; }
-
parser_t *parser_new() { return (parser_t *)calloc(1, sizeof(parser_t)); }
int parser_clear_data_buffers(parser_t *self) {
@@ -1426,21 +1424,6 @@ PANDAS_INLINE void uppercase(char *p) {
for (; *p; ++p) *p = toupper_ascii(*p);
}
-int PANDAS_INLINE to_longlong(char *item, long long *p_value) {
- char *p_end;
-
- // Try integer conversion. We explicitly give the base to be 10. If
- // we used 0, strtoll() would convert '012' to 10, because the leading 0 in
- // '012' signals an octal number in C. For a general purpose reader, that
- // would be a bug, not a feature.
- *p_value = strtoll(item, &p_end, 10);
-
- // Allow trailing spaces.
- while (isspace_ascii(*p_end)) ++p_end;
-
- return (errno == 0) && (!*p_end);
-}
-
int to_boolean(const char *item, uint8_t *val) {
char *tmp;
int i, status = 0;
@@ -1474,24 +1457,6 @@ int to_boolean(const char *item, uint8_t *val) {
return status;
}
-#ifdef TEST
-
-int main(int argc, char *argv[]) {
- double x, y;
- long long xi;
- int status;
- char *s;
-
- s = "123,789";
- status = to_longlong_thousands(s, &xi, ',');
- printf("s = '%s'\n", s);
- printf("status = %d\n", status);
- printf("x = %d\n", (int)xi);
-
- return 0;
-}
-#endif // TEST
-
// ---------------------------------------------------------------------------
// Implementation of xstrtod
diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h
index 6cad4c932cb07..b6d5d6937f4db 100644
--- a/pandas/_libs/src/parser/tokenizer.h
+++ b/pandas/_libs/src/parser/tokenizer.h
@@ -12,14 +12,8 @@ See LICENSE for the license
#ifndef PANDAS__LIBS_SRC_PARSER_TOKENIZER_H_
#define PANDAS__LIBS_SRC_PARSER_TOKENIZER_H_
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
-#include "Python.h"
-
-#include <ctype.h>
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
#define ERROR_OK 0
#define ERROR_NO_DIGITS 1
@@ -31,9 +25,6 @@ See LICENSE for the license
#include "khash.h"
-#define CHUNKSIZE 1024 * 256
-#define KB 1024
-#define MB 1024 * KB
#define STREAM_INIT_SIZE 32
#define REACHED_EOF 1
@@ -50,25 +41,10 @@ See LICENSE for the license
*/
-#define FALSE 0
-#define TRUE 1
-
-// Maximum number of columns in a file.
-#define MAX_NUM_COLUMNS 2000
-
-// Maximum number of characters in single field.
-#define FIELD_BUFFER_SIZE 2000
-
/*
* Common set of error types for the read_rows() and tokenize()
* functions.
*/
-#define ERROR_OUT_OF_MEMORY 1
-#define ERROR_INVALID_COLUMN_INDEX 10
-#define ERROR_CHANGED_NUMBER_OF_FIELDS 12
-#define ERROR_TOO_MANY_CHARS 21
-#define ERROR_TOO_MANY_FIELDS 22
-#define ERROR_NO_DATA 23
// #define VERBOSE
#if defined(VERBOSE)
@@ -84,12 +60,6 @@ See LICENSE for the license
* of some file I/O.
*/
-/*
- * WORD_BUFFER_SIZE determines the maximum amount of non-delimiter
- * text in a row.
- */
-#define WORD_BUFFER_SIZE 4000
-
typedef enum {
START_RECORD,
START_FIELD,
@@ -164,9 +134,6 @@ typedef struct parser_t {
int skipinitialspace; /* ignore spaces following delimiter? */
int quoting; /* style of quoting to write */
- // krufty, hmm =/
- int numeric_field;
-
char commentchar;
int allow_embedded_newline;
int strict; /* raise exception on bad CSV */
@@ -191,7 +158,7 @@ typedef struct parser_t {
void *skipset;
PyObject *skipfunc;
int64_t skip_first_N_rows;
- int skip_footer;
+ int64_t skip_footer;
// pick one, depending on whether the converter requires GIL
double (*double_converter_nogil)(const char *, char **,
char, char, char, int);
@@ -208,7 +175,7 @@ typedef struct parser_t {
typedef struct coliter_t {
char **words;
int64_t *line_start;
- int col;
+ int64_t col;
} coliter_t;
void coliter_setup(coliter_t *self, parser_t *parser, int i, int start);
| Looking at this in detail noticed quite a few unused functions and pre-processor directives. There was also a mismatch in types between the .pyx and C structs | https://api.github.com/repos/pandas-dev/pandas/pulls/26445 | 2019-05-18T15:33:25Z | 2019-05-18T16:15:55Z | 2019-05-18T16:15:55Z | 2019-05-18T16:35:01Z |
TST/CLN: remove try/except from test_column_contains_typeerror | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 8013ddfeb38f9..d1b009a7fa8e2 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -111,11 +111,9 @@ def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
- def test_column_contains_typeerror(self, float_frame):
- try:
+ def test_column_contains_raises(self, float_frame):
+ with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
- except TypeError:
- pass
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
| https://api.github.com/repos/pandas-dev/pandas/pulls/26442 | 2019-05-17T20:53:35Z | 2019-05-18T00:42:23Z | 2019-05-18T00:42:23Z | 2019-05-18T01:50:16Z | |
TST: Iterrows making incorrect assumptions about datetime | diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index d1b009a7fa8e2..ce841b302a037 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -1,4 +1,5 @@
from copy import deepcopy
+import datetime
import pydoc
import numpy as np
@@ -222,6 +223,17 @@ def test_iterrows_iso8601(self):
exp = s.loc[k]
self._assert_series_equal(v, exp)
+ def test_iterrows_corner(self):
+ # gh-12222
+ df = DataFrame(
+ {'a': [datetime.datetime(2015, 1, 1)], 'b': [None], 'c': [None],
+ 'd': [''], 'e': [[]], 'f': [set()], 'g': [{}]})
+ expected = Series(
+ [datetime.datetime(2015, 1, 1), None, None, '', [], set(), {}],
+ index=list('abcdefg'), name=0, dtype='object')
+ _, result = next(df.iterrows())
+ tm.assert_series_equal(result, expected)
+
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = self.klass._constructor_sliced(tup[1:])
| - [x] closes #12222
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ n/a] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26441 | 2019-05-17T19:39:00Z | 2019-05-18T14:16:55Z | 2019-05-18T14:16:55Z | 2019-05-18T21:18:38Z |
ERR: User-facing AssertionError in DataFrame Constructor | diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py
index 2ee49c30b226d..2cd53d2ce9cee 100644
--- a/pandas/core/internals/construction.py
+++ b/pandas/core/internals/construction.py
@@ -424,8 +424,13 @@ def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
else:
# list of lists
content = list(lib.to_object_array(data).T)
- return _convert_object_array(content, columns, dtype=dtype,
- coerce_float=coerce_float)
+ # gh-26429 do not raise user-facing AssertionError
+ try:
+ result = _convert_object_array(content, columns, dtype=dtype,
+ coerce_float=coerce_float)
+ except AssertionError as e:
+ raise ValueError(e) from e
+ return result
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 82f86295da97f..b5b389b6323b2 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -435,6 +435,11 @@ def test_constructor_error_msgs(self):
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
+ # gh-26429
+ msg = "2 columns passed, passed data had 10 columns"
+ with pytest.raises(ValueError, match=msg):
+ DataFrame((range(10), range(10, 20)), columns=('ones', 'twos'))
+
msg = ("If using all scalar "
"values, you must pass "
"an index")
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index d97a763fe2d0b..8b140263b12bc 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -353,7 +353,7 @@ def test_frame_from_json_bad_data(self):
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
msg = "3 columns passed, passed data had 2 columns"
- with pytest.raises(AssertionError, match=msg):
+ with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")
# bad key
| - [x] closes #26429
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/26440 | 2019-05-17T15:00:15Z | 2019-05-18T14:12:47Z | 2019-05-18T14:12:46Z | 2019-05-18T21:19:29Z |
DEPR: Change .ix DeprecationWarning -> FutureWarning | diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index d38ee7b8b589a..5f43512b69098 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -255,6 +255,7 @@ Other API Changes
Deprecations
~~~~~~~~~~~~
+- The deprecated ``.ix[]`` indexer now raises a more visible FutureWarning instead of DeprecationWarning (:issue:`26438`).
- Deprecated the ``units=M`` (months) and ``units=Y`` (year) parameters for ``units`` of :func:`pandas.to_timedelta`, :func:`pandas.Timedelta` and :func:`pandas.TimedeltaIndex` (:issue:`16344`)
- The functions :func:`pandas.to_datetime` and :func:`pandas.to_timedelta` have deprecated the ``box`` keyword. Instead, use :meth:`to_numpy` or :meth:`Timestamp.to_datetime64` or :meth:`Timedelta.to_timedelta64`. (:issue:`24416`)
- The :meth:`DataFrame.compound` and :meth:`Series.compound` methods are deprecated and will be removed in a future version.
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 65123a8f0f5a7..d243d74b9b745 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1419,7 +1419,7 @@ class _IXIndexer(_NDFrameIndexer):
def __init__(self, name, obj):
warnings.warn(self._ix_deprecation_warning,
- DeprecationWarning, stacklevel=2)
+ FutureWarning, stacklevel=2)
super().__init__(name, obj)
@Appender(_NDFrameIndexer._validate_key.__doc__)
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 104886bb3e446..adb8c97584463 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -361,7 +361,7 @@ def test_getitem_ix_mixed_integer(self):
assert_frame_equal(result, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
@@ -381,34 +381,34 @@ def test_getitem_ix_mixed_integer(self):
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
self.frame.ix[:, [-1]] = 0
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
# ix does label-based indexing when having an integer index
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
with pytest.raises(KeyError):
df.ix[[-1]]
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
with pytest.raises(KeyError):
df.ix[:, [-1]]
@@ -416,11 +416,11 @@ def test_getitem_setitem_ix_negative_integers(self):
a = DataFrame(np.random.randn(20, 2),
index=[chr(x + 65) for x in range(20)])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
a.ix[-1] = a.ix[-2]
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
@@ -798,19 +798,19 @@ def test_getitem_fancy_2d(self):
f = self.frame
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[5:10], f[5:10])
assert_frame_equal(f.ix[5:10, :], f[5:10])
assert_frame_equal(f.ix[:5, ['A', 'B']],
@@ -819,26 +819,26 @@ def test_getitem_fancy_2d(self):
# slice rows with labels, inclusive!
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
expected = f.ix[5:11]
result = f.ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
exp = f.copy()
f.ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
msg = "Cannot index with multidimensional key"
with pytest.raises(ValueError, match=msg):
f.ix[f > 0.5]
@@ -898,7 +898,7 @@ def test_setitem_fancy_2d(self):
expected = frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
@@ -914,7 +914,7 @@ def test_setitem_fancy_2d(self):
values = np.random.randn(3, 2)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
@@ -928,14 +928,14 @@ def test_setitem_fancy_2d(self):
frame = self.frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
expected2 = self.frame.copy()
arr = np.random.randn(5, len(frame.columns))
frame.ix[5:10] = arr
@@ -944,7 +944,7 @@ def test_setitem_fancy_2d(self):
# case 4
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
@@ -953,7 +953,7 @@ def test_setitem_fancy_2d(self):
# case 5
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
@@ -966,13 +966,13 @@ def test_setitem_fancy_2d(self):
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame = self.frame.copy()
expected = self.frame.copy()
@@ -982,7 +982,7 @@ def test_setitem_fancy_2d(self):
# case 7: slice columns
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
@@ -1025,7 +1025,7 @@ def test_fancy_setitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
@@ -1033,7 +1033,7 @@ def test_fancy_setitem_int_labels(self):
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
@@ -1041,7 +1041,7 @@ def test_fancy_setitem_int_labels(self):
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
@@ -1055,25 +1055,25 @@ def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
@@ -1082,7 +1082,7 @@ def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
# labels that aren't contained
with pytest.raises(KeyError, match=r"\[1\] not in index"):
@@ -1105,7 +1105,7 @@ def test_fancy_index_int_labels_exceptions(self):
def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
@@ -1119,7 +1119,7 @@ def test_setitem_fancy_mixed_2d(self):
# #1432
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
assert df._is_mixed_type
@@ -1137,32 +1137,32 @@ def test_ix_align(self):
df = df_orig.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
@@ -1170,7 +1170,7 @@ def test_ix_align(self):
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
@@ -1182,7 +1182,7 @@ def test_ix_frame_align(self):
df = df_orig.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
@@ -1190,14 +1190,14 @@ def test_ix_frame_align(self):
b.sort_index(inplace=True)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
@@ -1240,7 +1240,7 @@ def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
@@ -1249,7 +1249,7 @@ def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
@@ -1259,17 +1259,17 @@ def test_ix_dup(self):
df = DataFrame(np.random.randn(len(idx), 3), idx)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
sub = df.ix[:'d']
assert_frame_equal(sub, df)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
@@ -1278,57 +1278,57 @@ def test_getitem_fancy_1d(self):
# return self if no slicing...for now
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert f.ix[:, :] is f
# low dimensional slice
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
xs1 = f.ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
ts1 = f.ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
tm.assert_series_equal(ts1, ts2)
# positional xs
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
xs1 = f.ix[0]
xs2 = f.xs(f.index[0])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
xs1 = f.ix[f.index[5]]
xs2 = f.xs(f.index[5])
tm.assert_series_equal(xs1, xs2)
# single column
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_series_equal(f.ix[:, 'A'], f['A'])
# return view
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
exp = f.copy()
exp.values[5] = 4
f.ix[5][:] = 4
tm.assert_frame_equal(exp, f)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
exp.values[:, 1] = 6
f.ix[:, 1][:] = 6
tm.assert_frame_equal(exp, f)
# slice of mixed-frame
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
tm.assert_series_equal(xs, exp)
@@ -1340,7 +1340,7 @@ def test_setitem_fancy_1d(self):
expected = self.frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
@@ -1348,7 +1348,7 @@ def test_setitem_fancy_1d(self):
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
@@ -1358,14 +1358,14 @@ def test_setitem_fancy_1d(self):
expected = self.frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
vals = np.random.randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
@@ -1375,13 +1375,13 @@ def test_setitem_fancy_1d(self):
expected = self.frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
@@ -1391,7 +1391,7 @@ def test_setitem_fancy_1d(self):
expected = self.frame.copy()
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
@@ -1912,7 +1912,7 @@ def test_single_element_ix_dont_upcast(self):
assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = self.frame.ix[self.frame.index[5], 'E']
assert is_integer(result)
@@ -1924,7 +1924,7 @@ def test_single_element_ix_dont_upcast(self):
df["b"] = 666
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[0, "b"]
assert is_integer(result)
result = df.loc[0, "b"]
@@ -1932,7 +1932,7 @@ def test_single_element_ix_dont_upcast(self):
expected = Series([666], [0], name='b')
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[[0], "b"]
assert_series_equal(result, expected)
result = df.loc[[0], "b"]
@@ -2004,14 +2004,14 @@ def test_iloc_duplicates(self):
result = df.iloc[0]
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result2 = df.ix[0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.T.iloc[:, 0]
result2 = df.T.ix[:, 0]
assert isinstance(result, Series)
@@ -2024,19 +2024,19 @@ def test_iloc_duplicates(self):
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
rs = df.iloc[0]
xp = df.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
rs = df.iloc[:, 0]
xp = df.T.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
rs = df.iloc[:, [0]]
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
@@ -2263,7 +2263,7 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
@@ -2273,7 +2273,7 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
@@ -2284,7 +2284,7 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[1:-1]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
@@ -2294,7 +2294,7 @@ def test_getitem_ix_float_duplicates(self):
expect = df.iloc[[1, -1]]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
@@ -2525,7 +2525,7 @@ def test_index_namedtuple(self):
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[IndexType("foo", "bar")]["A"]
assert result == 1
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py
index 291d06f5862bf..cefff461ecb68 100644
--- a/pandas/tests/indexing/common.py
+++ b/pandas/tests/indexing/common.py
@@ -141,7 +141,7 @@ def get_value(self, f, i, values=False):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
return f.ix[i]
def check_values(self, f, func, values=False):
diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py
index 051803b3c55e5..6b6e1dbd859a2 100644
--- a/pandas/tests/indexing/multiindex/test_ix.py
+++ b/pandas/tests/indexing/multiindex/test_ix.py
@@ -8,7 +8,7 @@
from pandas.util import testing as tm
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexIx:
def test_frame_setitem_ix(self, multiindex_dataframe_random_data):
@@ -23,7 +23,7 @@ def test_frame_setitem_ix(self, multiindex_dataframe_random_data):
assert df.loc[('bar', 'two'), 1] == 7
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
df = frame.copy()
df.columns = list(range(3))
df.ix[('bar', 'two'), 1] = 7
diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py
index 3e0867f414bb7..97ef78ff5ce1b 100644
--- a/pandas/tests/indexing/multiindex/test_loc.py
+++ b/pandas/tests/indexing/multiindex/test_loc.py
@@ -25,7 +25,7 @@ def frame_random_data_integer_multi_index():
return DataFrame(np.random.randn(6, 2), index=index)
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexLoc:
def test_loc_getitem_series(self):
@@ -309,7 +309,7 @@ def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
tm.assert_series_equal(result, expected)
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
@pytest.mark.parametrize('indexer', [
lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py
index e52e2a234600a..20830bbe4680b 100644
--- a/pandas/tests/indexing/multiindex/test_partial.py
+++ b/pandas/tests/indexing/multiindex/test_partial.py
@@ -100,7 +100,7 @@ def test_getitem_partial_column_select(self):
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[('a', 'y'), [1, 0]]
tm.assert_frame_equal(result, expected)
@@ -132,7 +132,7 @@ def test_partial_set(
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
- def test_partial_ix_missing(
+ def test_partial_loc_missing(
self, multiindex_year_month_day_dataframe_random_data):
pytest.skip("skipping for now")
diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py
index 2fbd3a55508a1..44aae4cd55e35 100644
--- a/pandas/tests/indexing/multiindex/test_setitem.py
+++ b/pandas/tests/indexing/multiindex/test_setitem.py
@@ -11,7 +11,7 @@
from pandas.util import testing as tm
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexSetItem:
def test_setitem_multiindex(self):
@@ -280,7 +280,7 @@ def test_frame_getitem_setitem_multislice(self):
tm.assert_series_equal(df['value'], result)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
result = df.ix[:, 'value']
tm.assert_series_equal(df['value'], result)
diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py
index 6433a39fe4373..3394c4c06d45a 100644
--- a/pandas/tests/indexing/multiindex/test_slice.py
+++ b/pandas/tests/indexing/multiindex/test_slice.py
@@ -12,7 +12,7 @@
from pandas.util import testing as tm
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestMultiIndexSlicers:
def test_per_axis_per_level_getitem(self):
diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py
index b45f69245cfdc..657309170cac3 100644
--- a/pandas/tests/indexing/test_callable.py
+++ b/pandas/tests/indexing/test_callable.py
@@ -6,7 +6,7 @@
class TestIndexingCallable:
- def test_frame_loc_ix_callable(self):
+ def test_frame_loc_callable(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
@@ -62,7 +62,7 @@ def test_frame_loc_ix_callable(self):
res = df.loc[lambda x: 1, lambda x: 'A']
assert res == df.loc[1, 'A']
- def test_frame_loc_ix_callable_mixture(self):
+ def test_frame_loc_callable_mixture(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
@@ -85,7 +85,7 @@ def test_frame_loc_ix_callable_mixture(self):
res = df.loc[3, lambda x: ['A', 'B']]
tm.assert_series_equal(res, df.loc[3, ['A', 'B']])
- def test_frame_loc_callable(self):
+ def test_frame_loc_callable_labels(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py
index d4d1f67c600ae..b94d3000a5841 100644
--- a/pandas/tests/indexing/test_chaining_and_caching.py
+++ b/pandas/tests/indexing/test_chaining_and_caching.py
@@ -355,7 +355,7 @@ def check(result, expected):
result4 = df['A'].iloc[2]
check(result4, expected)
- @pytest.mark.filterwarnings("ignore::DeprecationWarning")
+ @pytest.mark.filterwarnings("ignore::FutureWarning")
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 020fdcfe445de..9a2aae08dbb15 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -8,7 +8,7 @@
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_series_equal
-ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+ignore_ix = pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestFloatIndexers:
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py
index 81c38ee42749c..4fa26dc67ba0c 100644
--- a/pandas/tests/indexing/test_iloc.py
+++ b/pandas/tests/indexing/test_iloc.py
@@ -383,53 +383,53 @@ def test_iloc_getitem_frame(self):
result = df.iloc[2]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
exp = df.ix[4, 4]
assert result == exp
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
@@ -437,7 +437,7 @@ def test_iloc_getitem_frame(self):
s = Series(index=range(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py
index 478032f76e7cb..d56894a8c1f7b 100644
--- a/pandas/tests/indexing/test_ix.py
+++ b/pandas/tests/indexing/test_ix.py
@@ -16,12 +16,12 @@ def test_ix_deprecation():
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
- with tm.assert_produces_warning(DeprecationWarning,
- check_stacklevel=False):
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=True):
df.ix[1, 'A']
-@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
class TestIX:
def test_ix_loc_setitem_consistency(self):
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py
index b3beec95151fb..8351d5646e816 100644
--- a/pandas/tests/indexing/test_loc.py
+++ b/pandas/tests/indexing/test_loc.py
@@ -654,7 +654,7 @@ def test_loc_name(self):
assert result == 'index_name'
with catch_warnings(record=True):
- filterwarnings("ignore", "\\n.ix", DeprecationWarning)
+ filterwarnings("ignore", "\\n.ix", FutureWarning)
result = df.ix[[0, 1]].index.name
assert result == 'index_name'
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py
index c1d530cc890e5..0c53b8c7a0350 100644
--- a/pandas/tests/indexing/test_partial.py
+++ b/pandas/tests/indexing/test_partial.py
@@ -16,7 +16,7 @@
class TestPartialSetting:
- @pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
+ @pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning")
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py
index b82beda5d554a..6641311faace2 100644
--- a/pandas/tests/series/indexing/test_indexing.py
+++ b/pandas/tests/series/indexing/test_indexing.py
@@ -574,7 +574,7 @@ def test_slice_can_reorder_not_uniquely_indexed():
s[::-1] # it works!
-def test_ix_setitem(test_data):
+def test_loc_setitem(test_data):
inds = test_data.series.index[[3, 4, 7]]
result = test_data.series.copy()
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 737445b3fb572..e8d6b3bcaa77f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -188,7 +188,7 @@ def test_reindex(self):
tm.assert_frame_equal(reindexed, expected)
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
@@ -201,7 +201,7 @@ def test_reindex_preserve_levels(self):
assert chunk.index is new_index
with catch_warnings(record=True):
- simplefilter("ignore", DeprecationWarning)
+ simplefilter("ignore", FutureWarning)
chunk = self.ymd.ix[new_index]
assert chunk.index is new_index
@@ -1014,7 +1014,7 @@ def test_multilevel_consolidate(self):
df['Totals', ''] = df.sum(1)
df = df._consolidate()
- def test_ix_preserve_names(self):
+ def test_loc_preserve_names(self):
result = self.ymd.loc[2000]
result2 = self.ymd['A'].loc[2000]
assert result.index.names == self.ymd.index.names[1:]
| closes https://github.com/pandas-dev/pandas/issues/15152 | https://api.github.com/repos/pandas-dev/pandas/pulls/26438 | 2019-05-17T06:27:15Z | 2019-05-19T17:15:07Z | 2019-05-19T17:15:07Z | 2019-05-19T17:15:10Z |
CLN: remove unused code check for pytest.raises | diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 3d1ec637ae202..a16580679ff54 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -169,15 +169,6 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
invgrep -r -E --include '*.py' '(unittest(\.| import )mock|mock\.Mock\(\)|mock\.patch)' pandas/tests/
RET=$(($RET + $?)) ; echo $MSG "DONE"
- # Check that we use pytest.raises only as a context manager
- #
- # For any flake8-compliant code, the only way this regex gets
- # matched is if there is no "with" statement preceding "pytest.raises"
- MSG='Check for pytest.raises as context manager (a line starting with `pytest.raises` is invalid, needs a `with` to precede it)' ; echo $MSG
- MSG='TODO: This check is currently skipped because so many files fail this. Please enable when all are corrected (xref gh-24332)' ; echo $MSG
- # invgrep -R --include '*.py' -E '[[:space:]] pytest.raises' pandas/tests
- # RET=$(($RET + $?)) ; echo $MSG "DONE"
-
MSG='Check for wrong space after code-block directive and before colon (".. code-block ::" instead of ".. code-block::")' ; echo $MSG
invgrep -R --include="*.rst" ".. code-block ::" doc/source
RET=$(($RET + $?)) ; echo $MSG "DONE"
| xref #25866
| https://api.github.com/repos/pandas-dev/pandas/pulls/26435 | 2019-05-16T22:52:42Z | 2019-05-17T12:51:08Z | 2019-05-17T12:51:08Z | 2019-05-17T13:50:50Z |
CLN: Remove Categorical.from_array | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index cd727c728eb3d..495d0beaf3faa 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -149,6 +149,7 @@ Removal of prior version deprecations/changes
- ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd.merge_ordered`` instead (:issue:`18459`)
- The ``SparseList`` class has been removed (:issue:`14007`)
- The ``pandas.io.wb`` and ``pandas.io.data`` stub modules have been removed (:issue:`13735`)
+- ``Categorical.from_array`` has been removed (:issue:`13854`)
.. _whatsnew_0220.performance:
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index deaec20586005..e34755e665f8d 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -552,26 +552,6 @@ def _from_inferred_categories(cls, inferred_categories, inferred_codes,
return cls(codes, dtype=dtype, fastpath=True)
- @classmethod
- def from_array(cls, data, **kwargs):
- """
- .. deprecated:: 0.19.0
- Use ``Categorical`` instead.
-
- Make a Categorical type from a single array-like object.
-
- For internal compatibility with numpy arrays.
-
- Parameters
- ----------
- data : array-like
- Can be an Index or array-like. The categories are assumed to be
- the unique values of `data`.
- """
- warn("Categorical.from_array is deprecated, use Categorical instead",
- FutureWarning, stacklevel=2)
- return cls(data, **kwargs)
-
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index b570672124976..b661bde434814 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -1955,11 +1955,6 @@ def test_deprecated_labels(self):
res = cat.labels
tm.assert_numpy_array_equal(res, exp)
- def test_deprecated_from_array(self):
- # GH13854, `.from_array` is deprecated
- with tm.assert_produces_warning(FutureWarning):
- Categorical.from_array([0, 1])
-
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
@@ -4817,7 +4812,7 @@ def test_constructor(self):
assert isinstance(sc, tm.SubclassedCategorical)
tm.assert_categorical_equal(sc, Categorical(['a', 'b', 'c']))
- def test_from_array(self):
+ def test_from_codes(self):
sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
assert isinstance(sc, tm.SubclassedCategorical)
exp = Categorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
| Deprecated in 0.19.0
xref #13854. | https://api.github.com/repos/pandas-dev/pandas/pulls/18642 | 2017-12-05T07:50:38Z | 2017-12-05T11:15:22Z | 2017-12-05T11:15:22Z | 2017-12-05T16:54:07Z |
CLN: ASV HDFStore benchmark | diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py
index 5aa8f76917797..d7b3be25a18b9 100644
--- a/asv_bench/benchmarks/hdfstore_bench.py
+++ b/asv_bench/benchmarks/hdfstore_bench.py
@@ -1,34 +1,40 @@
-from .pandas_vb_common import *
-import os
+import numpy as np
+from pandas import DataFrame, Panel, date_range, HDFStore
+import pandas.util.testing as tm
+from .pandas_vb_common import BaseIO, setup # noqa
-class HDF5(object):
- goal_time = 0.2
-
- def setup(self):
- self.index = tm.makeStringIndex(25000)
- self.df = DataFrame({'float1': randn(25000), 'float2': randn(25000),},
- index=self.index)
- self.df_mixed = DataFrame(
- {'float1': randn(25000), 'float2': randn(25000),
- 'string1': (['foo'] * 25000),
- 'bool1': ([True] * 25000),
- 'int1': np.random.randint(0, 250000, size=25000),},
- index=self.index)
+class HDF5(BaseIO):
- self.df_wide = DataFrame(np.random.randn(25000, 100))
-
- self.df2 = DataFrame({'float1': randn(25000), 'float2': randn(25000)},
- index=date_range('1/1/2000', periods=25000))
- self.df_wide2 = DataFrame(np.random.randn(25000, 100),
- index=date_range('1/1/2000', periods=25000))
+ goal_time = 0.2
- self.df_dc = DataFrame(np.random.randn(10000, 10),
- columns=[('C%03d' % i) for i in range(10)])
+ def setup(self):
+ N = 25000
+ index = tm.makeStringIndex(N)
+ self.df = DataFrame({'float1': np.random.randn(N),
+ 'float2': np.random.randn(N)},
+ index=index)
+ self.df_mixed = DataFrame({'float1': np.random.randn(N),
+ 'float2': np.random.randn(N),
+ 'string1': ['foo'] * N,
+ 'bool1': [True] * N,
+ 'int1': np.random.randint(0, N, size=N)},
+ index=index)
+ self.df_wide = DataFrame(np.random.randn(N, 100))
+ self.start_wide = self.df_wide.index[10000]
+ self.stop_wide = self.df_wide.index[15000]
+ self.df2 = DataFrame({'float1': np.random.randn(N),
+ 'float2': np.random.randn(N)},
+ index=date_range('1/1/2000', periods=N))
+ self.start = self.df2.index[10000]
+ self.stop = self.df2.index[15000]
+ self.df_wide2 = DataFrame(np.random.randn(N, 100),
+ index=date_range('1/1/2000', periods=N))
+ self.df_dc = DataFrame(np.random.randn(N, 10),
+ columns=['C%03d' % i for i in range(10)])
self.f = '__test__.h5'
- self.remove(self.f)
self.store = HDFStore(self.f)
self.store.put('fixed', self.df)
@@ -42,12 +48,6 @@ def teardown(self):
self.store.close()
self.remove(self.f)
- def remove(self, f):
- try:
- os.remove(f)
- except:
- pass
-
def time_read_store(self):
self.store.get('fixed')
@@ -82,14 +82,12 @@ def time_write_store_table_dc(self):
self.store.append('table_dc_write', self.df_dc, data_columns=True)
def time_query_store_table_wide(self):
- start = self.df_wide2.index[10000]
- stop = self.df_wide2.index[15000]
- self.store.select('table_wide', where="index > start and index < stop")
+ self.store.select('table_wide', where="index > self.start_wide and "
+ "index < self.stop_wide")
def time_query_store_table(self):
- start = self.df2.index[10000]
- stop = self.df2.index[15000]
- self.store.select('table', where="index > start and index < stop")
+ self.store.select('table', where="index > self.start and "
+ "index < self.stop")
def time_store_repr(self):
repr(self.store)
@@ -101,16 +99,16 @@ def time_store_info(self):
self.store.info()
-class HDF5Panel(object):
+class HDF5Panel(BaseIO):
+
goal_time = 0.2
def setup(self):
self.f = '__test__.h5'
- self.p = Panel(randn(20, 1000, 25),
- items=[('Item%03d' % i) for i in range(20)],
+ self.p = Panel(np.random.randn(20, 1000, 25),
+ items=['Item%03d' % i for i in range(20)],
major_axis=date_range('1/1/2000', periods=1000),
- minor_axis=[('E%03d' % i) for i in range(25)])
- self.remove(self.f)
+ minor_axis=['E%03d' % i for i in range(25)])
self.store = HDFStore(self.f)
self.store.append('p1', self.p)
@@ -118,12 +116,6 @@ def teardown(self):
self.store.close()
self.remove(self.f)
- def remove(self, f):
- try:
- os.remove(f)
- except:
- pass
-
def time_read_store_table_panel(self):
self.store.select('p1')
diff --git a/asv_bench/benchmarks/io_bench.py b/asv_bench/benchmarks/io_bench.py
index c718b13912e73..e8112cc41f032 100644
--- a/asv_bench/benchmarks/io_bench.py
+++ b/asv_bench/benchmarks/io_bench.py
@@ -8,23 +8,7 @@
import timeit
-class _BenchTeardown(object):
- """
- base class for teardown method implementation
- """
- fname = None
-
- def remove(self, f):
- try:
- os.remove(f)
- except:
- pass
-
- def teardown(self):
- self.remove(self.fname)
-
-
-class frame_to_csv(_BenchTeardown):
+class frame_to_csv(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -35,7 +19,7 @@ def time_frame_to_csv(self):
self.df.to_csv(self.fname)
-class frame_to_csv2(_BenchTeardown):
+class frame_to_csv2(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -49,7 +33,7 @@ def time_frame_to_csv2(self):
self.df.to_csv(self.fname)
-class frame_to_csv_date_formatting(_BenchTeardown):
+class frame_to_csv_date_formatting(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -61,7 +45,7 @@ def time_frame_to_csv_date_formatting(self):
self.data.to_csv(self.fname, date_format='%Y%m%d')
-class frame_to_csv_mixed(_BenchTeardown):
+class frame_to_csv_mixed(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -114,7 +98,7 @@ def time_read_csv_infer_datetime_format_ymd(self):
read_csv(StringIO(self.data), header=None, names=['foo'], parse_dates=['foo'], infer_datetime_format=True)
-class read_csv_skiprows(_BenchTeardown):
+class read_csv_skiprows(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -127,7 +111,7 @@ def time_read_csv_skiprows(self):
read_csv(self.fname, skiprows=10000)
-class read_csv_standard(_BenchTeardown):
+class read_csv_standard(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -174,7 +158,7 @@ def time_read_uint64_na_values(self):
read_csv(StringIO(self.data1), header=None, na_values=self.na_values)
-class write_csv_standard(_BenchTeardown):
+class write_csv_standard(BaseIO):
goal_time = 0.2
fname = '__test__.csv'
@@ -218,14 +202,14 @@ def time_read_nrows(self, compression, engine):
compression=compression, engine=engine)
-class read_json_lines(_BenchTeardown):
+class read_json_lines(BaseIO):
goal_time = 0.2
fname = "__test__.json"
def setup(self):
self.N = 100000
self.C = 5
- self.df = DataFrame({('float{0}'.format(i), randn(self.N)) for i in range(self.C)})
+ self.df = DataFrame({'float{0}'.format(i): randn(self.N) for i in range(self.C)})
self.df.to_json(self.fname,orient="records",lines=True)
def time_read_json_lines(self):
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index 62eb826418030..74517f184ae6f 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -1,3 +1,4 @@
+import os
from pandas import *
import pandas as pd
from numpy.random import randn
@@ -19,6 +20,25 @@
def setup(*args, **kwargs):
np.random.seed(1234)
+
+class BaseIO(object):
+ """
+ Base class for IO benchmarks
+ """
+ fname = None
+
+ def remove(self, f):
+ """Remove created files"""
+ try:
+ os.remove(f)
+ except:
+ # On Windows, attempting to remove a file that is in use
+ # causes an exception to be raised
+ pass
+
+ def teardown(self):
+ self.remove(self.fname)
+
# try em until it works!
for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']:
try:
| - Ran flake8 and replaced star imports
An `HDFStore.info()` benchmark was added in #16666, but the benchmark fails (even on master). I can create a new issue for this unless there's an obvious fix I am not seeing.
```
asv dev -b ^hdfstore_bench· Discovering benchmarks
· Running 18 total benchmarks (1 commits * 1 environments * 18 benchmarks)
[ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python
[ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python
[ 5.56%] ··· Running hdfstore_bench.HDF5.time_query_store_table 22.3ms
[ 11.11%] ··· Running hdfstore_bench.HDF5.time_query_store_table_wide 33.8ms
[ 16.67%] ··· Running hdfstore_bench.HDF5.time_read_store 11.6ms
[ 22.22%] ··· Running hdfstore_bench.HDF5.time_read_store_mixed 24.3ms
[ 27.78%] ··· Running hdfstore_bench.HDF5.time_read_store_table 16.3ms
[ 33.33%] ··· Running hdfstore_bench.HDF5.time_read_store_table_mixed 34.6ms
[ 38.89%] ··· Running hdfstore_bench.HDF5.time_read_store_table_wide 38.3ms
[ 44.44%] ··· Running hdfstore_bench.HDF5.time_store_info failed
[ 44.44%] ····· Traceback (most recent call last):
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module>
commands[mode](args)
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run
result = benchmark.do_run()
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run
return self.run(*self._current_params)
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run
samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number)
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing
timing = timer.timeit(number)
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit
timing = self.inner(it, self.timer)
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner
_func()
File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/hdfstore_bench.py", line 101, in time_store_info
self.store.info()
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/pandas/io/pytables.py", line 495, in __getattr__
(type(self).__name__, name))
AttributeError: 'HDFStore' object has no attribute 'info'
[ 50.00%] ··· Running hdfstore_bench.HDF5.time_store_repr 46.1ms
[ 55.56%] ··· Running hdfstore_bench.HDF5.time_store_str 45.2ms
[ 61.11%] ··· Running hdfstore_bench.HDF5.time_write_store 12.5ms
[ 66.67%] ··· Running hdfstore_bench.HDF5.time_write_store_mixed 28.3ms
[ 72.22%] ··· Running hdfstore_bench.HDF5.time_write_store_table 41.2ms
[ 77.78%] ··· Running hdfstore_bench.HDF5.time_write_store_table_dc 329ms
[ 83.33%] ··· Running hdfstore_bench.HDF5.time_write_store_table_mixed 52.2ms
[ 88.89%] ··· Running hdfstore_bench.HDF5.time_write_store_table_wide 123ms
[ 94.44%] ··· Running hdfstore_bench.HDF5Panel.time_read_store_table_panel 45.6ms
[100.00%] ··· Running hdfstore_bench.HDF5Panel.time_write_store_table_panel 76.3ms
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18641 | 2017-12-05T06:00:31Z | 2017-12-06T10:18:14Z | 2017-12-06T10:18:14Z | 2017-12-06T17:59:13Z |
Cleanup & De-duplication for custom offset classes | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 251af50ab12ce..29e14103dfe20 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -307,27 +307,6 @@ class CacheableOffset(object):
_cacheable = True
-class BeginMixin(object):
- # helper for vectorized offsets
-
- def _beg_apply_index(self, i, freq):
- """Offsets index to beginning of Period frequency"""
-
- off = i.to_perioddelta('D')
-
- base, mult = get_freq_code(freq)
- base_period = i.to_period(base)
- if self.n <= 0:
- # when subtracting, dates on start roll to prior
- roll = np.where(base_period.to_timestamp() == i - off,
- self.n, self.n + 1)
- else:
- roll = self.n
-
- base = (base_period + roll).to_timestamp()
- return base + off
-
-
class EndMixin(object):
# helper for vectorized offsets
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 857ec9e9881d9..4dae59d11f66f 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -413,8 +413,27 @@ def _from_name(cls, suffix=None):
return cls()
+class _CustomMixin(object):
+ """
+ Mixin for classes that define and validate calendar, holidays,
+ and weekdays attributes
+ """
+ def __init__(self, weekmask, holidays, calendar):
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ # Custom offset instances are identified by the
+ # following two attributes. See DateOffset._params()
+ # holidays, weekmask
+
+ # assumes self.kwds already exists
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+
+
class BusinessMixin(object):
- """ mixin to business types to provide related functions """
+ """ Mixin to business types to provide related functions """
@property
def offset(self):
@@ -572,9 +591,26 @@ def __init__(self, start='09:00', end='17:00', offset=timedelta(0)):
kwds = {'offset': offset}
self.start = kwds['start'] = _validate_business_time(start)
self.end = kwds['end'] = _validate_business_time(end)
- self.kwds = kwds
+ self.kwds.update(kwds)
self._offset = offset
+ @cache_readonly
+ def next_bday(self):
+ """used for moving to next businessday"""
+ if self.n >= 0:
+ nb_offset = 1
+ else:
+ nb_offset = -1
+ if self._prefix.startswith('C'):
+ # CustomBusinessHour
+ return CustomBusinessDay(n=nb_offset,
+ weekmask=self.weekmask,
+ holidays=self.holidays,
+ calendar=self.calendar)
+ else:
+ return BusinessDay(n=nb_offset)
+
+ # TODO: Cache this once offsets are immutable
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
@@ -616,6 +652,7 @@ def _prev_opening_time(self, other):
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
+ # TODO: cache this once offsets are immutable
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
@@ -784,19 +821,11 @@ def __init__(self, n=1, normalize=False, start='09:00',
end='17:00', offset=timedelta(0)):
self.n = self._validate_n(n)
self.normalize = normalize
+ self.kwds = {}
super(BusinessHour, self).__init__(start=start, end=end, offset=offset)
- @cache_readonly
- def next_bday(self):
- # used for moving to next businessday
- if self.n >= 0:
- nb_offset = 1
- else:
- nb_offset = -1
- return BusinessDay(n=nb_offset)
-
-class CustomBusinessDay(BusinessDay):
+class CustomBusinessDay(_CustomMixin, BusinessDay):
"""
DateOffset subclass representing possibly n custom business days,
excluding holidays
@@ -822,19 +851,9 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = self._validate_n(n)
self.normalize = normalize
self._offset = offset
- self.kwds = {}
-
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- # CustomBusinessDay instances are identified by the
- # following two attributes. See DateOffset._params()
- # holidays, weekmask
+ self.kwds = {'offset': offset}
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['offset'] = offset
+ _CustomMixin.__init__(self, weekmask, holidays, calendar)
@apply_wraps
def apply(self, other):
@@ -874,7 +893,8 @@ def onOffset(self, dt):
return np.is_busday(day64, busdaycal=self.calendar)
-class CustomBusinessHour(BusinessHourMixin, SingleConstructorOffset):
+class CustomBusinessHour(_CustomMixin, BusinessHourMixin,
+ SingleConstructorOffset):
"""
DateOffset subclass representing possibly n custom business days
@@ -889,27 +909,11 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
start='09:00', end='17:00', offset=timedelta(0)):
self.n = self._validate_n(n)
self.normalize = normalize
- super(CustomBusinessHour, self).__init__(start=start,
- end=end, offset=offset)
-
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['calendar'] = self.calendar = calendar
+ self._offset = offset
+ self.kwds = {'offset': offset}
- @cache_readonly
- def next_bday(self):
- # used for moving to next businessday
- if self.n >= 0:
- nb_offset = 1
- else:
- nb_offset = -1
- return CustomBusinessDay(n=nb_offset,
- weekmask=self.weekmask,
- holidays=self.holidays,
- calendar=self.calendar)
+ _CustomMixin.__init__(self, weekmask, holidays, calendar)
+ BusinessHourMixin.__init__(self, start=start, end=end, offset=offset)
# ---------------------------------------------------------------------
@@ -981,10 +985,10 @@ class BusinessMonthBegin(MonthOffset):
_day_opt = 'business_start'
-class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
+class _CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset):
"""
DateOffset subclass representing one custom business month, incrementing
- between end of month dates
+ between [BEGIN/END] of month dates
Parameters
----------
@@ -999,11 +1003,9 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
-
_cacheable = False
- _prefix = 'CBM'
- onOffset = DateOffset.onOffset # override MonthOffset method
+ onOffset = DateOffset.onOffset # override MonthOffset method
apply_index = DateOffset.apply_index # override MonthOffset method
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
@@ -1011,15 +1013,9 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
self.n = self._validate_n(n)
self.normalize = normalize
self._offset = offset
- self.kwds = {}
+ self.kwds = {'offset': offset}
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['offset'] = offset
+ _CustomMixin.__init__(self, weekmask, holidays, calendar)
@cache_readonly
def cbday(self):
@@ -1028,7 +1024,17 @@ def cbday(self):
@cache_readonly
def m_offset(self):
- return MonthEnd(n=1, normalize=self.normalize)
+ if self._prefix.endswith('S'):
+ # MonthBegin:
+ return MonthBegin(n=1, normalize=self.normalize)
+ else:
+ # MonthEnd
+ return MonthEnd(n=1, normalize=self.normalize)
+
+
+class CustomBusinessMonthEnd(_CustomBusinessMonth):
+ __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end')
+ _prefix = 'CBM'
@apply_wraps
def apply(self, other):
@@ -1054,57 +1060,10 @@ def apply(self, other):
return result
-class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
- """
- DateOffset subclass representing one custom business month, incrementing
- between beginning of month dates
-
- Parameters
- ----------
- n : int, default 1
- offset : timedelta, default timedelta(0)
- normalize : bool, default False
- Normalize start/end dates to midnight before generating date range
- weekmask : str, Default 'Mon Tue Wed Thu Fri'
- weekmask of valid business days, passed to ``numpy.busdaycalendar``
- holidays : list
- list/array of dates to exclude from the set of valid business days,
- passed to ``numpy.busdaycalendar``
- calendar : pd.HolidayCalendar or np.busdaycalendar
- """
-
- _cacheable = False
+class CustomBusinessMonthBegin(_CustomBusinessMonth):
+ __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning')
_prefix = 'CBMS'
- onOffset = DateOffset.onOffset # override MonthOffset method
- apply_index = DateOffset.apply_index # override MonthOffset method
-
- def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
- self.kwds = {}
-
- # _get_calendar does validation and possible transformation
- # of calendar and holidays.
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['offset'] = offset
-
- @cache_readonly
- def cbday(self):
- kwds = self.kwds
- return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
-
- @cache_readonly
- def m_offset(self):
- return MonthBegin(n=1, normalize=self.normalize)
-
@apply_wraps
def apply(self, other):
n = self.n
@@ -1707,13 +1666,16 @@ def onOffset(self, dt):
return dt.month == self.month and dt.day == self._get_offset_day(dt)
def __init__(self, n=1, normalize=False, month=None):
+ self.n = self._validate_n(n)
+ self.normalize = normalize
+
month = month if month is not None else self._default_month
self.month = month
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
- DateOffset.__init__(self, n=n, normalize=normalize, month=month)
+ self.kwds = {'month': month}
@classmethod
def _from_name(cls, suffix=None):
| The diff looks a little bit mangled. This PR does two things.
1) Centralize frequently-repeated calendar `__init__` in a new _CustomMixin
2) Merge a bunch of shared CustomBusinessMonthEnd and CustomBusinessMonthBegin logic into a parent class.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18640 | 2017-12-05T00:18:27Z | 2017-12-06T00:52:42Z | 2017-12-06T00:52:42Z | 2017-12-06T04:19:49Z |
Explicit LooseVersion comps | diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index bbdd5f0d8334c..bb8b0ed14e1d9 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -12,8 +12,8 @@ from distutils.version import LooseVersion
# numpy versioning
_np_version = np.version.short_version
-_np_version_under1p10 = LooseVersion(_np_version) < '1.10'
-_np_version_under1p11 = LooseVersion(_np_version) < '1.11'
+_np_version_under1p10 = LooseVersion(_np_version) < LooseVersion('1.10')
+_np_version_under1p11 = LooseVersion(_np_version) < LooseVersion('1.11')
np.import_array()
np.import_ufunc()
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index 2deb29dabe764..80a2c05d86971 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -399,7 +399,7 @@ def raise_with_traceback(exc, traceback=Ellipsis):
# dateutil minimum version
import dateutil
-if LooseVersion(dateutil.__version__) < '2.5':
+if LooseVersion(dateutil.__version__) < LooseVersion('2.5'):
raise ImportError('dateutil 2.5.0 is the minimum required version')
from dateutil import parser as _date_parser
parse_date = _date_parser.parse
diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py
index 5112957b49875..cb8ad5e3ea46f 100644
--- a/pandas/compat/numpy/__init__.py
+++ b/pandas/compat/numpy/__init__.py
@@ -9,12 +9,12 @@
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
-_np_version_under1p10 = _nlv < '1.10'
-_np_version_under1p11 = _nlv < '1.11'
-_np_version_under1p12 = _nlv < '1.12'
-_np_version_under1p13 = _nlv < '1.13'
-_np_version_under1p14 = _nlv < '1.14'
-_np_version_under1p15 = _nlv < '1.15'
+_np_version_under1p10 = _nlv < LooseVersion('1.10')
+_np_version_under1p11 = _nlv < LooseVersion('1.11')
+_np_version_under1p12 = _nlv < LooseVersion('1.12')
+_np_version_under1p13 = _nlv < LooseVersion('1.13')
+_np_version_under1p14 = _nlv < LooseVersion('1.14')
+_np_version_under1p15 = _nlv < LooseVersion('1.15')
if _nlv < '1.9':
raise ImportError('this version of pandas is incompatible with '
diff --git a/pandas/conftest.py b/pandas/conftest.py
index b9d0087b50306..b09119895617c 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -70,8 +70,8 @@ def ip():
is_dateutil_le_261 = pytest.mark.skipif(
- LooseVersion(dateutil.__version__) > '2.6.1',
+ LooseVersion(dateutil.__version__) > LooseVersion('2.6.1'),
reason="dateutil api change version")
is_dateutil_gt_261 = pytest.mark.skipif(
- LooseVersion(dateutil.__version__) <= '2.6.1',
+ LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'),
reason="dateutil stable version")
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index bb8cc74bad3c2..2a9ed0fb9764d 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -6,7 +6,7 @@
try:
import numexpr as ne
- ver = ne.__version__
+ ver = LooseVersion(ne.__version__)
_NUMEXPR_INSTALLED = ver >= LooseVersion(_MIN_NUMEXPR_VERSION)
if not _NUMEXPR_INSTALLED:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index 8a6a870834c83..c3e72d6c31bf5 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -347,7 +347,7 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
import scipy
from scipy import interpolate
- if LooseVersion(scipy.__version__) < '0.18.0':
+ if LooseVersion(scipy.__version__) < LooseVersion('0.18.0'):
try:
method = interpolate.piecewise_polynomial_interpolate
return method(xi, yi.reshape(-1, 1), x,
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index b2bf4ab7ff7f1..1bc6526214a91 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -22,7 +22,7 @@ def _try_import():
"pip install -U feather-format\n")
try:
- feather.__version__ >= LooseVersion('0.3.1')
+ LooseVersion(feather.__version__) >= LooseVersion('0.3.1')
except AttributeError:
raise ImportError("the feather-format library must be >= "
"version 0.3.1\n"
@@ -106,7 +106,7 @@ def read_feather(path, nthreads=1):
feather = _try_import()
path = _stringify_path(path)
- if feather.__version__ < LooseVersion('0.4.0'):
+ if LooseVersion(feather.__version__) < LooseVersion('0.4.0'):
return feather.read_dataframe(path)
return feather.read_dataframe(path, nthreads=nthreads)
diff --git a/pandas/io/html.py b/pandas/io/html.py
index d0861f1aa4ec6..67a48198adc27 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -684,7 +684,7 @@ def _parser_dispatch(flavor):
raise ImportError(
"BeautifulSoup4 (bs4) not found, please install it")
import bs4
- if bs4.__version__ == LooseVersion('4.2.0'):
+ if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
raise ValueError("You're using a version"
" of BeautifulSoup4 (4.2.0) that has been"
" known to cause problems on certain"
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 4a13d2c9db944..7827c3ae04d4d 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -50,7 +50,7 @@ def __init__(self):
"\nor via pip\n"
"pip install -U pyarrow\n")
- if LooseVersion(pyarrow.__version__) < '0.4.1':
+ if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'):
raise ImportError("pyarrow >= 0.4.1 is required for parquet"
"support\n\n"
"you can install via conda\n"
@@ -58,8 +58,10 @@ def __init__(self):
"\nor via pip\n"
"pip install -U pyarrow\n")
- self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0'
- self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0'
+ self._pyarrow_lt_050 = (LooseVersion(pyarrow.__version__) <
+ LooseVersion('0.5.0'))
+ self._pyarrow_lt_060 = (LooseVersion(pyarrow.__version__) <
+ LooseVersion('0.6.0'))
self.api = pyarrow
def write(self, df, path, compression='snappy',
@@ -97,7 +99,7 @@ def __init__(self):
"\nor via pip\n"
"pip install -U fastparquet")
- if LooseVersion(fastparquet.__version__) < '0.1.0':
+ if LooseVersion(fastparquet.__version__) < LooseVersion('0.1.0'):
raise ImportError("fastparquet >= 0.1.0 is required for parquet "
"support\n\n"
"you can install via conda\n"
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2a66aea88f6d9..74cd2ba7dc4d8 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -248,7 +248,7 @@ def _tables():
_table_mod = tables
# version requirements
- if LooseVersion(tables.__version__) < '3.0.0':
+ if LooseVersion(tables.__version__) < LooseVersion('3.0.0'):
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 975ad1e4ff368..26874a57c66f7 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -67,11 +67,11 @@ def _is_sqlalchemy_connectable(con):
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
- ver = LooseVersion(sqlalchemy.__version__)
+ ver = sqlalchemy.__version__
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
- if ver < '0.8.2':
+ if LooseVersion(ver) < LooseVersion('0.8.2'):
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py
index d527bc08e2f08..0cc715eda2e18 100644
--- a/pandas/plotting/_compat.py
+++ b/pandas/plotting/_compat.py
@@ -8,7 +8,7 @@
def _mpl_le_1_2_1():
try:
import matplotlib as mpl
- return (str(mpl.__version__) <= LooseVersion('1.2.1') and
+ return (LooseVersion(mpl.__version__) <= LooseVersion('1.2.1') and
str(mpl.__version__)[0] != '0')
except ImportError:
return False
@@ -19,8 +19,9 @@ def _mpl_ge_1_3_1():
import matplotlib
# The or v[0] == '0' is because their versioneer is
# messed up on dev
- return (matplotlib.__version__ >= LooseVersion('1.3.1') or
- matplotlib.__version__[0] == '0')
+ return (LooseVersion(matplotlib.__version__) >=
+ LooseVersion('1.3.1') or
+ str(matplotlib.__version__)[0] == '0')
except ImportError:
return False
@@ -28,8 +29,8 @@ def _mpl_ge_1_3_1():
def _mpl_ge_1_4_0():
try:
import matplotlib
- return (matplotlib.__version__ >= LooseVersion('1.4') or
- matplotlib.__version__[0] == '0')
+ return (LooseVersion(matplotlib.__version__) >= LooseVersion('1.4') or
+ str(matplotlib.__version__)[0] == '0')
except ImportError:
return False
@@ -37,8 +38,8 @@ def _mpl_ge_1_4_0():
def _mpl_ge_1_5_0():
try:
import matplotlib
- return (matplotlib.__version__ >= LooseVersion('1.5') or
- matplotlib.__version__[0] == '0')
+ return (LooseVersion(matplotlib.__version__) >= LooseVersion('1.5') or
+ str(matplotlib.__version__)[0] == '0')
except ImportError:
return False
@@ -46,7 +47,7 @@ def _mpl_ge_1_5_0():
def _mpl_ge_2_0_0():
try:
import matplotlib
- return matplotlib.__version__ >= LooseVersion('2.0')
+ return LooseVersion(matplotlib.__version__) >= LooseVersion('2.0')
except ImportError:
return False
@@ -62,7 +63,7 @@ def _mpl_le_2_0_0():
def _mpl_ge_2_0_1():
try:
import matplotlib
- return matplotlib.__version__ >= LooseVersion('2.0.1')
+ return LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.1')
except ImportError:
return False
@@ -70,6 +71,6 @@ def _mpl_ge_2_0_1():
def _mpl_ge_2_1_0():
try:
import matplotlib
- return matplotlib.__version__ >= LooseVersion('2.1')
+ return LooseVersion(matplotlib.__version__) >= LooseVersion('2.1')
except ImportError:
return False
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index af39ee9815313..c25ef4bf38cab 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -15,7 +15,7 @@ def test_compat():
try:
import numexpr as ne
ver = ne.__version__
- if ver < LooseVersion(_MIN_NUMEXPR_VERSION):
+ if LooseVersion(ver) < LooseVersion(_MIN_NUMEXPR_VERSION):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
@@ -37,7 +37,8 @@ def testit():
except ImportError:
pytest.skip("no numexpr")
else:
- if ne.__version__ < LooseVersion(_MIN_NUMEXPR_VERSION):
+ if (LooseVersion(ne.__version__) <
+ LooseVersion(_MIN_NUMEXPR_VERSION)):
with pytest.raises(ImportError):
testit()
else:
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index 092bbb36169d4..577f4238c2328 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -1186,7 +1186,7 @@ def test_nan_to_nat_conversions():
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
- if LooseVersion(np.__version__) >= '1.7.0':
+ if LooseVersion(np.__version__) >= LooseVersion('1.7.0'):
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index cfdb18cefee64..8505c397b4a56 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -1742,7 +1742,7 @@ def test_round(self):
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
- if sys.version < LooseVersion('2.7'):
+ if LooseVersion(sys.version) < LooseVersion('2.7'):
# Rounding with decimal is a ValueError in Python < 2.7
with pytest.raises(ValueError):
df.round(nan_round_Series)
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 0b562269ea29d..3d2bee9e01d34 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -222,7 +222,7 @@ def test_itertuples(self):
tup = next(df.itertuples(name='TestName'))
- if sys.version >= LooseVersion('2.7'):
+ if LooseVersion(sys.version) >= LooseVersion('2.7'):
assert tup._fields == ('Index', 'a', 'b')
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == 'TestName'
@@ -231,7 +231,7 @@ def test_itertuples(self):
tup2 = next(df.itertuples(name='TestName'))
assert tup2 == (0, 1, 4)
- if sys.version >= LooseVersion('2.7'):
+ if LooseVersion(sys.version) >= LooseVersion('2.7'):
assert tup2._fields == ('Index', '_1', '_2')
df3 = DataFrame({'f' + str(i): [i] for i in range(1024)})
diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index d566c92e7738e..7e8869cbdeefb 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -21,7 +21,8 @@
try:
import scipy
- _is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
+ _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
+ LooseVersion('0.19.0'))
except:
_is_scipy_ge_0190 = False
@@ -717,7 +718,7 @@ def test_interp_alt_scipy(self):
result = df.interpolate(method='pchip')
expected.loc[2, 'A'] = 3
- if LooseVersion(scipy.__version__) >= '0.17.0':
+ if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
expected.loc[5, 'A'] = 6.0
else:
expected.loc[5, 'A'] = 6.125
diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py
index 58f4d9b770173..02fe0edf95577 100644
--- a/pandas/tests/frame/test_rank.py
+++ b/pandas/tests/frame/test_rank.py
@@ -212,7 +212,8 @@ def test_rank_methods_frame(self):
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols)
- if LooseVersion(scipy.__version__) >= '0.17.0':
+ if (LooseVersion(scipy.__version__) >=
+ LooseVersion('0.17.0')):
expected = expected.astype('float64')
tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/generic/test_frame.py b/pandas/tests/generic/test_frame.py
index ae73664e224cf..90d2427bb3bd7 100644
--- a/pandas/tests/generic/test_frame.py
+++ b/pandas/tests/generic/test_frame.py
@@ -173,7 +173,8 @@ def test_set_attribute(self):
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
- LooseVersion(xarray.__version__) < '0.10.0',
+ LooseVersion(xarray.__version__) <
+ LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index", ['FloatIndex', 'IntIndex',
diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py
index 4773ff69e0982..e5c0708e35c51 100644
--- a/pandas/tests/generic/test_series.py
+++ b/pandas/tests/generic/test_series.py
@@ -173,7 +173,8 @@ def finalize(self, other, method=None, **kwargs):
Series.__finalize__ = _finalize
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
- LooseVersion(xarray.__version__) < '0.10.0',
+ LooseVersion(xarray.__version__) <
+ LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index",
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index d03951458f12a..f307a75952350 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -1294,7 +1294,7 @@ def test_parsers_dayfirst_yearfirst(self, cache):
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
- is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
+ is_lt_253 = LooseVersion(dateutil.__version__) < LooseVersion('2.5.3')
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False,
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py
index 0b60ca6e8a720..087567354d32d 100644
--- a/pandas/tests/indexes/timedeltas/test_arithmetic.py
+++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py
@@ -416,14 +416,14 @@ def test_ops_ndarray(self):
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
- if LooseVersion(np.__version__) >= '1.8':
+ if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
- if LooseVersion(np.__version__) >= '1.8':
+ if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
@@ -436,7 +436,7 @@ def test_ops_ndarray(self):
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
- if LooseVersion(np.__version__) >= '1.8':
+ if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
@@ -444,12 +444,12 @@ def test_ops_ndarray(self):
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
- if LooseVersion(np.__version__) >= '1.8':
+ if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
- if LooseVersion(np.__version__) >= '1.8':
+ if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index a22d0174947e1..4b5cbaeb7308d 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -25,7 +25,7 @@
from pandas.compat import zip, u
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
-PY361 = sys.version >= LooseVersion('3.6.1')
+PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')
@pytest.fixture
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index 082a37a94f75b..b263d368f41f5 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -16,7 +16,7 @@
div_style = ''
try:
import IPython
- if IPython.__version__ < LooseVersion('3.0.0'):
+ if LooseVersion(IPython.__version__) < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except (ImportError, AttributeError):
pass
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 0b60d37d36c08..67f95c828c80e 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -194,7 +194,7 @@ def create_data():
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'))
- if _loose_version < '0.19.2':
+ if _loose_version < LooseVersion('0.19.2'):
timestamp['freq'] = Timestamp('2011-01-01', offset='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
offset='M')
@@ -245,10 +245,10 @@ def create_pickle_data():
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
- if _loose_version < '0.14.1':
+ if _loose_version < LooseVersion('0.14.1'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
- if _loose_version < '0.17.0':
+ if _loose_version < LooseVersion('0.17.0'):
del data['series']['period']
del data['scalars']['period']
return data
@@ -260,12 +260,12 @@ def _u(x):
def create_msgpack_data():
data = create_data()
- if _loose_version < '0.17.0':
+ if _loose_version < LooseVersion('0.17.0'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
- if _loose_version < '0.18.0':
+ if _loose_version < LooseVersion('0.18.0'):
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py
index a1c0ec3bc1a0a..cd1685f282bd2 100644
--- a/pandas/tests/io/json/test_ujson.py
+++ b/pandas/tests/io/json/test_ujson.py
@@ -417,7 +417,7 @@ def test_nat(self):
def test_npy_nat(self):
from distutils.version import LooseVersion
- if LooseVersion(np.__version__) < '1.7.0':
+ if LooseVersion(np.__version__) < LooseVersion('1.7.0'):
pytest.skip("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py
index 4c0f67fa6876a..7ff2ac9ff1305 100644
--- a/pandas/tests/io/parser/parse_dates.py
+++ b/pandas/tests/io/parser/parse_dates.py
@@ -270,7 +270,7 @@ def test_yy_format_with_yearfirst(self):
# See gh-217
import dateutil
- if dateutil.__version__ >= LooseVersion('2.5.0'):
+ if LooseVersion(dateutil.__version__) >= LooseVersion('2.5.0'):
pytest.skip("testing yearfirst=True not-support"
"on datetutil < 2.5.0 this works but"
"is wrong")
diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py
index 021f3715d472b..e9909400ce429 100644
--- a/pandas/tests/io/test_feather.py
+++ b/pandas/tests/io/test_feather.py
@@ -61,7 +61,7 @@ def test_basic(self):
assert df.dttz.dtype.tz.zone == 'US/Eastern'
self.check_round_trip(df)
- @pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
+ @pytest.mark.skipif(fv >= LooseVersion('0.4.0'), reason='fixed in 0.4.0')
def test_strided_data_issues(self):
# strided data issuehttps://github.com/wesm/feather/issues/97
@@ -81,7 +81,7 @@ def test_stringify_columns(self):
df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
self.check_error_on_write(df, ValueError)
- @pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
+ @pytest.mark.skipif(fv >= LooseVersion('0.4.0'), reason='fixed in 0.4.0')
def test_unsupported(self):
# timedelta
@@ -98,7 +98,7 @@ def test_unsupported_other(self):
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self.check_error_on_write(df, ValueError)
- @pytest.mark.skipif(fv < '0.4.0', reason='new in 0.4.0')
+ @pytest.mark.skipif(fv < LooseVersion('0.4.0'), reason='new in 0.4.0')
def test_rw_nthreads(self):
df = pd.DataFrame({'A': np.arange(100000)})
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index 0b268dcca90e8..fd0c2b9d0218c 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -52,7 +52,7 @@ def _skip_if_none_of(module_names):
_skip_if_no(module_names)
if module_names == 'bs4':
import bs4
- if bs4.__version__ == LooseVersion('4.2.0'):
+ if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
pytest.skip("Bad version of bs4: 4.2.0")
else:
not_found = [module_name for module_name in module_names if not
@@ -61,7 +61,7 @@ def _skip_if_none_of(module_names):
pytest.skip("{0!r} not found".format(not_found))
if 'bs4' in module_names:
import bs4
- if bs4.__version__ == LooseVersion('4.2.0'):
+ if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
pytest.skip("Bad version of bs4: 4.2.0")
@@ -85,7 +85,7 @@ def assert_framelist_equal(list1, list2, *args, **kwargs):
def test_bs4_version_fails():
_skip_if_none_of(('bs4', 'html5lib'))
import bs4
- if bs4.__version__ == LooseVersion('4.2.0'):
+ if LooseVersion(bs4.__version__) == LooseVersion('4.2.0'):
tm.assert_raises(AssertionError, read_html, os.path.join(DATA_PATH,
"spam.html"),
flavor='bs4')
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py
index 2d1671840ce8f..b9d66426c9dcb 100644
--- a/pandas/tests/io/test_packers.py
+++ b/pandas/tests/io/test_packers.py
@@ -299,7 +299,7 @@ def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
- if LooseVersion(sys.version) < '2.7':
+ if LooseVersion(sys.version) < LooseVersion('2.7'):
pytest.skip('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
@@ -864,7 +864,7 @@ def check_min_structure(self, data, version):
def compare(self, current_data, all_data, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
- if LooseVersion(version) < '0.18.0':
+ if LooseVersion(version) < LooseVersion('0.18.0'):
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
@@ -895,7 +895,7 @@ def compare(self, current_data, all_data, vf, version):
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
- if LooseVersion(version) < '0.17.0':
+ if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
@@ -904,7 +904,7 @@ def compare_series_dt_tz(self, result, expected, typ, version):
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
- if LooseVersion(version) < '0.17.0':
+ if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index e7bcff22371b7..3243e7a6017c2 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -50,7 +50,7 @@ def pa():
def pa_lt_070():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
- if LooseVersion(pyarrow.__version__) >= '0.7.0':
+ if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
pytest.skip("pyarrow is >= 0.7.0")
return 'pyarrow'
@@ -59,7 +59,7 @@ def pa_lt_070():
def pa_ge_070():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
- if LooseVersion(pyarrow.__version__) < '0.7.0':
+ if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
pytest.skip("pyarrow is < 0.7.0")
return 'pyarrow'
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index 91c1f19f5caab..c49339f112d6a 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -94,7 +94,7 @@ def compare(data, vf, version):
def compare_sp_series_ts(res, exp, typ, version):
# SparseTimeSeries integrated into SparseSeries in 0.12.0
# and deprecated in 0.17.0
- if version and LooseVersion(version) <= "0.12.0":
+ if version and LooseVersion(version) <= LooseVersion("0.12.0"):
tm.assert_sp_series_equal(res, exp, check_series_type=False)
else:
tm.assert_sp_series_equal(res, exp)
@@ -123,7 +123,7 @@ def compare_series_ts(result, expected, typ, version):
def compare_series_dt_tz(result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
- if LooseVersion(version) < '0.17.0':
+ if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
@@ -133,10 +133,10 @@ def compare_series_dt_tz(result, expected, typ, version):
def compare_series_cat(result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
- if LooseVersion(version) < '0.15.0':
+ if LooseVersion(version) < LooseVersion('0.15.0'):
tm.assert_series_equal(result, expected, check_dtype=False,
check_categorical=False)
- elif LooseVersion(version) < '0.16.0':
+ elif LooseVersion(version) < LooseVersion('0.16.0'):
tm.assert_series_equal(result, expected, check_categorical=False)
else:
tm.assert_series_equal(result, expected)
@@ -145,7 +145,7 @@ def compare_series_cat(result, expected, typ, version):
def compare_frame_dt_mixed_tzs(result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
- if LooseVersion(version) < '0.17.0':
+ if LooseVersion(version) < LooseVersion('0.17.0'):
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
@@ -155,10 +155,10 @@ def compare_frame_dt_mixed_tzs(result, expected, typ, version):
def compare_frame_cat_onecol(result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
- if LooseVersion(version) < '0.15.0':
+ if LooseVersion(version) < LooseVersion('0.15.0'):
tm.assert_frame_equal(result, expected, check_dtype=False,
check_categorical=False)
- elif LooseVersion(version) < '0.16.0':
+ elif LooseVersion(version) < LooseVersion('0.16.0'):
tm.assert_frame_equal(result, expected, check_categorical=False)
else:
tm.assert_frame_equal(result, expected)
@@ -177,7 +177,7 @@ def compare_index_period(result, expected, typ, version):
def compare_sp_frame_float(result, expected, typ, version):
- if LooseVersion(version) <= '0.18.1':
+ if LooseVersion(version) <= LooseVersion('0.18.1'):
tm.assert_sp_frame_equal(result, expected, exact_indices=False,
check_dtype=False)
else:
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 3fcbf90d12494..f239b7fe7855d 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -36,8 +36,8 @@
PossibleDataLossError, ClosedFileError)
-_default_compressor = ('blosc' if LooseVersion(tables.__version__) >= '2.2'
- else 'zlib')
+_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
+ LooseVersion('2.2') else 'zlib')
# testing on windows/py3 seems to fault
@@ -4270,7 +4270,7 @@ def test_select_as_multiple(self):
selector='df1')
@pytest.mark.skipif(
- LooseVersion(tables.__version__) < '3.1.0',
+ LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index b3ead7d9c8333..d0d7f881b37d0 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -143,7 +143,7 @@ def test_read_dta1(self, file):
tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self):
- if LooseVersion(sys.version) < '2.7':
+ if LooseVersion(sys.version) < LooseVersion('2.7'):
pytest.skip('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py
index 1bc49e9e5f96a..7661b46a79061 100644
--- a/pandas/tests/plotting/test_boxplot_method.py
+++ b/pandas/tests/plotting/test_boxplot_method.py
@@ -26,7 +26,7 @@ def _skip_if_mpl_14_or_dev_boxplot():
# Boxplot failures on 1.4 and 1.4.1
# Don't need try / except since that's done at class level
import matplotlib
- if str(matplotlib.__version__) >= LooseVersion('1.4'):
+ if LooseVersion(matplotlib.__version__) >= LooseVersion('1.4'):
pytest.skip("Matplotlib Regression in 1.4 and current dev.")
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index e23911e8d2003..e5dfbaf12b231 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -359,7 +359,7 @@ def test_conversion(self):
'2014-01-01 00:00:00.000000001'])
def test_repr(self, date, freq):
# dateutil zone change (only matters for repr)
- if dateutil.__version__ >= LooseVersion('2.6.0'):
+ if LooseVersion(dateutil.__version__) >= LooseVersion('2.6.0'):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Pacific']
else:
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index cfc319da1598d..caaa122024cba 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -757,7 +757,7 @@ def test_corr_rank(self):
tm.assert_almost_equal(result, expected)
# these methods got rewritten in 0.8
- if scipy.__version__ < LooseVersion('0.9'):
+ if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
pytest.skip("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 2dbce45317639..594049d3d3bb9 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -24,7 +24,8 @@
try:
import scipy
- _is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
+ _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
+ LooseVersion('0.19.0'))
except:
_is_scipy_ge_0190 = False
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index e45acdedbd2a9..69bd8081ce53a 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -244,7 +244,7 @@ def test_rank_methods_series(self):
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
expected = Series(sprank, index=index)
- if LooseVersion(scipy.__version__) >= '0.17.0':
+ if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
expected = expected.astype('float64')
tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index b661bde434814..17f34fdf3604c 100644
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -729,7 +729,7 @@ def f():
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
- if LooseVersion(np.__version__) > "1.7.1":
+ if LooseVersion(np.__version__) > LooseVersion("1.7.1"):
pytest.raises(TypeError, lambda: a < cat)
pytest.raises(TypeError, lambda: a < cat_rev)
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index d6b64896b8a60..84e811301ab4b 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -3080,7 +3080,7 @@ def test_fallback_plural(self):
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
- if dateutil.__version__ < LooseVersion('2.6.0'):
+ if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
@@ -3088,7 +3088,7 @@ def test_fallback_plural(self):
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
- elif dateutil.__version__ > LooseVersion('2.6.0'):
+ elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 5fd2089d234c1..af26ddb554752 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -553,11 +553,11 @@ def f():
assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H")
if str(tz).startswith('dateutil'):
- if dateutil.__version__ < LooseVersion('2.6.0'):
+ if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# see gh-14621
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
- elif dateutil.__version__ > LooseVersion('2.6.0'):
+ elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert times[-1] == Timestamp('2013-10-27 01:00:00+0100',
tz=tz, freq="H")
@@ -1242,14 +1242,14 @@ def test_ambiguous_compat(self):
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
- if dateutil.__version__ < LooseVersion('2.6.0'):
+ if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# dateutil 2.6 buggy w.r.t. ambiguous=0
# see gh-14621
# see https://github.com/dateutil/dateutil/issues/321
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
assert str(result_pytz) == str(result_dateutil)
- elif dateutil.__version__ > LooseVersion('2.6.0'):
+ elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert result_pytz.to_pydatetime().tzname() == 'GMT'
assert result_dateutil.to_pydatetime().tzname() == 'BST'
@@ -1264,7 +1264,7 @@ def test_ambiguous_compat(self):
assert result_pytz.value == 1382832000000000000
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
- if dateutil.__version__ > LooseVersion('2.5.3'):
+ if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'):
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (result_pytz.to_pydatetime().tzname() ==
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 9db09f23eb849..60b95b931b993 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -329,7 +329,7 @@ def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
- if v > LooseVersion('1.4.3') or v[0] == '0':
+ if LooseVersion(v) > LooseVersion('1.4.3') or str(v)[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
@@ -362,7 +362,7 @@ def _skip_if_no_xarray():
xarray = pytest.importorskip("xarray")
v = xarray.__version__
- if v < LooseVersion('0.7.0'):
+ if LooseVersion(v) < LooseVersion('0.7.0'):
import pytest
pytest.skip("xarray version is too low: {version}".format(version=v))
| - [X] closes #18633
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18637 | 2017-12-04T21:20:47Z | 2017-12-06T11:18:37Z | 2017-12-06T11:18:37Z | 2017-12-06T12:40:54Z |
ENH: DataFrame.pivot accepts a list of values | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 151ab8456c1d7..18013666d0b82 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -326,6 +326,7 @@ Other Enhancements
- ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method.
Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`).
- :func:`~pandas.api.types.is_scalar` now returns ``True`` for ``DateOffset`` objects (:issue:`18943`).
+- :func:`DataFrame.pivot` now accepts a list for the ``values=`` kwarg (:issue:`17160`).
- Added :func:`pandas.api.extensions.register_dataframe_accessor`,
:func:`pandas.api.extensions.register_series_accessor`, and
:func:`pandas.api.extensions.register_index_accessor`, accessor for libraries downstream of pandas
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index efb002474f876..f0770893140e4 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4956,11 +4956,14 @@ def pivot(self, index=None, columns=None, values=None):
existing index.
columns : string or object
Column to use to make new frame's columns.
- values : string or object, optional
- Column to use for populating new frame's values. If not
+ values : string, object or a list of the previous, optional
+ Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
+ .. versionchanged :: 0.23.0
+ Also accept list of column names.
+
Returns
-------
DataFrame
@@ -4989,15 +4992,16 @@ def pivot(self, index=None, columns=None, values=None):
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
- ... 'baz': [1, 2, 3, 4, 5, 6]})
+ ... 'baz': [1, 2, 3, 4, 5, 6],
+ ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
- foo bar baz
- 0 one A 1
- 1 one B 2
- 2 one C 3
- 3 two A 4
- 4 two B 5
- 5 two C 6
+ foo bar baz zoo
+ 0 one A 1 x
+ 1 one B 2 y
+ 2 one C 3 z
+ 3 two A 4 q
+ 4 two B 5 w
+ 5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
@@ -5011,6 +5015,13 @@ def pivot(self, index=None, columns=None, values=None):
one 1 2 3
two 4 5 6
+ >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
+ baz zoo
+ bar A B C A B C
+ foo
+ one 1 2 3 x y z
+ two 4 5 6 q w t
+
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 3ef152d091b24..389f1af48434a 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -392,16 +392,21 @@ def pivot(self, index=None, columns=None, values=None):
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
- return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
- indexed = self._constructor_sliced(
- self[values].values,
- index=MultiIndex.from_arrays([index, self[columns]]))
- return indexed.unstack(columns)
+ index = MultiIndex.from_arrays([index, self[columns]])
+
+ if is_list_like(values) and not isinstance(values, tuple):
+ # Exclude tuple because it is seen as a single column name
+ indexed = self._constructor(self[values].values, index=index,
+ columns=values)
+ else:
+ indexed = self._constructor_sliced(self[values].values,
+ index=index)
+ return indexed.unstack(columns)
def pivot_simple(index, columns, values):
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 786c57a4a82df..92bedbabdf2f1 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -371,6 +371,89 @@ def test_pivot_periods(self):
pv = df.pivot(index='p1', columns='p2', values='data1')
tm.assert_frame_equal(pv, expected)
+ @pytest.mark.parametrize('values', [
+ ['baz', 'zoo'], np.array(['baz', 'zoo']),
+ pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])
+ ])
+ def test_pivot_with_list_like_values(self, values):
+ # issue #17160
+ df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6],
+ 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
+
+ result = df.pivot(index='foo', columns='bar', values=values)
+
+ data = [[1, 2, 3, 'x', 'y', 'z'],
+ [4, 5, 6, 'q', 'w', 't']]
+ index = Index(data=['one', 'two'], name='foo')
+ columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],
+ labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
+ names=[None, 'bar'])
+ expected = DataFrame(data=data, index=index,
+ columns=columns, dtype='object')
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize('values', [
+ ['bar', 'baz'], np.array(['bar', 'baz']),
+ pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])
+ ])
+ def test_pivot_with_list_like_values_nans(self, values):
+ # issue #17160
+ df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6],
+ 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
+
+ result = df.pivot(index='zoo', columns='foo', values=values)
+
+ data = [[np.nan, 'A', np.nan, 4],
+ [np.nan, 'C', np.nan, 6],
+ [np.nan, 'B', np.nan, 5],
+ ['A', np.nan, 1, np.nan],
+ ['B', np.nan, 2, np.nan],
+ ['C', np.nan, 3, np.nan]]
+ index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')
+ columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
+ names=[None, 'foo'])
+ expected = DataFrame(data=data, index=index,
+ columns=columns, dtype='object')
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'
+ 'with KeyError #19966')
+ def test_pivot_with_multiindex(self):
+ # issue #17160
+ index = Index(data=[0, 1, 2, 3, 4, 5])
+ data = [['one', 'A', 1, 'x'],
+ ['one', 'B', 2, 'y'],
+ ['one', 'C', 3, 'z'],
+ ['two', 'A', 4, 'q'],
+ ['two', 'B', 5, 'w'],
+ ['two', 'C', 6, 't']]
+ columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],
+ labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
+ df = DataFrame(data=data, index=index, columns=columns, dtype='object')
+ result = df.pivot(index=('bar', 'first'), columns=('bar', 'second'),
+ values=('baz', 'first'))
+
+ data = {'A': Series([1, 4], index=['one', 'two']),
+ 'B': Series([2, 5], index=['one', 'two']),
+ 'C': Series([3, 6], index=['one', 'two'])}
+ expected = DataFrame(data)
+ tm.assert_frame_equal(result, expected)
+
+ def test_pivot_with_tuple_of_values(self):
+ # issue #17160
+ df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
+ 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
+ 'baz': [1, 2, 3, 4, 5, 6],
+ 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
+ with pytest.raises(KeyError):
+ # tuple is seen as a single column name
+ df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))
+
def test_margins(self):
def _check_output(result, values_col, index=['A', 'B'],
columns=['C'],
| - [x] closes https://github.com/pandas-dev/pandas/issues/17160
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18636 | 2017-12-04T21:13:33Z | 2018-03-26T07:24:02Z | 2018-03-26T07:24:01Z | 2018-03-26T08:14:26Z |
DOC: explain the `mode.chained_assignment` option | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 2f9f7a04def19..2f3dbb9746066 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1833,15 +1833,27 @@ that you've done this:
Yikes!
+.. _indexing.evaluation_order:
+
Evaluation order matters
~~~~~~~~~~~~~~~~~~~~~~~~
-Furthermore, in chained expressions, the order may determine whether a copy is returned or not.
-If an expression will set values on a copy of a slice, then a ``SettingWithCopy``
-warning will be issued.
+When you use chained indexing, the order and type of the indexing operation
+partially determine whether the result is a slice into the original object, or
+a copy of the slice.
+
+Pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a
+slice is frequently not intentional, but a mistake caused by chained indexing
+returning a copy where a slice was expected.
+
+If you would like pandas to be more or less trusting about assignment to a
+chained indexing expression, you can set the :ref:`option <options>`
+``mode.chained_assignment`` to one of these values:
-You can control the action of a chained assignment via the option ``mode.chained_assignment``,
-which can take the values ``['raise','warn',None]``, where showing a warning is the default.
+* ``'warn'``, the default, means a ``SettingWithCopyWarning`` is printed.
+* ``'raise'`` means pandas will raise a ``SettingWithCopyException``
+ you have to deal with.
+* ``None`` will suppress the warnings entirely.
.. ipython:: python
:okwarning:
diff --git a/doc/source/options.rst b/doc/source/options.rst
index be3a3d9a55534..db3380bd4a3e7 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -417,9 +417,10 @@ io.hdf.dropna_table True drop ALL nan rows when appe
io.parquet.engine None The engine to use as a default for
parquet reading and writing. If None
then try 'pyarrow' and 'fastparquet'
-mode.chained_assignment warn Raise an exception, warn, or no
- action if trying to use chained
- assignment, The default is warn
+mode.chained_assignment warn Controls ``SettingWithCopyWarning``:
+ 'raise', 'warn', or None. Raise an
+ exception, warn, or no action if
+ trying to use :ref:`chained assignment <indexing.evaluation_order>`.
mode.sim_interactive False Whether to simulate interactive mode
for purposes of testing.
mode.use_inf_as_na False True means treat None, NaN, -INF,
| - [x] closes #xxxx (n/a)
- [x] tests added / passed (yes, Sphinx compiles without errors)
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry (n/a)
Make the explanation of the `mode.chained_assignment` option link to the explanation of chained indexing and assignment. Explain what the three possible settings do. | https://api.github.com/repos/pandas-dev/pandas/pulls/18635 | 2017-12-04T20:34:39Z | 2017-12-07T15:46:08Z | 2017-12-07T15:46:08Z | 2017-12-11T20:20:50Z |
CLN: Replaced package list with find_packages | diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml
index 57748fef1a2e5..c72abd0c19516 100644
--- a/ci/environment-dev.yaml
+++ b/ci/environment-dev.yaml
@@ -10,5 +10,5 @@ dependencies:
- python-dateutil>=2.5.0
- python=3
- pytz
- - setuptools
+ - setuptools>=3.3
- sphinx
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt
index e9840388203b1..82f8de277c57b 100644
--- a/ci/requirements_dev.txt
+++ b/ci/requirements_dev.txt
@@ -6,5 +6,5 @@ moto
pytest>=3.1
python-dateutil>=2.5.0
pytz
-setuptools
-sphinx
\ No newline at end of file
+setuptools>=3.3
+sphinx
diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml
index 8152af84228b8..0b54980d2bc87 100644
--- a/conda.recipe/meta.yaml
+++ b/conda.recipe/meta.yaml
@@ -15,7 +15,7 @@ requirements:
- python
- cython
- numpy x.x
- - setuptools
+ - setuptools >=3.3
run:
- python
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 105135fd2c454..979d5afd0a04f 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -194,7 +194,7 @@ installed), make sure you have `pytest
Dependencies
------------
-* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__
+* `setuptools <https://setuptools.readthedocs.io/en/latest/>`__: 3.3.0 or higher
* `NumPy <http://www.numpy.org>`__: 1.9.0 or higher
* `python-dateutil <//https://dateutil.readthedocs.io/en/stable/>`__: 2.5.0 or higher
* `pytz <http://pytz.sourceforge.net/>`__
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 832bba056ada1..db3a1dc4d39e8 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -186,6 +186,7 @@ Other API Changes
- :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`)
- :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`)
- The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`)
+- Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`)
.. _whatsnew_0220.deprecations:
diff --git a/setup.py b/setup.py
index 19818b07162cf..515e1660fa6de 100755
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
import sys
import shutil
from distutils.version import LooseVersion
-from setuptools import setup, Command
+from setuptools import setup, Command, find_packages
# versioning
import versioneer
@@ -716,100 +716,12 @@ def pxd(name):
setup(name=DISTNAME,
maintainer=AUTHOR,
version=versioneer.get_version(),
- packages=['pandas',
- 'pandas.api',
- 'pandas.api.types',
- 'pandas.compat',
- 'pandas.compat.numpy',
- 'pandas.core',
- 'pandas.core.dtypes',
- 'pandas.core.indexes',
- 'pandas.core.computation',
- 'pandas.core.reshape',
- 'pandas.core.sparse',
- 'pandas.core.tools',
- 'pandas.core.util',
- 'pandas.computation',
- 'pandas.errors',
- 'pandas.formats',
- 'pandas.io',
- 'pandas.io.json',
- 'pandas.io.sas',
- 'pandas.io.msgpack',
- 'pandas.io.formats',
- 'pandas.io.clipboard',
- 'pandas._libs',
- 'pandas._libs.tslibs',
- 'pandas.plotting',
- 'pandas.stats',
- 'pandas.types',
- 'pandas.util',
- 'pandas.tests',
- 'pandas.tests.api',
- 'pandas.tests.dtypes',
- 'pandas.tests.categorical',
- 'pandas.tests.computation',
- 'pandas.tests.sparse',
- 'pandas.tests.frame',
- 'pandas.tests.generic',
- 'pandas.tests.indexing',
- 'pandas.tests.indexes',
- 'pandas.tests.indexes.datetimes',
- 'pandas.tests.indexes.timedeltas',
- 'pandas.tests.indexes.period',
- 'pandas.tests.internals',
- 'pandas.tests.io',
- 'pandas.tests.io.json',
- 'pandas.tests.io.parser',
- 'pandas.tests.io.sas',
- 'pandas.tests.io.msgpack',
- 'pandas.tests.io.formats',
- 'pandas.tests.groupby',
- 'pandas.tests.reshape',
- 'pandas.tests.reshape.merge',
- 'pandas.tests.series',
- 'pandas.tests.scalar',
- 'pandas.tests.tseries',
- 'pandas.tests.tseries.offsets',
- 'pandas.tests.plotting',
- 'pandas.tests.tools',
- 'pandas.tests.util',
- 'pandas.tools',
- 'pandas.tseries',
- ],
- package_data={'pandas.tests': ['data/*.csv'],
- 'pandas.tests.indexes': ['data/*.pickle'],
+ packages=find_packages(include=['pandas', 'pandas.*']),
+ package_data={'': ['data/*', 'templates/*'],
'pandas.tests.io': ['data/legacy_hdf/*.h5',
'data/legacy_pickle/*/*.pickle',
'data/legacy_msgpack/*/*.msgpack',
- 'data/*.csv*',
- 'data/*.dta',
- 'data/*.pickle',
- 'data/*.txt',
- 'data/*.xls',
- 'data/*.xlsx',
- 'data/*.xlsm',
- 'data/*.table',
- 'parser/data/*.csv',
- 'parser/data/*.gz',
- 'parser/data/*.bz2',
- 'parser/data/*.txt',
- 'parser/data/*.tar',
- 'parser/data/*.zip',
- 'parser/data/*.tar.gz',
- 'sas/data/*.csv',
- 'sas/data/*.xpt',
- 'sas/data/*.sas7bdat',
- 'data/*.html',
- 'data/html_encoding/*.html',
- 'json/data/*.json*'],
- 'pandas.tests.io.formats': ['data/*.csv'],
- 'pandas.tests.io.msgpack': ['data/*.mp'],
- 'pandas.tests.reshape': ['data/*.csv'],
- 'pandas.tests.reshape.merge': ['data/*.csv'],
- 'pandas.tests.tseries.offsets': ['data/*.pickle'],
- 'pandas.io.formats': ['templates/*.tpl']
- },
+ 'data/html_encoding/*.html']},
ext_modules=extensions,
maintainer_email=EMAIL,
description=DESCRIPTION,
| - [X] closes #18535
- [ ] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
As far as local testing goes, I built source distributions using py27 and py36. Compared to HEAD, the generated distributions were practically identical. Technically they were off by a size of around 100 bytes, but I was getting that same variation when generating clean built distributions just on HEAD alone. Running a recursive diff showed some slight differences in the ``.so`` files, but given I saw that variation without making any changes I think it is OK. | https://api.github.com/repos/pandas-dev/pandas/pulls/18632 | 2017-12-04T16:53:45Z | 2017-12-08T11:12:59Z | 2017-12-08T11:12:59Z | 2017-12-12T15:41:31Z |
ENH: support non default indexes in writing to Parquet | diff --git a/doc/source/io.rst b/doc/source/io.rst
index 54e7a11c5f2b1..2cbd64bf5186b 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4504,11 +4504,8 @@ dtypes, including extension dtypes such as datetime with tz.
Several caveats.
-- The format will NOT write an ``Index``, or ``MultiIndex`` for the
- ``DataFrame`` and will raise an error if a non-default one is provided. You
- can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to
- ignore it.
- Duplicate column names and non-string columns names are not supported
+- Index level names, if specified, must be strings
- Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype.
- Non supported types include ``Period`` and actual python object types. These will raise a helpful error message
on an attempt at serialization.
diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 31902c98d0b6c..1b709cfd4aa90 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -61,6 +61,9 @@ New features
Improvements to the Parquet IO functionality
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- :func:`DataFrame.to_parquet` will now write non-default indexes when the
+ underlying engine supports it. The indexes will be preserved when reading
+ back in with :func:`read_parquet` (:issue:`18581`).
- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`)
- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`)
@@ -91,8 +94,6 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
--
-
Conversion
^^^^^^^^^^
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py
index 7827c3ae04d4d..aa5dd821f5980 100644
--- a/pandas/io/parquet.py
+++ b/pandas/io/parquet.py
@@ -3,7 +3,8 @@
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
-from pandas.compat import range
+from pandas.compat import string_types
+from pandas.core.common import AbstractMethodError
from pandas.io.common import get_filepath_or_buffer
@@ -34,39 +35,75 @@ def get_engine(engine):
return FastParquetImpl()
-class PyArrowImpl(object):
+class BaseImpl(object):
+
+ api = None # module
+
+ @staticmethod
+ def validate_dataframe(df):
+
+ if not isinstance(df, DataFrame):
+ raise ValueError("to_parquet only supports IO with DataFrames")
+
+ # must have value column names (strings only)
+ if df.columns.inferred_type not in {'string', 'unicode'}:
+ raise ValueError("parquet must have string column names")
+
+ # index level names must be strings
+ valid_names = all(
+ isinstance(name, string_types)
+ for name in df.index.names
+ if name is not None
+ )
+ if not valid_names:
+ raise ValueError("Index level names must be strings")
+
+ def write(self, df, path, compression, **kwargs):
+ raise AbstractMethodError(self)
+
+ def read(self, path, columns=None, **kwargs):
+ raise AbstractMethodError(self)
+
+
+class PyArrowImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of pyarrow
# we need to import on first use
-
try:
import pyarrow
import pyarrow.parquet
except ImportError:
- raise ImportError("pyarrow is required for parquet support\n\n"
- "you can install via conda\n"
- "conda install pyarrow -c conda-forge\n"
- "\nor via pip\n"
- "pip install -U pyarrow\n")
-
- if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'):
- raise ImportError("pyarrow >= 0.4.1 is required for parquet"
- "support\n\n"
- "you can install via conda\n"
- "conda install pyarrow -c conda-forge\n"
- "\nor via pip\n"
- "pip install -U pyarrow\n")
-
- self._pyarrow_lt_050 = (LooseVersion(pyarrow.__version__) <
- LooseVersion('0.5.0'))
- self._pyarrow_lt_060 = (LooseVersion(pyarrow.__version__) <
- LooseVersion('0.6.0'))
+ raise ImportError(
+ "pyarrow is required for parquet support\n\n"
+ "you can install via conda\n"
+ "conda install pyarrow -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U pyarrow\n"
+ )
+ if LooseVersion(pyarrow.__version__) < '0.4.1':
+ raise ImportError(
+ "pyarrow >= 0.4.1 is required for parquet support\n\n"
+ "you can install via conda\n"
+ "conda install pyarrow -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U pyarrow\n"
+ )
+
+ self._pyarrow_lt_060 = (
+ LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0'))
+ self._pyarrow_lt_070 = (
+ LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'))
+
self.api = pyarrow
def write(self, df, path, compression='snappy',
coerce_timestamps='ms', **kwargs):
+ self.validate_dataframe(df)
+ if self._pyarrow_lt_070:
+ self._validate_write_lt_070(df)
path, _, _ = get_filepath_or_buffer(path)
+
if self._pyarrow_lt_060:
table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
self.api.parquet.write_table(
@@ -80,36 +117,75 @@ def write(self, df, path, compression='snappy',
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
+ if self._pyarrow_lt_070:
+ return self.api.parquet.read_pandas(path, columns=columns,
+ **kwargs).to_pandas()
+ kwargs['use_pandas_metadata'] = True
return self.api.parquet.read_table(path, columns=columns,
**kwargs).to_pandas()
-
-class FastParquetImpl(object):
+ def _validate_write_lt_070(self, df):
+ # Compatibility shim for pyarrow < 0.7.0
+ # TODO: Remove in pandas 0.22.0
+ from pandas.core.indexes.multi import MultiIndex
+ if isinstance(df.index, MultiIndex):
+ msg = (
+ "Multi-index DataFrames are only supported "
+ "with pyarrow >= 0.7.0"
+ )
+ raise ValueError(msg)
+ # Validate index
+ if not isinstance(df.index, Int64Index):
+ msg = (
+ "pyarrow < 0.7.0 does not support serializing {} for the "
+ "index; you can .reset_index() to make the index into "
+ "column(s), or install the latest version of pyarrow or "
+ "fastparquet."
+ )
+ raise ValueError(msg.format(type(df.index)))
+ if not df.index.equals(RangeIndex(len(df))):
+ raise ValueError(
+ "pyarrow < 0.7.0 does not support serializing a non-default "
+ "index; you can .reset_index() to make the index into "
+ "column(s), or install the latest version of pyarrow or "
+ "fastparquet."
+ )
+ if df.index.name is not None:
+ raise ValueError(
+ "pyarrow < 0.7.0 does not serialize indexes with a name; you "
+ "can set the index.name to None or install the latest version "
+ "of pyarrow or fastparquet."
+ )
+
+
+class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
-
try:
import fastparquet
except ImportError:
- raise ImportError("fastparquet is required for parquet support\n\n"
- "you can install via conda\n"
- "conda install fastparquet -c conda-forge\n"
- "\nor via pip\n"
- "pip install -U fastparquet")
-
- if LooseVersion(fastparquet.__version__) < LooseVersion('0.1.0'):
- raise ImportError("fastparquet >= 0.1.0 is required for parquet "
- "support\n\n"
- "you can install via conda\n"
- "conda install fastparquet -c conda-forge\n"
- "\nor via pip\n"
- "pip install -U fastparquet")
-
+ raise ImportError(
+ "fastparquet is required for parquet support\n\n"
+ "you can install via conda\n"
+ "conda install fastparquet -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U fastparquet"
+ )
+ if LooseVersion(fastparquet.__version__) < '0.1.0':
+ raise ImportError(
+ "fastparquet >= 0.1.0 is required for parquet "
+ "support\n\n"
+ "you can install via conda\n"
+ "conda install fastparquet -c conda-forge\n"
+ "\nor via pip\n"
+ "pip install -U fastparquet"
+ )
self.api = fastparquet
def write(self, df, path, compression='snappy', **kwargs):
+ self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
@@ -120,7 +196,8 @@ def write(self, df, path, compression='snappy', **kwargs):
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
- return self.api.ParquetFile(path).to_pandas(columns=columns, **kwargs)
+ parquet_file = self.api.ParquetFile(path)
+ return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
@@ -141,43 +218,7 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
kwargs
Additional keyword arguments passed to the engine
"""
-
impl = get_engine(engine)
-
- if not isinstance(df, DataFrame):
- raise ValueError("to_parquet only support IO with DataFrames")
-
- valid_types = {'string', 'unicode'}
-
- # validate index
- # --------------
-
- # validate that we have only a default index
- # raise on anything else as we don't serialize the index
-
- if not isinstance(df.index, Int64Index):
- raise ValueError("parquet does not support serializing {} "
- "for the index; you can .reset_index()"
- "to make the index into column(s)".format(
- type(df.index)))
-
- if not df.index.equals(RangeIndex.from_range(range(len(df)))):
- raise ValueError("parquet does not support serializing a "
- "non-default index for the index; you "
- "can .reset_index() to make the index "
- "into column(s)")
-
- if df.index.name is not None:
- raise ValueError("parquet does not serialize index meta-data on a "
- "default index")
-
- # validate columns
- # ----------------
-
- # must have value column names (strings only)
- if df.columns.inferred_type not in valid_types:
- raise ValueError("parquet must have string column names")
-
return impl.write(df, path, compression=compression, **kwargs)
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 8e6f8998f5eeb..c59acbd946f91 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -207,15 +207,14 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
class Base(object):
def check_error_on_write(self, df, engine, exc):
- # check that we are raising the exception
- # on writing
-
+ # check that we are raising the exception on writing
with pytest.raises(exc):
with tm.ensure_clean() as path:
to_parquet(df, path, engine, compression=None)
def check_round_trip(self, df, engine, expected=None,
- write_kwargs=None, read_kwargs=None):
+ write_kwargs=None, read_kwargs=None,
+ check_names=True):
if write_kwargs is None:
write_kwargs = {}
if read_kwargs is None:
@@ -226,7 +225,7 @@ def check_round_trip(self, df, engine, expected=None,
if expected is None:
expected = df
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected, check_names=check_names)
# repeat
to_parquet(df, path, engine, **write_kwargs)
@@ -234,7 +233,7 @@ def check_round_trip(self, df, engine, expected=None,
if expected is None:
expected = df
- tm.assert_frame_equal(result, expected)
+ tm.assert_frame_equal(result, expected, check_names=check_names)
class TestBasic(Base):
@@ -273,33 +272,6 @@ def test_columns_dtypes_invalid(self, engine):
datetime.datetime(2011, 1, 1, 1, 1)]
self.check_error_on_write(df, engine, ValueError)
- def test_write_with_index(self, engine):
-
- df = pd.DataFrame({'A': [1, 2, 3]})
- self.check_round_trip(df, engine, write_kwargs={'compression': None})
-
- # non-default index
- for index in [[2, 3, 4],
- pd.date_range('20130101', periods=3),
- list('abc'),
- [1, 3, 4],
- pd.MultiIndex.from_tuples([('a', 1), ('a', 2),
- ('b', 1)]),
- ]:
-
- df.index = index
- self.check_error_on_write(df, engine, ValueError)
-
- # index with meta-data
- df.index = [0, 1, 2]
- df.index.name = 'foo'
- self.check_error_on_write(df, engine, ValueError)
-
- # column multi-index
- df.index = [0, 1, 2]
- df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]),
- self.check_error_on_write(df, engine, ValueError)
-
@pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli'])
def test_compression(self, engine, compression):
@@ -323,6 +295,72 @@ def test_read_columns(self, engine):
write_kwargs={'compression': None},
read_kwargs={'columns': ['string']})
+ def test_write_index(self, engine):
+ check_names = engine != 'fastparquet'
+
+ if engine == 'pyarrow':
+ import pyarrow
+ if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
+ pytest.skip("pyarrow is < 0.7.0")
+
+ df = pd.DataFrame({'A': [1, 2, 3]})
+ self.check_round_trip(df, engine, write_kwargs={'compression': None})
+
+ indexes = [
+ [2, 3, 4],
+ pd.date_range('20130101', periods=3),
+ list('abc'),
+ [1, 3, 4],
+ ]
+ # non-default index
+ for index in indexes:
+ df.index = index
+ self.check_round_trip(
+ df, engine,
+ write_kwargs={'compression': None},
+ check_names=check_names)
+
+ # index with meta-data
+ df.index = [0, 1, 2]
+ df.index.name = 'foo'
+ self.check_round_trip(df, engine, write_kwargs={'compression': None})
+
+ def test_write_multiindex(self, pa_ge_070):
+ # Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version
+ engine = pa_ge_070
+
+ df = pd.DataFrame({'A': [1, 2, 3]})
+ index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
+ df.index = index
+ self.check_round_trip(df, engine, write_kwargs={'compression': None})
+
+ def test_write_column_multiindex(self, engine):
+ # column multi-index
+ mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
+ df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
+ self.check_error_on_write(df, engine, ValueError)
+
+ def test_multiindex_with_columns(self, pa_ge_070):
+
+ engine = pa_ge_070
+ dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS')
+ df = pd.DataFrame(np.random.randn(2 * len(dates), 3),
+ columns=list('ABC'))
+ index1 = pd.MultiIndex.from_product(
+ [['Level1', 'Level2'], dates],
+ names=['level', 'date'])
+ index2 = index1.copy(names=None)
+ for index in [index1, index2]:
+ df.index = index
+ with tm.ensure_clean() as path:
+ df.to_parquet(path, engine)
+ result = read_parquet(path, engine)
+ expected = df
+ tm.assert_frame_equal(result, expected)
+ result = read_parquet(path, engine, columns=['A', 'B'])
+ expected = df[['A', 'B']]
+ tm.assert_frame_equal(result, expected)
+
class TestParquetPyArrow(Base):
@@ -352,14 +390,12 @@ def test_basic_subset_columns(self, pa, df_full):
read_kwargs={'columns': ['string', 'int']})
def test_duplicate_columns(self, pa):
-
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, pa, ValueError)
def test_unsupported(self, pa):
-
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self.check_error_on_write(df, pa, ValueError)
| - [x] closes #18581
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18629 | 2017-12-04T13:43:05Z | 2017-12-11T18:57:17Z | 2017-12-11T18:57:16Z | 2017-12-12T02:38:30Z |
BUG: Don't overflow in DataFrame init | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 495d0beaf3faa..f2500bb29d0be 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -186,7 +186,7 @@ Conversion
^^^^^^^^^^
- Bug in :class:`Index` constructor with `dtype='uint64'` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`)
--
+- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`)
-
Indexing
@@ -262,4 +262,3 @@ Other
- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`)
- Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
- Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`)
--
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index cb192fcced318..e15b4693432d9 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -181,14 +181,22 @@ cdef class Seen(object):
"""
Set flags indicating that an integer value was encountered.
+ In addition to setting a flag that an integer was seen, we
+ also set two flags depending on the type of integer seen:
+
+ 1) sint_ : a negative (signed) number in the
+ range of [-2**63, 0) was encountered
+ 2) uint_ : a positive number in the range of
+ [2**63, 2**64) was encountered
+
Parameters
----------
val : Python int
Value with which to set the flags.
"""
self.int_ = 1
- self.sint_ = self.sint_ or (val < 0)
- self.uint_ = self.uint_ or (val > oINT64_MAX)
+ self.sint_ = self.sint_ or (oINT64_MIN <= val < 0)
+ self.uint_ = self.uint_ or (oINT64_MAX < val <= oUINT64_MAX)
@property
def numeric_(self):
@@ -1263,7 +1271,8 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0,
if not seen.null_:
seen.saw_int(int(val))
- if seen.uint_ and seen.sint_:
+ if ((seen.uint_ and seen.sint_) or
+ val > oUINT64_MAX or val < oINT64_MIN):
seen.object_ = 1
break
diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py
index ef12416ef4e1c..092bbb36169d4 100644
--- a/pandas/tests/dtypes/test_inference.py
+++ b/pandas/tests/dtypes/test_inference.py
@@ -388,6 +388,13 @@ def test_convert_numeric_int64_uint64(self, case, coerce):
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
+ @pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
+ def test_convert_int_overflow(self, value):
+ # see gh-18584
+ arr = np.array([value], dtype=object)
+ result = lib.maybe_convert_objects(arr)
+ tm.assert_numpy_array_equal(arr, result)
+
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 876e0ea7ea0b3..8fd196bfc4d2a 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -195,6 +195,18 @@ def test_constructor_overflow_int64(self):
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
+ @pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
+ np.array([2**65]), [2**64 + 1],
+ np.array([-2**63 - 4], dtype=object),
+ np.array([-2**64 - 1]), [-2**65 - 2]])
+ def test_constructor_int_overflow(self, values):
+ # see gh-18584
+ value = values[0]
+ result = DataFrame(values)
+
+ assert result[0].dtype == object
+ assert result[0][0] == value
+
def test_constructor_ordereddict(self):
import random
nitems = 100
| For integers larger than what uint64 can handle (or smaller than what int64 can handle), we gracefully default to the object dtype instead of overflowing.
Closes #18584. | https://api.github.com/repos/pandas-dev/pandas/pulls/18624 | 2017-12-04T10:16:07Z | 2017-12-05T22:54:44Z | 2017-12-05T22:54:44Z | 2017-12-06T02:35:38Z |
BLD: Bump Cython version from 0.23 to 0.24 | diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build
index d1cc61df0a77c..e24baa98d956e 100644
--- a/ci/requirements-2.7.build
+++ b/ci/requirements-2.7.build
@@ -3,4 +3,4 @@ python-dateutil=2.5.0
pytz=2013b
nomkl
numpy
-cython=0.23
+cython=0.24
diff --git a/ci/requirements-2.7_COMPAT.build b/ci/requirements-2.7_COMPAT.build
index aa767c1001196..0a83a7346e8b5 100644
--- a/ci/requirements-2.7_COMPAT.build
+++ b/ci/requirements-2.7_COMPAT.build
@@ -1,5 +1,5 @@
python=2.7*
numpy=1.9.2
-cython=0.23
+cython=0.24
python-dateutil=2.5.0
pytz=2013b
diff --git a/ci/requirements-2.7_LOCALE.build b/ci/requirements-2.7_LOCALE.build
index 96cb184ec2665..a6f2e25387910 100644
--- a/ci/requirements-2.7_LOCALE.build
+++ b/ci/requirements-2.7_LOCALE.build
@@ -2,4 +2,4 @@ python=2.7*
python-dateutil
pytz=2013b
numpy=1.9.2
-cython=0.23
+cython=0.24
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 264bd1de1fc77..cbe945e0cf2cf 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -94,8 +94,7 @@ hence we'll concentrate our efforts cythonizing these two functions.
Plain cython
~~~~~~~~~~~~
-First we're going to need to import the cython magic function to ipython (for
-cython versions < 0.21 you can use ``%load_ext cythonmagic``):
+First we're going to need to import the cython magic function to ipython:
.. ipython:: python
:okwarning:
diff --git a/doc/source/install.rst b/doc/source/install.rst
index ae89c64b6e91e..aeb1abbadabb3 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -228,7 +228,7 @@ Optional Dependencies
~~~~~~~~~~~~~~~~~~~~~
* `Cython <http://www.cython.org>`__: Only necessary to build development
- version. Version 0.23 or higher.
+ version. Version 0.24 or higher.
* `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.14.0 or higher
* `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended.
* `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage. Version 3.0.0 or higher required, Version 3.2.1 or higher highly recommended.
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 5e605ecb7d8d5..fd37f269c2f83 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -83,10 +83,6 @@ Other Enhancements
Backwards incompatible API changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`)
-- :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`)
-- The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`)
-
.. _whatsnew_0220.api_breaking.deps:
Dependencies have increased minimum versions
@@ -104,8 +100,6 @@ If installed, we now require:
+-----------------+-----------------+----------+
-
-
.. _whatsnew_0220.api:
Other API Changes
@@ -129,6 +123,10 @@ Other API Changes
- :func:`DataFrame.from_items` provides a more informative error message when passed scalar values (:issue:`17312`)
- When created with duplicate labels, ``MultiIndex`` now raises a ``ValueError``. (:issue:`17464`)
- Building from source now explicity requires ``setuptools`` in ``setup.py`` (:issue:`18113`)
+- :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`)
+- :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`)
+- The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`)
+- Building pandas for development now requires ``cython >= 0.24`` (:issue:`18613`)
.. _whatsnew_0220.deprecations:
diff --git a/setup.py b/setup.py
index 57131255884de..004f111115079 100755
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@ def is_platform_mac():
return sys.platform == 'darwin'
-min_cython_ver = '0.23'
+min_cython_ver = '0.24'
try:
import Cython
ver = Cython.__version__
| - [X] closes #18613
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
For the whatsnew:
- Used the same whatsnew message as was used during the last cython version bump in 0.20
- Moved some previous entries that were under the main "Backwards incompatible API changes" section to the "Other API Changes" subsection. | https://api.github.com/repos/pandas-dev/pandas/pulls/18623 | 2017-12-04T08:33:06Z | 2017-12-04T20:13:32Z | 2017-12-04T20:13:32Z | 2017-12-04T20:17:22Z |
CLN: Remove SparseList from pandas API | diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst
index 89efa7b4be3ee..2e224f103a95e 100644
--- a/doc/source/sparse.rst
+++ b/doc/source/sparse.rst
@@ -85,15 +85,6 @@ can be converted back to a regular ndarray by calling ``to_dense``:
sparr.to_dense()
-.. _sparse.list:
-
-SparseList
-----------
-
-The ``SparseList`` class has been deprecated and will be removed in a future version.
-See the `docs of a previous version <http://pandas.pydata.org/pandas-docs/version/0.18.1/sparse.html#sparselist>`__
-for documentation on ``SparseList``.
-
SparseIndex objects
-------------------
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 304ccd1f9350b..baa7a4b5383b6 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -136,6 +136,7 @@ Removal of prior version deprecations/changes
- ``pd.tseries.util.pivot_annual`` has been removed (deprecated since v0.19). Use ``pivot_table`` instead (:issue:`18370`)
- ``pd.tseries.util.isleapyear`` has been removed (deprecated since v0.19). Use ``.is_leap_year`` property in Datetime-likes instead (:issue:`18370`)
- ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd.merge_ordered`` instead (:issue:`18459`)
+- The ``SparseList`` class has been removed (:issue:`14007`)
.. _whatsnew_0220.performance:
diff --git a/pandas/core/sparse/api.py b/pandas/core/sparse/api.py
index f79bb4886da4b..85941e6923338 100644
--- a/pandas/core/sparse/api.py
+++ b/pandas/core/sparse/api.py
@@ -1,6 +1,5 @@
# pylint: disable=W0611
# flake8: noqa
from pandas.core.sparse.array import SparseArray
-from pandas.core.sparse.list import SparseList
from pandas.core.sparse.series import SparseSeries
from pandas.core.sparse.frame import SparseDataFrame
diff --git a/pandas/core/sparse/list.py b/pandas/core/sparse/list.py
deleted file mode 100644
index f3e64b7efc764..0000000000000
--- a/pandas/core/sparse/list.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import warnings
-import numpy as np
-from pandas.core.base import PandasObject
-from pandas.io.formats.printing import pprint_thing
-
-from pandas.core.dtypes.common import is_scalar
-from pandas.core.sparse.array import SparseArray
-from pandas.util._validators import validate_bool_kwarg
-import pandas._libs.sparse as splib
-
-
-class SparseList(PandasObject):
-
- """
- Data structure for accumulating data to be converted into a
- SparseArray. Has similar API to the standard Python list
-
- Parameters
- ----------
- data : scalar or array-like
- fill_value : scalar, default NaN
- """
-
- def __init__(self, data=None, fill_value=np.nan):
-
- # see gh-13784
- warnings.warn("SparseList is deprecated and will be removed "
- "in a future version", FutureWarning, stacklevel=2)
-
- self.fill_value = fill_value
- self._chunks = []
-
- if data is not None:
- self.append(data)
-
- def __unicode__(self):
- contents = '\n'.join(repr(c) for c in self._chunks)
- return '{self}\n{contents}'.format(self=object.__repr__(self),
- contents=pprint_thing(contents))
-
- def __len__(self):
- return sum(len(c) for c in self._chunks)
-
- def __getitem__(self, i):
- if i < 0:
- if i + len(self) < 0: # pragma: no cover
- raise ValueError('{index} out of range'.format(index=i))
- i += len(self)
-
- passed = 0
- j = 0
- while i >= passed + len(self._chunks[j]):
- passed += len(self._chunks[j])
- j += 1
- return self._chunks[j][i - passed]
-
- def __setitem__(self, i, value):
- raise NotImplementedError
-
- @property
- def nchunks(self):
- return len(self._chunks)
-
- @property
- def is_consolidated(self):
- return self.nchunks == 1
-
- def consolidate(self, inplace=True):
- """
- Internally consolidate chunks of data
-
- Parameters
- ----------
- inplace : boolean, default True
- Modify the calling object instead of constructing a new one
-
- Returns
- -------
- splist : SparseList
- If inplace=False, new object, otherwise reference to existing
- object
- """
- inplace = validate_bool_kwarg(inplace, 'inplace')
- if not inplace:
- result = self.copy()
- else:
- result = self
-
- if result.is_consolidated:
- return result
-
- result._consolidate_inplace()
- return result
-
- def _consolidate_inplace(self):
- new_values = np.concatenate([c.sp_values for c in self._chunks])
- new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
- new_arr = SparseArray(new_values, sparse_index=new_index,
- fill_value=self.fill_value)
- self._chunks = [new_arr]
-
- def copy(self):
- """
- Return copy of the list
-
- Returns
- -------
- new_list : SparseList
- """
- new_splist = SparseList(fill_value=self.fill_value)
- new_splist._chunks = list(self._chunks)
- return new_splist
-
- def to_array(self):
- """
- Return SparseArray from data stored in the SparseList
-
- Returns
- -------
- sparr : SparseArray
- """
- self.consolidate(inplace=True)
- return self._chunks[0]
-
- def append(self, value):
- """
- Append element or array-like chunk of data to the SparseList
-
- Parameters
- ----------
- value: scalar or array-like
- """
- if is_scalar(value):
- value = [value]
-
- sparr = SparseArray(value, fill_value=self.fill_value)
- self._chunks.append(sparr)
- self._consolidated = False
-
-
-def _concat_sparse_indexes(indexes):
- all_indices = []
- total_length = 0
-
- for index in indexes:
- # increment by offset
- inds = index.to_int_index().indices + total_length
-
- all_indices.append(inds)
- total_length += index.length
-
- return splib.IntIndex(total_length, np.concatenate(all_indices))
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 0d1ea1c775aeb..e47f1919faaf5 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -52,7 +52,7 @@ class TestPDApi(Base):
# these are already deprecated; awaiting removal
deprecated_classes = ['WidePanel', 'Panel4D', 'TimeGrouper',
- 'SparseList', 'Expr', 'Term']
+ 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
diff --git a/pandas/tests/sparse/test_list.py b/pandas/tests/sparse/test_list.py
deleted file mode 100644
index 6c721ca813a21..0000000000000
--- a/pandas/tests/sparse/test_list.py
+++ /dev/null
@@ -1,111 +0,0 @@
-from pandas.compat import range
-
-from numpy import nan
-import numpy as np
-
-from pandas.core.sparse.api import SparseList, SparseArray
-import pandas.util.testing as tm
-
-
-class TestSparseList(object):
-
- def setup_method(self, method):
- self.na_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
- self.zero_data = np.array([0, 0, 1, 2, 3, 0, 4, 5, 0, 6])
-
- def test_deprecation(self):
- # see gh-13784
- with tm.assert_produces_warning(FutureWarning):
- SparseList()
-
- def test_constructor(self):
- with tm.assert_produces_warning(FutureWarning):
- lst1 = SparseList(self.na_data[:5])
- with tm.assert_produces_warning(FutureWarning):
- exp = SparseList()
-
- exp.append(self.na_data[:5])
- tm.assert_sp_list_equal(lst1, exp)
-
- def test_len(self):
- with tm.assert_produces_warning(FutureWarning):
- arr = self.na_data
- splist = SparseList()
- splist.append(arr[:5])
- assert len(splist) == 5
- splist.append(arr[5])
- assert len(splist) == 6
- splist.append(arr[6:])
- assert len(splist) == 10
-
- def test_append_na(self):
- with tm.assert_produces_warning(FutureWarning):
- arr = self.na_data
- splist = SparseList()
- splist.append(arr[:5])
- splist.append(arr[5])
- splist.append(arr[6:])
-
- sparr = splist.to_array()
- tm.assert_sp_array_equal(sparr, SparseArray(arr))
-
- def test_append_zero(self):
- with tm.assert_produces_warning(FutureWarning):
- arr = self.zero_data
- splist = SparseList(fill_value=0)
- splist.append(arr[:5])
- splist.append(arr[5])
- splist.append(arr[6:])
-
- # list always produces int64, but SA constructor
- # is platform dtype aware
- sparr = splist.to_array()
- exp = SparseArray(arr, fill_value=0)
- tm.assert_sp_array_equal(sparr, exp, check_dtype=False)
-
- def test_consolidate(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- arr = self.na_data
- exp_sparr = SparseArray(arr)
-
- splist = SparseList()
- splist.append(arr[:5])
- splist.append(arr[5])
- splist.append(arr[6:])
-
- consol = splist.consolidate(inplace=False)
- assert consol.nchunks == 1
- assert splist.nchunks == 3
- tm.assert_sp_array_equal(consol.to_array(), exp_sparr)
-
- splist.consolidate()
- assert splist.nchunks == 1
- tm.assert_sp_array_equal(splist.to_array(), exp_sparr)
-
- def test_copy(self):
- with tm.assert_produces_warning(FutureWarning,
- check_stacklevel=False):
- arr = self.na_data
- exp_sparr = SparseArray(arr)
-
- splist = SparseList()
- splist.append(arr[:5])
- splist.append(arr[5])
-
- cp = splist.copy()
- cp.append(arr[6:])
- assert splist.nchunks == 2
- tm.assert_sp_array_equal(cp.to_array(), exp_sparr)
-
- def test_getitem(self):
- with tm.assert_produces_warning(FutureWarning):
- arr = self.na_data
- splist = SparseList()
- splist.append(arr[:5])
- splist.append(arr[5])
- splist.append(arr[6:])
-
- for i in range(len(arr)):
- tm.assert_almost_equal(splist[i], arr[i])
- tm.assert_almost_equal(splist[-i], arr[-i])
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 850c42a011958..9db09f23eb849 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1582,13 +1582,6 @@ def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
for col in right:
assert (col in left)
-
-def assert_sp_list_equal(left, right):
- assert isinstance(left, pd.SparseList)
- assert isinstance(right, pd.SparseList)
-
- assert_sp_array_equal(left.to_array(), right.to_array())
-
# -----------------------------------------------------------------------------
# Others
| Deprecated in 0.19.0.
xref #14007.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18621 | 2017-12-04T02:15:50Z | 2017-12-04T11:00:55Z | 2017-12-04T11:00:55Z | 2017-12-05T07:50:13Z |
timestamp/timedelta test cleanup | diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py
index 17c818779c76d..001f6c1fdbef4 100644
--- a/pandas/tests/scalar/test_timedelta.py
+++ b/pandas/tests/scalar/test_timedelta.py
@@ -15,6 +15,28 @@
class TestTimedeltaArithmetic(object):
_multiprocess_can_split_ = True
+ def test_arithmetic_overflow(self):
+ with pytest.raises(OverflowError):
+ pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
+
+ with pytest.raises(OverflowError):
+ pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
+
+ def test_ops_error_str(self):
+ # GH 13624
+ td = Timedelta('1 day')
+
+ for left, right in [(td, 'a'), ('a', td)]:
+
+ with pytest.raises(TypeError):
+ left + right
+
+ with pytest.raises(TypeError):
+ left > right
+
+ assert not left == right
+ assert left != right
+
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
@@ -93,38 +115,53 @@ def test_ops_offsets(self):
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
- # TODO: Split by op, better name
- def test_ops(self):
+ def test_unary_ops(self):
td = Timedelta(10, unit='d')
+
+ # __neg__, __pos__
assert -td == Timedelta(-10, unit='d')
+ assert -td == Timedelta('-10d')
assert +td == Timedelta(10, unit='d')
- assert td - td == Timedelta(0, unit='ns')
+
+ # __abs__, __abs__(__neg__)
+ assert abs(td) == td
+ assert abs(-td) == td
+ assert abs(-td) == Timedelta('10d')
+
+ def test_binary_ops_nat(self):
+ td = Timedelta(10, unit='d')
+
assert (td - pd.NaT) is pd.NaT
- assert td + td == Timedelta(20, unit='d')
assert (td + pd.NaT) is pd.NaT
- assert td * 2 == Timedelta(20, unit='d')
assert (td * pd.NaT) is pd.NaT
- assert td / 2 == Timedelta(5, unit='d')
- assert td // 2 == Timedelta(5, unit='d')
- assert abs(td) == td
- assert abs(-td) == td
- assert td / td == 1
assert (td / pd.NaT) is np.nan
assert (td // pd.NaT) is np.nan
+ def test_binary_ops_integers(self):
+ td = Timedelta(10, unit='d')
+
+ assert td * 2 == Timedelta(20, unit='d')
+ assert td / 2 == Timedelta(5, unit='d')
+ assert td // 2 == Timedelta(5, unit='d')
+
# invert
- assert -td == Timedelta('-10d')
assert td * -1 == Timedelta('-10d')
assert -1 * td == Timedelta('-10d')
- assert abs(-td) == Timedelta('10d')
-
- # invalid multiply with another timedelta
- pytest.raises(TypeError, lambda: td * td)
# can't operate with integers
pytest.raises(TypeError, lambda: td + 2)
pytest.raises(TypeError, lambda: td - 2)
+ def test_binary_ops_with_timedelta(self):
+ td = Timedelta(10, unit='d')
+
+ assert td - td == Timedelta(0, unit='ns')
+ assert td + td == Timedelta(20, unit='d')
+ assert td / td == 1
+
+ # invalid multiply with another timedelta
+ pytest.raises(TypeError, lambda: td * td)
+
class TestTimedeltas(object):
_multiprocess_can_split_ = True
@@ -733,14 +770,6 @@ def test_timedelta_arithmetic(self):
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
- def test_arithmetic_overflow(self):
-
- with pytest.raises(OverflowError):
- pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
-
- with pytest.raises(OverflowError):
- pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
-
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
@@ -803,18 +832,3 @@ def test_isoformat(self):
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
assert result == expected
-
- def test_ops_error_str(self):
- # GH 13624
- td = Timedelta('1 day')
-
- for l, r in [(td, 'a'), ('a', td)]:
-
- with pytest.raises(TypeError):
- l + r
-
- with pytest.raises(TypeError):
- l > r
-
- assert not l == r
- assert l != r
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 9d97057569580..dab508de335c4 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -45,6 +45,11 @@ def test_overflow_offset(self):
with pytest.raises(OverflowError):
stamp - offset
+ def test_delta_preserve_nanos(self):
+ val = Timestamp(long(1337299200000000123))
+ result = val + timedelta(1)
+ assert result.nanosecond == val.nanosecond
+
class TestTimestampProperties(object):
@@ -68,7 +73,7 @@ def test_properties_business(self):
assert control.is_quarter_end
-class TestTimestamp(object):
+class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
@@ -290,6 +295,17 @@ def test_constructor_fromordinal(self):
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
+ # GH#3042
+ dt = datetime(2011, 4, 16, 0, 0)
+ ts = Timestamp.fromordinal(dt.toordinal())
+ assert ts.to_pydatetime() == dt
+
+ # with a tzinfo
+ stamp = Timestamp('2011-4-16', tz='US/Eastern')
+ dt_tz = stamp.to_pydatetime()
+ ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
+ assert ts.to_pydatetime() == dt_tz
+
def test_constructor_offset_depr(self):
# see gh-12160
with tm.assert_produces_warning(FutureWarning,
@@ -320,6 +336,9 @@ def test_constructor_offset_depr_fromordinal(self):
with tm.assert_raises_regex(TypeError, msg):
Timestamp.fromordinal(base.toordinal(), offset='D', freq='D')
+
+class TestTimestamp(object):
+
def test_conversion(self):
# GH 9255
ts = Timestamp('2000-01-01')
@@ -335,10 +354,10 @@ def test_conversion(self):
assert type(result) == type(expected)
assert result.dtype == expected.dtype
- def test_repr(self):
- dates = ['2014-03-07', '2014-01-01 09:00',
- '2014-01-01 00:00:00.000000001']
-
+ @pytest.mark.parametrize('freq', ['D', 'M', 'S', 'N'])
+ @pytest.mark.parametrize('date', ['2014-03-07', '2014-01-01 09:00',
+ '2014-01-01 00:00:00.000000001'])
+ def test_repr(self, date, freq):
# dateutil zone change (only matters for repr)
if (dateutil.__version__ >= LooseVersion('2.3') and
(dateutil.__version__ <= LooseVersion('2.4.0') or
@@ -349,43 +368,40 @@ def test_repr(self):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/America/Los_Angeles']
- freqs = ['D', 'M', 'S', 'N']
-
- for date in dates:
- for tz in timezones:
- for freq in freqs:
-
- # avoid to match with timezone name
- freq_repr = "'{0}'".format(freq)
- if tz.startswith('dateutil'):
- tz_repr = tz.replace('dateutil', '')
- else:
- tz_repr = tz
-
- date_only = Timestamp(date)
- assert date in repr(date_only)
- assert tz_repr not in repr(date_only)
- assert freq_repr not in repr(date_only)
- assert date_only == eval(repr(date_only))
-
- date_tz = Timestamp(date, tz=tz)
- assert date in repr(date_tz)
- assert tz_repr in repr(date_tz)
- assert freq_repr not in repr(date_tz)
- assert date_tz == eval(repr(date_tz))
-
- date_freq = Timestamp(date, freq=freq)
- assert date in repr(date_freq)
- assert tz_repr not in repr(date_freq)
- assert freq_repr in repr(date_freq)
- assert date_freq == eval(repr(date_freq))
-
- date_tz_freq = Timestamp(date, tz=tz, freq=freq)
- assert date in repr(date_tz_freq)
- assert tz_repr in repr(date_tz_freq)
- assert freq_repr in repr(date_tz_freq)
- assert date_tz_freq == eval(repr(date_tz_freq))
+ for tz in timezones:
+ # avoid to match with timezone name
+ freq_repr = "'{0}'".format(freq)
+ if tz.startswith('dateutil'):
+ tz_repr = tz.replace('dateutil', '')
+ else:
+ tz_repr = tz
+
+ date_only = Timestamp(date)
+ assert date in repr(date_only)
+ assert tz_repr not in repr(date_only)
+ assert freq_repr not in repr(date_only)
+ assert date_only == eval(repr(date_only))
+
+ date_tz = Timestamp(date, tz=tz)
+ assert date in repr(date_tz)
+ assert tz_repr in repr(date_tz)
+ assert freq_repr not in repr(date_tz)
+ assert date_tz == eval(repr(date_tz))
+
+ date_freq = Timestamp(date, freq=freq)
+ assert date in repr(date_freq)
+ assert tz_repr not in repr(date_freq)
+ assert freq_repr in repr(date_freq)
+ assert date_freq == eval(repr(date_freq))
+
+ date_tz_freq = Timestamp(date, tz=tz, freq=freq)
+ assert date in repr(date_tz_freq)
+ assert tz_repr in repr(date_tz_freq)
+ assert freq_repr in repr(date_tz_freq)
+ assert date_tz_freq == eval(repr(date_tz_freq))
+
+ def test_repr_utcoffset(self):
# This can cause the tz field to be populated, but it's redundant to
# include this information in the date-string.
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
@@ -396,6 +412,16 @@ def test_repr(self):
'pytz.FixedOffset(-240)')
assert date_with_utc_offset == eval(expr)
+ def test_timestamp_repr_pre1900(self):
+ # pre-1900
+ stamp = Timestamp('1850-01-01', tz='US/Eastern')
+ repr(stamp)
+
+ iso8601 = '1850-01-01 01:23:45.012345'
+ stamp = Timestamp(iso8601, tz='US/Eastern')
+ result = repr(stamp)
+ assert iso8601 in result
+
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12', )
@@ -474,32 +500,34 @@ def test_tz_localize_errors_ambiguous(self):
pytest.raises(AmbiguousTimeError,
ts.tz_localize, 'US/Pacific', errors='coerce')
- def test_tz_localize_roundtrip(self):
- for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
- for t in ['2014-02-01 09:00', '2014-07-08 09:00',
- '2014-11-01 17:00', '2014-11-05 00:00']:
- ts = Timestamp(t)
- localized = ts.tz_localize(tz)
- assert localized == Timestamp(t, tz=tz)
-
- with pytest.raises(TypeError):
- localized.tz_localize(tz)
-
- reset = localized.tz_localize(None)
- assert reset == ts
- assert reset.tzinfo is None
-
- def test_tz_convert_roundtrip(self):
- for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
- for t in ['2014-02-01 09:00', '2014-07-08 09:00',
- '2014-11-01 17:00', '2014-11-05 00:00']:
- ts = Timestamp(t, tz='UTC')
- converted = ts.tz_convert(tz)
-
- reset = converted.tz_convert(None)
- assert reset == Timestamp(t)
- assert reset.tzinfo is None
- assert reset == converted.tz_convert('UTC').tz_localize(None)
+ @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo',
+ 'US/Eastern', 'dateutil/US/Pacific'])
+ def test_tz_localize_roundtrip(self, tz):
+ for t in ['2014-02-01 09:00', '2014-07-08 09:00',
+ '2014-11-01 17:00', '2014-11-05 00:00']:
+ ts = Timestamp(t)
+ localized = ts.tz_localize(tz)
+ assert localized == Timestamp(t, tz=tz)
+
+ with pytest.raises(TypeError):
+ localized.tz_localize(tz)
+
+ reset = localized.tz_localize(None)
+ assert reset == ts
+ assert reset.tzinfo is None
+
+ @pytest.mark.parametrize('tz', ['UTC', 'Asia/Tokyo',
+ 'US/Eastern', 'dateutil/US/Pacific'])
+ def test_tz_convert_roundtrip(self, tz):
+ for t in ['2014-02-01 09:00', '2014-07-08 09:00',
+ '2014-11-01 17:00', '2014-11-05 00:00']:
+ ts = Timestamp(t, tz='UTC')
+ converted = ts.tz_convert(tz)
+
+ reset = converted.tz_convert(None)
+ assert reset == Timestamp(t)
+ assert reset.tzinfo is None
+ assert reset == converted.tz_convert('UTC').tz_localize(None)
def test_barely_oob_dts(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
@@ -906,6 +934,51 @@ def test_roundtrip(self):
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
+ def test_hash_equivalent(self):
+ d = {datetime(2011, 1, 1): 5}
+ stamp = Timestamp(datetime(2011, 1, 1))
+ assert d[stamp] == 5
+
+ @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
+ def test_is_leap_year(self, tz):
+ # GH 13727
+ dt = Timestamp('2000-01-01 00:00:00', tz=tz)
+ assert dt.is_leap_year
+ assert isinstance(dt.is_leap_year, bool)
+
+ dt = Timestamp('1999-01-01 00:00:00', tz=tz)
+ assert not dt.is_leap_year
+
+ dt = Timestamp('2004-01-01 00:00:00', tz=tz)
+ assert dt.is_leap_year
+
+ dt = Timestamp('2100-01-01 00:00:00', tz=tz)
+ assert not dt.is_leap_year
+
+ def test_timestamp(self):
+ # GH#17329
+ # tz-naive --> treat it as if it were UTC for purposes of timestamp()
+ ts = Timestamp.now()
+ uts = ts.replace(tzinfo=utc)
+ assert ts.timestamp() == uts.timestamp()
+
+ tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central')
+ utsc = tsc.tz_convert('UTC')
+
+ # utsc is a different representation of the same time
+ assert tsc.timestamp() == utsc.timestamp()
+
+ if PY3:
+
+ # datetime.timestamp() converts in the local timezone
+ with tm.set_timezone('UTC'):
+
+ # should agree with datetime.timestamp method
+ dt = ts.to_pydatetime()
+ assert dt.timestamp() == ts.timestamp()
+
+
+class TestTimestampComparison(object):
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
@@ -937,7 +1010,6 @@ def test_comparison(self):
assert other >= val
def test_compare_invalid(self):
-
# GH 8058
val = Timestamp('20130101 12:01:02')
assert not val == 'foo'
@@ -1028,16 +1100,6 @@ def test_cant_compare_tz_naive_w_aware_dateutil(self):
assert not a == b.to_pydatetime()
assert not a.to_pydatetime() == b
- def test_delta_preserve_nanos(self):
- val = Timestamp(long(1337299200000000123))
- result = val + timedelta(1)
- assert result.nanosecond == val.nanosecond
-
- def test_hash_equivalent(self):
- d = {datetime(2011, 1, 1): 5}
- stamp = Timestamp(datetime(2011, 1, 1))
- assert d[stamp] == 5
-
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
@@ -1098,43 +1160,20 @@ def test_timestamp_compare_series(self):
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
- def test_is_leap_year(self):
- # GH 13727
- for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
- dt = Timestamp('2000-01-01 00:00:00', tz=tz)
- assert dt.is_leap_year
- assert isinstance(dt.is_leap_year, bool)
-
- dt = Timestamp('1999-01-01 00:00:00', tz=tz)
- assert not dt.is_leap_year
-
- dt = Timestamp('2004-01-01 00:00:00', tz=tz)
- assert dt.is_leap_year
-
- dt = Timestamp('2100-01-01 00:00:00', tz=tz)
- assert not dt.is_leap_year
-
- def test_timestamp(self):
- # GH#17329
- # tz-naive --> treat it as if it were UTC for purposes of timestamp()
- ts = Timestamp.now()
- uts = ts.replace(tzinfo=utc)
- assert ts.timestamp() == uts.timestamp()
-
- tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central')
- utsc = tsc.tz_convert('UTC')
-
- # utsc is a different representation of the same time
- assert tsc.timestamp() == utsc.timestamp()
-
- if PY3:
-
- # datetime.timestamp() converts in the local timezone
- with tm.set_timezone('UTC'):
+ def test_timestamp_compare_with_early_datetime(self):
+ # e.g. datetime.min
+ stamp = Timestamp('2012-01-01')
- # should agree with datetime.timestamp method
- dt = ts.to_pydatetime()
- assert dt.timestamp() == ts.timestamp()
+ assert not stamp == datetime.min
+ assert not stamp == datetime(1600, 1, 1)
+ assert not stamp == datetime(2700, 1, 1)
+ assert stamp != datetime.min
+ assert stamp != datetime(1600, 1, 1)
+ assert stamp != datetime(2700, 1, 1)
+ assert stamp > datetime(1600, 1, 1)
+ assert stamp >= datetime(1600, 1, 1)
+ assert stamp < datetime(2700, 1, 1)
+ assert stamp <= datetime(2700, 1, 1)
class TestTimestampNsOperations(object):
@@ -1281,7 +1320,9 @@ def test_addition_subtraction_preserve_frequency(self):
assert (timestamp_instance -
timedelta64_instance).freq == original_freq
- def test_resolution(self):
+ @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo', 'US/Eastern',
+ 'dateutil/US/Eastern'])
+ def test_resolution(self, tz):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
@@ -1290,12 +1331,9 @@ def test_resolution(self):
RESO_HR, RESO_MIN,
RESO_SEC, RESO_MS,
RESO_US]):
- for tz in [None, 'Asia/Tokyo', 'US/Eastern',
- 'dateutil/US/Eastern']:
- idx = date_range(start='2013-04-01', periods=30, freq=freq,
- tz=tz)
- result = period.resolution(idx.asi8, idx.tz)
- assert result == expected
+ idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
+ result = period.resolution(idx.asi8, idx.tz)
+ assert result == expected
class TestTimestampToJulianDate(object):
@@ -1321,8 +1359,7 @@ def test_compare_hour13(self):
assert r == 2451769.0416666666666666
-class TestTimeSeries(object):
-
+class TestTimestampConversion(object):
def test_timestamp_to_datetime(self):
stamp = Timestamp('20090415', tz='US/Eastern', freq='D')
dtval = stamp.to_pydatetime()
@@ -1350,47 +1387,25 @@ def test_timestamp_to_datetime_explicit_dateutil(self):
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
- def test_timestamp_date_out_of_range(self):
- pytest.raises(ValueError, Timestamp, '1676-01-01')
- pytest.raises(ValueError, Timestamp, '2263-01-01')
-
- def test_timestamp_repr(self):
- # pre-1900
- stamp = Timestamp('1850-01-01', tz='US/Eastern')
- repr(stamp)
-
- iso8601 = '1850-01-01 01:23:45.012345'
- stamp = Timestamp(iso8601, tz='US/Eastern')
- result = repr(stamp)
- assert iso8601 in result
-
- def test_timestamp_from_ordinal(self):
+ def test_to_datetime_bijective(self):
+ # Ensure that converting to datetime and back only loses precision
+ # by going from nanoseconds to microseconds.
+ exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
+ with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
+ assert (Timestamp(Timestamp.max.to_pydatetime()).value / 1000 ==
+ Timestamp.max.value / 1000)
- # GH 3042
- dt = datetime(2011, 4, 16, 0, 0)
- ts = Timestamp.fromordinal(dt.toordinal())
- assert ts.to_pydatetime() == dt
+ exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
+ with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
+ assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 ==
+ Timestamp.min.value / 1000)
- # with a tzinfo
- stamp = Timestamp('2011-4-16', tz='US/Eastern')
- dt_tz = stamp.to_pydatetime()
- ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
- assert ts.to_pydatetime() == dt_tz
- def test_timestamp_compare_with_early_datetime(self):
- # e.g. datetime.min
- stamp = Timestamp('2012-01-01')
+class TestTimeSeries(object):
- assert not stamp == datetime.min
- assert not stamp == datetime(1600, 1, 1)
- assert not stamp == datetime(2700, 1, 1)
- assert stamp != datetime.min
- assert stamp != datetime(1600, 1, 1)
- assert stamp != datetime(2700, 1, 1)
- assert stamp > datetime(1600, 1, 1)
- assert stamp >= datetime(1600, 1, 1)
- assert stamp < datetime(2700, 1, 1)
- assert stamp <= datetime(2700, 1, 1)
+ def test_timestamp_date_out_of_range(self):
+ pytest.raises(ValueError, Timestamp, '1676-01-01')
+ pytest.raises(ValueError, Timestamp, '2263-01-01')
def test_timestamp_equality(self):
@@ -1483,16 +1498,3 @@ def test_min_valid(self):
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
-
- def test_to_datetime_bijective(self):
- # Ensure that converting to datetime and back only loses precision
- # by going from nanoseconds to microseconds.
- exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
- with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
- assert (Timestamp(Timestamp.max.to_pydatetime()).value / 1000 ==
- Timestamp.max.value / 1000)
-
- exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
- with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
- assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 ==
- Timestamp.min.value / 1000)
| More informative names in timedeltas tests
remove/merge redundant tests in timestamps. Separate some big test classes thematically.
Use pytest.parametrize in a few places | https://api.github.com/repos/pandas-dev/pandas/pulls/18619 | 2017-12-04T01:20:17Z | 2017-12-04T11:21:50Z | 2017-12-04T11:21:50Z | 2017-12-08T19:38:19Z |
handle DST appropriately in Timestamp.replace | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 92564285bb36a..a670bf2348bfc 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -381,3 +381,4 @@ Other
^^^^^
- Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`)
+- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`)
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index c7744bf9db58e..ffc1c89dd8adf 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -33,7 +33,7 @@ from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds,
is_leapyear)
from timedeltas import Timedelta
from timedeltas cimport delta_to_nanoseconds
-from timezones cimport get_timezone, is_utc, maybe_get_tz
+from timezones cimport get_timezone, is_utc, maybe_get_tz, treat_tz_as_pytz
# ----------------------------------------------------------------------
# Constants
@@ -922,8 +922,18 @@ class Timestamp(_Timestamp):
_tzinfo = tzinfo
# reconstruct & check bounds
- ts_input = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min,
- dts.sec, dts.us, tzinfo=_tzinfo)
+ if _tzinfo is not None and treat_tz_as_pytz(_tzinfo):
+ # replacing across a DST boundary may induce a new tzinfo object
+ # see GH#18319
+ ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day,
+ dts.hour, dts.min, dts.sec,
+ dts.us))
+ _tzinfo = ts_input.tzinfo
+ else:
+ ts_input = datetime(dts.year, dts.month, dts.day,
+ dts.hour, dts.min, dts.sec, dts.us,
+ tzinfo=_tzinfo)
+
ts = convert_datetime_to_tsobject(ts_input, _tzinfo)
value = ts.value + (dts.ps // 1000)
if value != NPY_NAT:
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index b3813d03532fb..7ae63d7d080cc 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -61,6 +61,10 @@ def tzstr(self, tz):
def localize(self, tz, x):
return tz.localize(x)
+ def normalize(self, ts):
+ tzinfo = ts.tzinfo
+ return tzinfo.normalize(ts)
+
def cmptz(self, tz1, tz2):
# Compare two timezones. Overridden in subclass to parameterize
# tests.
@@ -935,6 +939,27 @@ def test_datetimeindex_tz_nat(self):
assert isna(idx[1])
assert idx[0].tzinfo is not None
+ def test_replace_across_dst(self):
+ # GH#18319 check that 1) timezone is correctly normalized and
+ # 2) that hour is not incorrectly changed by this normalization
+ tz = self.tz('US/Eastern')
+
+ ts_naive = Timestamp('2017-12-03 16:03:30')
+ ts_aware = self.localize(tz, ts_naive)
+
+ # Preliminary sanity-check
+ assert ts_aware == self.normalize(ts_aware)
+
+ # Replace across DST boundary
+ ts2 = ts_aware.replace(month=6)
+
+ # Check that `replace` preserves hour literal
+ assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute)
+
+ # Check that post-replace object is appropriately normalized
+ ts2b = self.normalize(ts2)
+ assert ts2 == ts2b
+
class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
@@ -959,6 +984,10 @@ def cmptz(self, tz1, tz2):
def localize(self, tz, x):
return x.replace(tzinfo=tz)
+ def normalize(self, ts):
+ # no-op for dateutil
+ return ts
+
@td.skip_if_windows
def test_utc_with_system_utc(self):
from pandas._libs.tslibs.timezones import maybe_get_tz
| ```
ts = pd.Timestamp('2017-12-03 16:03:24', tz='US/Eastern')
# Timestamp('2017-12-03 16:03:24-0500', tz='US/Eastern')
ts2 = ts.replace(month=6) # <-- across DST boundary
# master --> Timestamp('2017-06-03 16:03:24-0500', tz='US/Eastern')
# PR --> Timestamp('2017-06-03 16:03:24-0400', tz='US/Eastern')
ts3 = ts2.tzinfo.normalize(ts2)
# master --> Timestamp('2017-06-03 18:03:24-0400', tz='US/Eastern')
# PR --> Timestamp('2017-06-03 16:03:24-0400', tz='US/Eastern')
assert ts3 == ts2
assert ts3.hour == ts.hour
```
Both assertions fail under master, pass under this PR.
The whatsnew entry is not super-informative. Any ideas for a one-sentence description of this fix?
- [x] closes #18319
xref #7825 (I think)
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18618 | 2017-12-04T00:59:44Z | 2018-01-05T23:37:26Z | 2018-01-05T23:37:26Z | 2018-01-23T04:40:39Z |
DOC: Removing keep=False from docs on nlargest/nsmallest | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ff42e39d9dbdd..f377fa1f5731a 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3769,7 +3769,7 @@ def nlargest(self, n, columns, keep='first'):
Number of items to retrieve
columns : list or str
Column name or names to order by
- keep : {'first', 'last', False}, default 'first'
+ keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
@@ -3804,7 +3804,7 @@ def nsmallest(self, n, columns, keep='first'):
Number of items to retrieve
columns : list or str
Column name or names to order by
- keep : {'first', 'last', False}, default 'first'
+ keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5d0e6907a6595..34ad493d85fbc 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2058,7 +2058,7 @@ def nlargest(self, n=5, keep='first'):
----------
n : int
Return this many descending sorted values
- keep : {'first', 'last', False}, default 'first'
+ keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
@@ -2105,7 +2105,7 @@ def nsmallest(self, n=5, keep='first'):
----------
n : int
Return this many ascending sorted values
- keep : {'first', 'last', False}, default 'first'
+ keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
| - [x] closes #18559
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/18617 | 2017-12-03T23:39:33Z | 2017-12-04T11:27:30Z | 2017-12-04T11:27:30Z | 2017-12-11T20:21:06Z |
CLN: Remove io.data and io.wb | diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index f5c65e175b0db..7c7457df8ea93 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -137,7 +137,6 @@ See the package overview for more detail about what's in the library.
visualization
style
io
- remote_data
enhancingperf
sparse
gotchas
diff --git a/doc/source/remote_data.rst b/doc/source/remote_data.rst
deleted file mode 100644
index 5054bb7bcd12e..0000000000000
--- a/doc/source/remote_data.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-.. _remote_data:
-
-.. currentmodule:: pandas
-
-******************
-Remote Data Access
-******************
-
-.. _remote_data.pandas_datareader:
-
-DataReader
-----------
-
-The sub-package ``pandas.io.data`` was removed in
-`v.0.19 <http://pandas-docs.github.io/pandas-docs-travis/whatsnew.html#v0-19-0-october-2-2016>`__.
-Instead there has been created a separately installable
-`pandas-datareader package <https://github.com/pydata/pandas-datareader>`__.
-This will allow the data modules to be independently updated on your pandas installation.
-
-For code older than < 0.19 you should replace the imports of the following:
-
-.. code-block:: python
-
- from pandas.io import data, wb
-
-With:
-
-.. code-block:: python
-
- from pandas_datareader import data, wb
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 5e605ecb7d8d5..a7e2fd444a284 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -150,6 +150,7 @@ Removal of prior version deprecations/changes
- ``pd.tseries.util.isleapyear`` has been removed (deprecated since v0.19). Use ``.is_leap_year`` property in Datetime-likes instead (:issue:`18370`)
- ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd.merge_ordered`` instead (:issue:`18459`)
- The ``SparseList`` class has been removed (:issue:`14007`)
+- The ``pandas.io.wb`` and ``pandas.io.data`` stub modules have been removed (:issue:`13735`)
.. _whatsnew_0220.performance:
diff --git a/pandas/io/data.py b/pandas/io/data.py
deleted file mode 100644
index e76790a6ab98b..0000000000000
--- a/pandas/io/data.py
+++ /dev/null
@@ -1,6 +0,0 @@
-raise ImportError(
- "The pandas.io.data module is moved to a separate package "
- "(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
- "the import ``from pandas.io import data, wb`` to "
- "``from pandas_datareader import data, wb``.")
diff --git a/pandas/io/wb.py b/pandas/io/wb.py
deleted file mode 100644
index 5dc4d9ce1adc4..0000000000000
--- a/pandas/io/wb.py
+++ /dev/null
@@ -1,6 +0,0 @@
-raise ImportError(
- "The pandas.io.wb module is moved to a separate package "
- "(pandas-datareader). After installing the pandas-datareader package "
- "(https://github.com/pydata/pandas-datareader), you can change "
- "the import ``from pandas.io import data, wb`` to "
- "``from pandas_datareader import data, wb``.")
| Deprecated in 0.17.0.
xref #13735.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18612 | 2017-12-03T21:21:31Z | 2017-12-05T00:23:55Z | 2017-12-05T00:23:55Z | 2017-12-05T07:50:02Z |
CLN: ASV groupby benchmarks | diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py
index 13b5cd2b06032..3abf2338e1d94 100644
--- a/asv_bench/benchmarks/groupby.py
+++ b/asv_bench/benchmarks/groupby.py
@@ -1,85 +1,108 @@
-from .pandas_vb_common import *
from string import ascii_letters, digits
from itertools import product
+from functools import partial
+import numpy as np
+from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
+ TimeGrouper, Categorical)
+import pandas.util.testing as tm
-class groupby_agg_builtins(object):
+from .pandas_vb_common import setup # noqa
+
+
+class ApplyDictReturn(object):
goal_time = 0.2
def setup(self):
- np.random.seed(27182)
- self.n = 100000
- self.df = DataFrame(np.random.randint(1, (self.n / 100), (self.n, 3)), columns=['jim', 'joe', 'jolie'])
+ self.labels = np.arange(1000).repeat(10)
+ self.data = Series(np.random.randn(len(self.labels)))
+ self.f = lambda x: {'first': x.values[0], 'last': x.values[(-1)]}
- def time_groupby_agg_builtins1(self):
- self.df.groupby('jim').agg([sum, min, max])
+ def time_groupby_apply_dict_return(self):
+ self.data.groupby(self.labels).apply(self.f)
- def time_groupby_agg_builtins2(self):
- self.df.groupby(['jim', 'joe']).agg([sum, min, max])
-#----------------------------------------------------------------------
-# dict return values
+class Apply(object):
-class groupby_apply_dict_return(object):
goal_time = 0.2
def setup(self):
- self.labels = np.arange(1000).repeat(10)
- self.data = Series(randn(len(self.labels)))
- self.f = (lambda x: {'first': x.values[0], 'last': x.values[(-1)], })
+ N = 10**4
+ labels = np.random.randint(0, 2000, size=N)
+ labels2 = np.random.randint(0, 3, size=N)
+ self.df = DataFrame({'key': labels,
+ 'key2': labels2,
+ 'value1': np.random.randn(N),
+ 'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4),
+ })
+ self.scalar_function = lambda x: 1
+
+ def time_scalar_function_multi_col(self):
+ self.df.groupby(['key', 'key2']).apply(self.scalar_function)
- def time_groupby_apply_dict_return(self):
- self.data.groupby(self.labels).apply(self.f)
+ def time_scalar_function_single_col(self):
+ self.df.groupby('key').apply(self.scalar_function)
+ @staticmethod
+ def df_copy_function(g):
+ # ensure that the group name is available (see GH #15062)
+ g.name
+ return g.copy()
+
+ def time_copy_function_multi_col(self):
+ self.df.groupby(['key', 'key2']).apply(self.df_copy_function)
+
+ def time_copy_overhead_single_col(self):
+ self.df.groupby('key').apply(self.df_copy_function)
-#----------------------------------------------------------------------
-# groups
class Groups(object):
- goal_time = 0.1
- size = 2 ** 22
- data = {
- 'int64_small': Series(np.random.randint(0, 100, size=size)),
- 'int64_large' : Series(np.random.randint(0, 10000, size=size)),
- 'object_small': Series(tm.makeStringIndex(100).take(np.random.randint(0, 100, size=size))),
- 'object_large': Series(tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=size)))
- }
+ goal_time = 0.2
- param_names = ['df']
+ param_names = ['key']
params = ['int64_small', 'int64_large', 'object_small', 'object_large']
- def setup(self, df):
- self.df = self.data[df]
+ def setup_cache(self):
+ size = 10**6
+ data = {'int64_small': Series(np.random.randint(0, 100, size=size)),
+ 'int64_large': Series(np.random.randint(0, 10000, size=size)),
+ 'object_small': Series(
+ tm.makeStringIndex(100).take(
+ np.random.randint(0, 100, size=size))),
+ 'object_large': Series(
+ tm.makeStringIndex(10000).take(
+ np.random.randint(0, 10000, size=size)))}
+ return data
- def time_groupby_groups(self, df):
- self.df.groupby(self.df).groups
+ def setup(self, data, key):
+ self.ser = data[key]
+ def time_series_groups(self, data, key):
+ self.ser.groupby(self.ser).groups
-#----------------------------------------------------------------------
-# First / last functions
class FirstLast(object):
+
goal_time = 0.2
param_names = ['dtype']
params = ['float32', 'float64', 'datetime', 'object']
- # with datetimes (GH7555)
-
def setup(self, dtype):
-
+ N = 10**5
+ # with datetimes (GH7555)
if dtype == 'datetime':
- self.df = DataFrame(
- {'values': date_range('1/1/2011', periods=100000, freq='s'),
- 'key': range(100000),})
+ self.df = DataFrame({'values': date_range('1/1/2011',
+ periods=N,
+ freq='s'),
+ 'key': range(N)})
elif dtype == 'object':
- self.df = DataFrame(
- {'values': (['foo'] * 100000),
- 'key': range(100000)})
+ self.df = DataFrame({'values': ['foo'] * N,
+ 'key': range(N)})
else:
- labels = np.arange(10000).repeat(10)
- data = Series(randn(len(labels)), dtype=dtype)
+ labels = np.arange(N / 10).repeat(10)
+ data = Series(np.random.randn(len(labels)), dtype=dtype)
data[::3] = np.nan
data[1::3] = np.nan
labels = labels.take(np.random.permutation(len(labels)))
@@ -91,313 +114,249 @@ def time_groupby_first(self, dtype):
def time_groupby_last(self, dtype):
self.df.groupby('key').last()
- def time_groupby_nth_any(self, dtype):
+ def time_groupby_nth_all(self, dtype):
self.df.groupby('key').nth(0, dropna='all')
def time_groupby_nth_none(self, dtype):
self.df.groupby('key').nth(0)
-#----------------------------------------------------------------------
-# DataFrame Apply overhead
+class GroupManyLabels(object):
-class groupby_frame_apply(object):
goal_time = 0.2
+ params = [1, 1000]
+ param_names = ['ncols']
- def setup(self):
- self.N = 10000
- self.labels = np.random.randint(0, 2000, size=self.N)
- self.labels2 = np.random.randint(0, 3, size=self.N)
- self.df = DataFrame({
- 'key': self.labels,
- 'key2': self.labels2,
- 'value1': np.random.randn(self.N),
- 'value2': (['foo', 'bar', 'baz', 'qux'] * (self.N // 4)),
- })
-
- @staticmethod
- def scalar_function(g):
- return 1
+ def setup(self, ncols):
+ N = 1000
+ data = np.random.randn(N, ncols)
+ self.labels = np.random.randint(0, 100, size=N)
+ self.df = DataFrame(data)
- def time_groupby_frame_apply_scalar_function(self):
- self.df.groupby(['key', 'key2']).apply(self.scalar_function)
-
- def time_groupby_frame_apply_scalar_function_overhead(self):
- self.df.groupby('key').apply(self.scalar_function)
+ def time_sum(self, ncols):
+ self.df.groupby(self.labels).sum()
- @staticmethod
- def df_copy_function(g):
- # ensure that the group name is available (see GH #15062)
- g.name
- return g.copy()
- def time_groupby_frame_df_copy_function(self):
- self.df.groupby(['key', 'key2']).apply(self.df_copy_function)
+class Nth(object):
- def time_groupby_frame_apply_df_copy_overhead(self):
- self.df.groupby('key').apply(self.df_copy_function)
+ goal_time = 0.2
+ def setup_cache(self):
+ df = DataFrame(np.random.randint(1, 100, (10000, 2)))
+ df.iloc[1, 1] = np.nan
+ return df
-#----------------------------------------------------------------------
-# 2d grouping, aggregate many columns
+ def time_frame_nth_any(self, df):
+ df.groupby(0).nth(0, dropna='any')
-class groupby_frame_cython_many_columns(object):
- goal_time = 0.2
+ def time_frame_nth(self, df):
+ df.groupby(0).nth(0)
- def setup(self):
- self.labels = np.random.randint(0, 100, size=1000)
- self.df = DataFrame(randn(1000, 1000))
+ def time_series_nth_any(self, df):
+ df[1].groupby(df[0]).nth(0, dropna='any')
- def time_sum(self):
- self.df.groupby(self.labels).sum()
+ def time_series_nth(self, df):
+ df[1].groupby(df[0]).nth(0)
-#----------------------------------------------------------------------
-# single key, long, integer key
+class DateAttributes(object):
-class groupby_frame_singlekey_integer(object):
goal_time = 0.2
def setup(self):
- self.data = np.random.randn(100000, 1)
- self.labels = np.random.randint(0, 1000, size=100000)
- self.df = DataFrame(self.data)
+ rng = date_range('1/1/2000', '12/31/2005', freq='H')
+ self.year, self.month, self.day = rng.year, rng.month, rng.day
+ self.ts = Series(np.random.randn(len(rng)), index=rng)
- def time_sum(self):
- self.df.groupby(self.labels).sum()
+ def time_len_groupby_object(self):
+ len(self.ts.groupby([self.year, self.month, self.day]))
-#----------------------------------------------------------------------
-# DataFrame nth
+class Int64(object):
-class groupby_nth(object):
goal_time = 0.2
def setup(self):
- self.df = DataFrame(np.random.randint(1, 100, (10000, 2)))
-
- def time_groupby_frame_nth_any(self):
- self.df.groupby(0).nth(0, dropna='any')
-
- def time_groupby_frame_nth_none(self):
- self.df.groupby(0).nth(0)
+ arr = np.random.randint(-1 << 12, 1 << 12, (1 << 17, 5))
+ i = np.random.choice(len(arr), len(arr) * 5)
+ arr = np.vstack((arr, arr[i]))
+ i = np.random.permutation(len(arr))
+ arr = arr[i]
+ self.cols = list('abcde')
+ self.df = DataFrame(arr, columns=self.cols)
+ self.df['jim'], self.df['joe'] = np.random.randn(2, len(self.df)) * 10
- def time_groupby_series_nth_any(self):
- self.df[1].groupby(self.df[0]).nth(0, dropna='any')
+ def time_overflow(self):
+ self.df.groupby(self.cols).max()
- def time_groupby_series_nth_none(self):
- self.df[1].groupby(self.df[0]).nth(0)
+class CountMultiDtype(object):
-#----------------------------------------------------------------------
-# groupby_indices replacement, chop up Series
-
-class groupby_indices(object):
goal_time = 0.2
- def setup(self):
- try:
- self.rng = date_range('1/1/2000', '12/31/2005', freq='H')
- (self.year, self.month, self.day) = (self.rng.year, self.rng.month, self.rng.day)
- except:
- self.rng = date_range('1/1/2000', '12/31/2000', offset=datetools.Hour())
- self.year = self.rng.map((lambda x: x.year))
- self.month = self.rng.map((lambda x: x.month))
- self.day = self.rng.map((lambda x: x.day))
- self.ts = Series(np.random.randn(len(self.rng)), index=self.rng)
-
- def time_groupby_indices(self):
- len(self.ts.groupby([self.year, self.month, self.day]))
-
+ def setup_cache(self):
+ n = 10000
+ offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
+ dates = np.datetime64('now') + offsets
+ dates[np.random.rand(n) > 0.5] = np.datetime64('nat')
+ offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat')
+ value2 = np.random.randn(n)
+ value2[np.random.rand(n) > 0.5] = np.nan
+ obj = np.random.choice(list('ab'), size=n).astype(object)
+ obj[np.random.randn(n) > 0.5] = np.nan
+ df = DataFrame({'key1': np.random.randint(0, 500, size=n),
+ 'key2': np.random.randint(0, 100, size=n),
+ 'dates': dates,
+ 'value2': value2,
+ 'value3': np.random.randn(n),
+ 'ints': np.random.randint(0, 1000, size=n),
+ 'obj': obj,
+ 'offsets': offsets})
+ return df
+
+ def time_multi_count(self, df):
+ df.groupby(['key1', 'key2']).count()
+
+
+class CountInt(object):
-class groupby_int64_overflow(object):
goal_time = 0.2
- def setup(self):
- self.arr = np.random.randint(((-1) << 12), (1 << 12), ((1 << 17), 5))
- self.i = np.random.choice(len(self.arr), (len(self.arr) * 5))
- self.arr = np.vstack((self.arr, self.arr[self.i]))
- self.i = np.random.permutation(len(self.arr))
- self.arr = self.arr[self.i]
- self.df = DataFrame(self.arr, columns=list('abcde'))
- (self.df['jim'], self.df['joe']) = (np.random.randn(2, len(self.df)) * 10)
+ def setup_cache(self):
+ n = 10000
+ df = DataFrame({'key1': np.random.randint(0, 500, size=n),
+ 'key2': np.random.randint(0, 100, size=n),
+ 'ints': np.random.randint(0, 1000, size=n),
+ 'ints2': np.random.randint(0, 1000, size=n)})
+ return df
- def time_groupby_int64_overflow(self):
- self.df.groupby(list('abcde')).max()
+ def time_int_count(self, df):
+ df.groupby(['key1', 'key2']).count()
+ def time_int_nunique(self, df):
+ df.groupby(['key1', 'key2']).nunique()
-#----------------------------------------------------------------------
-# count() speed
-class groupby_multi_count(object):
- goal_time = 0.2
+class AggFunctions(object):
- def setup(self):
- self.n = 10000
- self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]')
- self.dates = (np.datetime64('now') + self.offsets)
- self.dates[(np.random.rand(self.n) > 0.5)] = np.datetime64('nat')
- self.offsets[(np.random.rand(self.n) > 0.5)] = np.timedelta64('nat')
- self.value2 = np.random.randn(self.n)
- self.value2[(np.random.rand(self.n) > 0.5)] = np.nan
- self.obj = np.random.choice(list('ab'), size=self.n).astype(object)
- self.obj[(np.random.randn(self.n) > 0.5)] = np.nan
- self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n),
- 'key2': np.random.randint(0, 100, size=self.n),
- 'dates': self.dates,
- 'value2': self.value2,
- 'value3': np.random.randn(self.n),
- 'ints': np.random.randint(0, 1000, size=self.n),
- 'obj': self.obj,
- 'offsets': self.offsets, })
-
- def time_groupby_multi_count(self):
- self.df.groupby(['key1', 'key2']).count()
-
-
-class groupby_int_count(object):
goal_time = 0.2
- def setup(self):
- self.n = 10000
- self.df = DataFrame({'key1': randint(0, 500, size=self.n),
- 'key2': randint(0, 100, size=self.n),
- 'ints': randint(0, 1000, size=self.n),
- 'ints2': randint(0, 1000, size=self.n), })
-
- def time_groupby_int_count(self):
- self.df.groupby(['key1', 'key2']).count()
+ def setup_cache(self):
+ N = 10**5
+ fac1 = np.array(['A', 'B', 'C'], dtype='O')
+ fac2 = np.array(['one', 'two'], dtype='O')
+ df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=N)),
+ 'key2': fac2.take(np.random.randint(0, 2, size=N)),
+ 'value1': np.random.randn(N),
+ 'value2': np.random.randn(N),
+ 'value3': np.random.randn(N)})
+ return df
+ def time_different_str_functions(self, df):
+ df.groupby(['key1', 'key2']).agg({'value1': 'mean',
+ 'value2': 'var',
+ 'value3': 'sum'})
-#----------------------------------------------------------------------
-# nunique() speed
+ def time_different_numpy_functions(self, df):
+ df.groupby(['key1', 'key2']).agg({'value1': np.mean,
+ 'value2': np.var,
+ 'value3': np.sum})
-class groupby_nunique(object):
+ def time_different_python_functions_multicol(self, df):
+ df.groupby(['key1', 'key2']).agg([sum, min, max])
- def setup(self):
- self.n = 10000
- self.df = DataFrame({'key1': randint(0, 500, size=self.n),
- 'key2': randint(0, 100, size=self.n),
- 'ints': randint(0, 1000, size=self.n),
- 'ints2': randint(0, 1000, size=self.n), })
+ def time_different_python_functions_singlecol(self, df):
+ df.groupby('key1').agg([sum, min, max])
- def time_groupby_nunique(self):
- self.df.groupby(['key1', 'key2']).nunique()
+class GroupStrings(object):
-#----------------------------------------------------------------------
-# group with different functions per column
-
-class groupby_agg_multi(object):
- goal_time = 0.2
-
- def setup(self):
- self.fac1 = np.array(['A', 'B', 'C'], dtype='O')
- self.fac2 = np.array(['one', 'two'], dtype='O')
- self.df = DataFrame({'key1': self.fac1.take(np.random.randint(0, 3, size=100000)), 'key2': self.fac2.take(np.random.randint(0, 2, size=100000)), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), })
-
- def time_groupby_multi_different_functions(self):
- self.df.groupby(['key1', 'key2']).agg({'value1': 'mean', 'value2': 'var', 'value3': 'sum'})
-
- def time_groupby_multi_different_numpy_functions(self):
- self.df.groupby(['key1', 'key2']).agg({'value1': np.mean, 'value2': np.var, 'value3': np.sum})
-
-
-class groupby_multi_index(object):
goal_time = 0.2
def setup(self):
- self.n = (((5 * 7) * 11) * (1 << 9))
- self.alpha = list(map(''.join, product((ascii_letters + digits), repeat=4)))
- self.f = (lambda k: np.repeat(np.random.choice(self.alpha, (self.n // k)), k))
- self.df = DataFrame({'a': self.f(11), 'b': self.f(7), 'c': self.f(5), 'd': self.f(1), })
+ n = (5 * 7 * 11) * (1 << 9)
+ alpha = list(map(''.join, product((ascii_letters + digits), repeat=4)))
+ f = lambda k: np.repeat(np.random.choice(alpha, (n // k)), k)
+ self.df = DataFrame({'a': f(11),
+ 'b': f(7),
+ 'c': f(5),
+ 'd': f(1)})
self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3)
- self.i = np.random.permutation(len(self.df))
- self.df = self.df.iloc[self.i].reset_index(drop=True).copy()
+ i = np.random.permutation(len(self.df))
+ self.df = self.df.iloc[i].reset_index(drop=True)
- def time_groupby_multi_index(self):
+ def time_multi_columns(self):
self.df.groupby(list('abcd')).max()
-class groupby_multi(object):
+class MultiColumn(object):
+
goal_time = 0.2
def setup(self):
- self.N = 100000
- self.ngroups = 100
- self.df = DataFrame({'key1': self.get_test_data(ngroups=self.ngroups), 'key2': self.get_test_data(ngroups=self.ngroups), 'data1': np.random.randn(self.N), 'data2': np.random.randn(self.N), })
- self.simple_series = Series(np.random.randn(self.N))
- self.key1 = self.df['key1']
-
- def get_test_data(self, ngroups=100, n=100000):
- self.unique_groups = range(self.ngroups)
- self.arr = np.asarray(np.tile(self.unique_groups, int(n / self.ngroups)), dtype=object)
- if (len(self.arr) < n):
- self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object)
- random.shuffle(self.arr)
- return self.arr
-
- def f(self):
- self.df.groupby(['key1', 'key2']).agg((lambda x: x.values.sum()))
-
- def time_groupby_multi_cython(self):
+ N = 10**5
+ key1 = np.tile(np.arange(100, dtype=object), 1000)
+ key2 = key1.copy()
+ np.random.shuffle(key1)
+ np.random.shuffle(key2)
+ self.df = DataFrame({'key1': key1,
+ 'key2': key2,
+ 'data1': np.random.randn(N),
+ 'data2': np.random.randn(N)})
+ self.f = lambda x: x.values.sum()
+
+ def time_lambda_sum(self):
+ self.df.groupby(['key1', 'key2']).agg(self.f)
+
+ def time_cython_sum(self):
self.df.groupby(['key1', 'key2']).sum()
- def time_groupby_multi_python(self):
- self.df.groupby(['key1', 'key2'])['data1'].agg((lambda x: x.values.sum()))
-
- def time_groupby_multi_series_op(self):
- self.df.groupby(['key1', 'key2'])['data1'].agg(np.std)
-
- def time_groupby_series_simple_cython(self):
- self.simple_series.groupby(self.key1).sum()
+ def time_col_select_lambda_sum(self):
+ self.df.groupby(['key1', 'key2'])['data1'].agg(self.f)
- def time_groupby_series_simple_rank(self):
- self.df.groupby('key1').rank(pct=True)
+ def time_col_select_numpy_sum(self):
+ self.df.groupby(['key1', 'key2'])['data1'].agg(np.sum)
-#----------------------------------------------------------------------
-# size() speed
+class Size(object):
-class groupby_size(object):
goal_time = 0.2
def setup(self):
- self.n = 100000
- self.offsets = np.random.randint(self.n, size=self.n).astype('timedelta64[ns]')
- self.dates = (np.datetime64('now') + self.offsets)
- self.df = DataFrame({'key1': np.random.randint(0, 500, size=self.n), 'key2': np.random.randint(0, 100, size=self.n), 'value1': np.random.randn(self.n), 'value2': np.random.randn(self.n), 'value3': np.random.randn(self.n), 'dates': self.dates, })
-
- N = 1000000
- self.draws = pd.Series(np.random.randn(N))
- labels = pd.Series(['foo', 'bar', 'baz', 'qux'] * (N // 4))
+ n = 10**5
+ offsets = np.random.randint(n, size=n).astype('timedelta64[ns]')
+ dates = np.datetime64('now') + offsets
+ self.df = DataFrame({'key1': np.random.randint(0, 500, size=n),
+ 'key2': np.random.randint(0, 100, size=n),
+ 'value1': np.random.randn(n),
+ 'value2': np.random.randn(n),
+ 'value3': np.random.randn(n),
+ 'dates': dates})
+ self.draws = Series(np.random.randn(n))
+ labels = Series(['foo', 'bar', 'baz', 'qux'] * (n // 4))
self.cats = labels.astype('category')
- def time_groupby_multi_size(self):
+ def time_multi_size(self):
self.df.groupby(['key1', 'key2']).size()
- def time_groupby_dt_size(self):
+ def time_dt_size(self):
self.df.groupby(['dates']).size()
- def time_groupby_dt_timegrouper_size(self):
+ def time_dt_timegrouper_size(self):
self.df.groupby(TimeGrouper(key='dates', freq='M')).size()
- def time_groupby_size(self):
+ def time_category_size(self):
self.draws.groupby(self.cats).size()
+class GroupByMethods(object):
-#----------------------------------------------------------------------
-# groupby with a variable value for ngroups
-
-class GroupBySuite(object):
goal_time = 0.2
param_names = ['dtype', 'ngroups']
params = [['int', 'float'], [100, 10000]]
def setup(self, dtype, ngroups):
- np.random.seed(1234)
size = ngroups * 2
rng = np.arange(ngroups)
values = rng.take(np.random.randint(0, ngroups, size=size))
@@ -479,6 +438,9 @@ def time_rank(self, dtype, ngroups):
def time_sem(self, dtype, ngroups):
self.df.groupby('key')['values'].sem()
+ def time_shift(self, dtype, ngroups):
+ self.df.groupby('key')['values'].shift()
+
def time_size(self, dtype, ngroups):
self.df.groupby('key')['values'].size()
@@ -504,7 +466,7 @@ def time_var(self, dtype, ngroups):
self.df.groupby('key')['values'].var()
-class groupby_float32(object):
+class Float32(object):
# GH 13335
goal_time = 0.2
@@ -515,27 +477,28 @@ def setup(self):
arr = np.repeat(tmp, 10)
self.df = DataFrame(dict(a=arr, b=arr))
- def time_groupby_sum(self):
+ def time_sum(self):
self.df.groupby(['a'])['b'].sum()
-class groupby_categorical(object):
+class Categories(object):
+
goal_time = 0.2
def setup(self):
- N = 100000
+ N = 10**5
arr = np.random.random(N)
-
- self.df = DataFrame(dict(
- a=Categorical(np.random.randint(10000, size=N)),
- b=arr))
- self.df_ordered = DataFrame(dict(
- a=Categorical(np.random.randint(10000, size=N), ordered=True),
- b=arr))
- self.df_extra_cat = DataFrame(dict(
- a=Categorical(np.random.randint(100, size=N),
- categories=np.arange(10000)),
- b=arr))
+ data = {'a': Categorical(np.random.randint(10000, size=N)),
+ 'b': arr}
+ self.df = DataFrame(data)
+ data = {'a': Categorical(np.random.randint(10000, size=N),
+ ordered=True),
+ 'b': arr}
+ self.df_ordered = DataFrame(data)
+ data = {'a': Categorical(np.random.randint(100, size=N),
+ categories=np.arange(10000)),
+ 'b': arr}
+ self.df_extra_cat = DataFrame(data)
def time_groupby_sort(self):
self.df.groupby('a')['b'].count()
@@ -556,130 +519,71 @@ def time_groupby_extra_cat_nosort(self):
self.df_extra_cat.groupby('a', sort=False)['b'].count()
-class groupby_period(object):
+class Datelike(object):
# GH 14338
goal_time = 0.2
-
- def make_grouper(self, N):
- return pd.period_range('1900-01-01', freq='D', periods=N)
-
- def setup(self):
- N = 10000
- self.grouper = self.make_grouper(N)
- self.df = pd.DataFrame(np.random.randn(N, 2))
-
- def time_groupby_sum(self):
+ params = ['period_range', 'date_range', 'date_range_tz']
+ param_names = ['grouper']
+
+ def setup(self, grouper):
+ N = 10**4
+ rng_map = {'period_range': period_range,
+ 'date_range': date_range,
+ 'date_range_tz': partial(date_range, tz='US/Central')}
+ self.grouper = rng_map[grouper]('1900-01-01', freq='D', periods=N)
+ self.df = DataFrame(np.random.randn(10**4, 2))
+
+ def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
-class groupby_datetime(groupby_period):
- def make_grouper(self, N):
- return pd.date_range('1900-01-01', freq='D', periods=N)
-
-
-class groupby_datetimetz(groupby_period):
- def make_grouper(self, N):
- return pd.date_range('1900-01-01', freq='D', periods=N,
- tz='US/Central')
-
-#----------------------------------------------------------------------
-# Series.value_counts
-
-class series_value_counts(object):
- goal_time = 0.2
-
- def setup(self):
- self.s = Series(np.random.randint(0, 1000, size=100000))
- self.s2 = self.s.astype(float)
-
- self.K = 1000
- self.N = 100000
- self.uniques = tm.makeStringIndex(self.K).values
- self.s3 = Series(np.tile(self.uniques, (self.N // self.K)))
-
- def time_value_counts_int64(self):
- self.s.value_counts()
-
- def time_value_counts_float64(self):
- self.s2.value_counts()
-
- def time_value_counts_strings(self):
- self.s.value_counts()
-
-
-#----------------------------------------------------------------------
-# pivot_table
-
-class groupby_pivot_table(object):
- goal_time = 0.2
-
- def setup(self):
- self.fac1 = np.array(['A', 'B', 'C'], dtype='O')
- self.fac2 = np.array(['one', 'two'], dtype='O')
- self.ind1 = np.random.randint(0, 3, size=100000)
- self.ind2 = np.random.randint(0, 2, size=100000)
- self.df = DataFrame({'key1': self.fac1.take(self.ind1), 'key2': self.fac2.take(self.ind2), 'key3': self.fac2.take(self.ind2), 'value1': np.random.randn(100000), 'value2': np.random.randn(100000), 'value3': np.random.randn(100000), })
-
- def time_groupby_pivot_table(self):
- self.df.pivot_table(index='key1', columns=['key2', 'key3'])
-
-
-#----------------------------------------------------------------------
-# Sum booleans #2692
-
-class groupby_sum_booleans(object):
+class SumBools(object):
+ # GH 2692
goal_time = 0.2
def setup(self):
- self.N = 500
- self.df = DataFrame({'ii': range(self.N), 'bb': [True for x in range(self.N)], })
+ N = 500
+ self.df = DataFrame({'ii': range(N),
+ 'bb': [True] * N})
def time_groupby_sum_booleans(self):
self.df.groupby('ii').sum()
-#----------------------------------------------------------------------
-# multi-indexed group sum #9049
-
-class groupby_sum_multiindex(object):
+class SumMultiLevel(object):
+ # GH 9049
goal_time = 0.2
+ timeout = 120.0
def setup(self):
- self.N = 50
- self.df = DataFrame({'A': (list(range(self.N)) * 2), 'B': list(range((self.N * 2))), 'C': 1, }).set_index(['A', 'B'])
+ N = 50
+ self.df = DataFrame({'A': range(N) * 2,
+ 'B': range(N * 2),
+ 'C': 1}).set_index(['A', 'B'])
def time_groupby_sum_multiindex(self):
self.df.groupby(level=[0, 1]).sum()
-#-------------------------------------------------------------------------------
-# Transform testing
-
class Transform(object):
+
goal_time = 0.2
def setup(self):
n1 = 400
n2 = 250
-
- index = MultiIndex(
- levels=[np.arange(n1), pd.util.testing.makeStringIndex(n2)],
- labels=[[i for i in range(n1) for _ in range(n2)],
- (list(range(n2)) * n1)],
- names=['lev1', 'lev2'])
-
- data = DataFrame(np.random.randn(n1 * n2, 3),
- index=index, columns=['col1', 'col20', 'col3'])
- step = int((n1 * n2 * 0.1))
- for col in range(len(data.columns)):
- idx = col
- while (idx < len(data)):
- data.set_value(data.index[idx], data.columns[col], np.nan)
- idx += step
+ index = MultiIndex(levels=[np.arange(n1), tm.makeStringIndex(n2)],
+ labels=[np.repeat(range(n1), n2).tolist(),
+ list(range(n2)) * n1],
+ names=['lev1', 'lev2'])
+ arr = np.random.randn(n1 * n2, 3)
+ arr[::10000, 0] = np.nan
+ arr[1::10000, 1] = np.nan
+ arr[2::10000, 2] = np.nan
+ data = DataFrame(arr, index=index, columns=['col1', 'col20', 'col3'])
self.df = data
- self.f_fillna = (lambda x: x.fillna(method='pad'))
+ self.f_max = lambda x: max(x)
- np.random.seed(2718281)
n = 20000
self.df1 = DataFrame(np.random.randint(1, n, (n, 3)),
columns=['jim', 'joe', 'jolie'])
@@ -691,10 +595,10 @@ def setup(self):
self.df4 = self.df3.copy()
self.df4['jim'] = self.df4['joe']
- def time_transform_func(self):
- self.df.groupby(level='lev2').transform(self.f_fillna)
+ def time_transform_lambda_max(self):
+ self.df.groupby(level='lev1').transform(self.f_max)
- def time_transform_ufunc(self):
+ def time_transform_ufunc_max(self):
self.df.groupby(level='lev1').transform(np.max)
def time_transform_multi_key1(self):
@@ -710,63 +614,31 @@ def time_transform_multi_key4(self):
self.df4.groupby(['jim', 'joe'])['jolie'].transform('max')
+class TransformBools(object):
-
-np.random.seed(0)
-N = 120000
-N_TRANSITIONS = 1400
-transition_points = np.random.permutation(np.arange(N))[:N_TRANSITIONS]
-transition_points.sort()
-transitions = np.zeros((N,), dtype=np.bool)
-transitions[transition_points] = True
-g = transitions.cumsum()
-df = DataFrame({'signal': np.random.rand(N), })
-
-
-
-
-
-class groupby_transform_series(object):
goal_time = 0.2
def setup(self):
- np.random.seed(0)
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
- transitions = np.zeros((N,), dtype=np.bool)
+ transitions = np.zeros(N, dtype=np.bool)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({'signal': np.random.rand(N)})
- def time_groupby_transform_series(self):
+ def time_transform_mean(self):
self.df['signal'].groupby(self.g).transform(np.mean)
-class groupby_transform_series2(object):
+class TransformNaN(object):
+ # GH 12737
goal_time = 0.2
def setup(self):
- np.random.seed(0)
- self.df = DataFrame({'key': (np.arange(100000) // 3),
- 'val': np.random.randn(100000)})
-
- self.df_nans = pd.DataFrame({'key': np.repeat(np.arange(1000), 10),
- 'B': np.nan,
- 'C': np.nan})
- self.df_nans.ix[4::10, 'B':'C'] = 5
-
- def time_transform_series2(self):
- self.df.groupby('key')['val'].transform(np.mean)
-
- def time_cumprod(self):
- self.df.groupby('key').cumprod()
-
- def time_cumsum(self):
- self.df.groupby('key').cumsum()
-
- def time_shift(self):
- self.df.groupby('key').shift()
+ self.df_nans = DataFrame({'key': np.repeat(np.arange(1000), 10),
+ 'B': np.nan,
+ 'C': np.nan})
+ self.df_nans.loc[4::10, 'B':'C'] = 5
- def time_transform_dataframe(self):
- # GH 12737
+ def time_first(self):
self.df_nans.groupby('key').transform('first')
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py
index 177e3e7cb87fa..951f718257170 100644
--- a/asv_bench/benchmarks/reshape.py
+++ b/asv_bench/benchmarks/reshape.py
@@ -117,3 +117,23 @@ def setup(self):
def time_wide_to_long_big(self):
self.df['id'] = self.df.index
wide_to_long(self.df, list(self.vars), i='id', j='year')
+
+
+class PivotTable(object):
+ goal_time = 0.2
+
+ def setup(self):
+ N = 100000
+ fac1 = np.array(['A', 'B', 'C'], dtype='O')
+ fac2 = np.array(['one', 'two'], dtype='O')
+ ind1 = np.random.randint(0, 3, size=N)
+ ind2 = np.random.randint(0, 2, size=N)
+ self.df = DataFrame({'key1': fac1.take(ind1),
+ 'key2': fac2.take(ind2),
+ 'key3': fac2.take(ind2),
+ 'value1': np.random.randn(N),
+ 'value2': np.random.randn(N),
+ 'value3': np.random.randn(N)})
+
+ def time_pivot_table(self):
+ self.df.pivot_table(index='key1', columns=['key2', 'key3'])
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 5e8cf3a0350bb..81c43f7bc975f 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -155,3 +155,25 @@ def setup(self):
def time_series_dropna_datetime(self):
self.s.clip(0, 1)
+
+
+class series_value_counts(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(np.random.randint(0, 1000, size=100000))
+ self.s2 = self.s.astype(float)
+
+ self.K = 1000
+ self.N = 100000
+ self.uniques = tm.makeStringIndex(self.K).values
+ self.s3 = Series(np.tile(self.uniques, (self.N // self.K)))
+
+ def time_value_counts_int64(self):
+ self.s.value_counts()
+
+ def time_value_counts_float64(self):
+ self.s2.value_counts()
+
+ def time_value_counts_strings(self):
+ self.s.value_counts()
| - Ran flake8 and replaced star imports
- Moved `series_value_counts` to `series_methods.py`
- Used `params` with `param_names` and `setup_cache` where possible.
```
asv dev -b ^groupby
[ 0.00%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/groupby.py:267
[ 1.19%] ··· Running groupby.AggBuiltins.time_agg_builtin_single_col 23.5ms
[ 2.38%] ··· Running groupby.AggBuiltins.time_agg_builtins_multi_col 74.4ms
[ 2.38%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/groupby.py:222
[ 3.57%] ··· Running groupby.CountInt.time_int_count 6.13ms
[ 4.76%] ··· Running groupby.CountInt.time_int_nunique 16.1ms
[ 4.76%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/groupby.py:241
[ 5.95%] ··· Running groupby.AggMultiColFuncs.time_different_numpy_functions 22.3ms
[ 7.14%] ··· Running groupby.AggMultiColFuncs.time_different_str_functions 22.2ms
[ 7.14%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/groupby.py:142
[ 8.33%] ··· Running groupby.Nth.time_frame_nth 4.66ms
[ 9.52%] ··· Running groupby.Nth.time_frame_nth_any 11.1ms
[ 10.71%] ··· Running groupby.Nth.time_series_nth 7.77ms
[ 11.90%] ··· Running groupby.Nth.time_series_nth_any 15.5ms
[ 13.10%] ··· Running groupby.Apply.time_copy_function_multi_col 3.08s
[ 14.29%] ··· Running groupby.Apply.time_copy_overhead_single_col 1.20s
[ 15.48%] ··· Running groupby.Apply.time_scalar_function_multi_col 70.6ms
[ 16.67%] ··· Running groupby.Apply.time_scalar_function_single_col 13.4ms
[ 17.86%] ··· Running groupby.ApplyDictReturn.time_groupby_apply_dict_return 92.7ms
[ 19.05%] ··· Running groupby.Categories.time_groupby_extra_cat_nosort 20.1ms
[ 20.24%] ··· Running groupby.Categories.time_groupby_extra_cat_sort 4.14ms
[ 21.43%] ··· Running groupby.Categories.time_groupby_nosort 23.3ms
[ 22.62%] ··· Running groupby.Categories.time_groupby_ordered_nosort 33.0ms
[ 23.81%] ··· Running groupby.Categories.time_groupby_ordered_sort 4.65ms
[ 25.00%] ··· Running groupby.Categories.time_groupby_sort 4.58ms
[ 26.19%] ··· Running groupby.CountMultiDtype.time_multi_count 9.43ms
[ 27.38%] ··· Running groupby.Datelike.time_sum ok
[ 27.38%] ····
============================================== ========
grouper
---------------------------------------------- --------
<function period_range at 0x7f68a63f75f0> 64.0ms
<function date_range at 0x7f68a63f22a8> 5.11ms
<functools.partial object at 0x7f68a09aba48> 7.83ms
============================================== ========
[ 28.57%] ··· Running groupby.FirstLast.time_groupby_first ok
[ 28.57%] ····
========== ========
dtype
---------- --------
float32 9.26ms
float64 9.12ms
datetime 20.3ms
object 29.8ms
========== ========
[ 29.76%] ··· Running groupby.FirstLast.time_groupby_last ok
[ 29.76%] ····
========== ========
dtype
---------- --------
float32 9.40ms
float64 8.81ms
datetime 20.3ms
object 26.8ms
========== ========
[ 30.95%] ··· Running groupby.FirstLast.time_groupby_nth_all ok
[ 30.95%] ····
========== ========
dtype
---------- --------
float32 53.6ms
float64 55.3ms
datetime 78.5ms
object 88.9ms
========== ========
[ 32.14%] ··· Running groupby.FirstLast.time_groupby_nth_none ok
[ 32.14%] ····
========== ========
dtype
---------- --------
float32 27.8ms
float64 27.6ms
datetime 41.4ms
object 44.8ms
========== ========
[ 33.33%] ··· Running groupby.Float32.time_sum 15.8ms
[ 34.52%] ··· Running groupby.GroupByMethods.time_all ok
[ 34.52%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 27.6ms 2.52s
float 42.0ms 4.03s
======= ======== =======
[ 35.71%] ··· Running groupby.GroupByMethods.time_any ok
[ 35.71%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 27.5ms 2.55s
float 42.7ms 4.05s
======= ======== =======
[ 36.90%] ··· Running groupby.GroupByMethods.time_count ok
[ 36.90%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.03ms 3.93ms
float 1.09ms 5.49ms
======= ======== ========
[ 38.10%] ··· Running groupby.GroupByMethods.time_cumcount ok
[ 38.10%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.23ms 5.41ms
float 1.24ms 6.52ms
======= ======== ========
[ 39.29%] ··· Running groupby.GroupByMethods.time_cummax ok
[ 39.29%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.22ms 4.28ms
float 1.27ms 5.63ms
======= ======== ========
[ 40.48%] ··· Running groupby.GroupByMethods.time_cummin ok
[ 40.48%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.21ms 4.28ms
float 1.32ms 5.74ms
======= ======== ========
[ 41.67%] ··· Running groupby.GroupByMethods.time_cumprod ok
[ 41.67%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.60ms 5.12ms
float 1.67ms 6.47ms
======= ======== ========
[ 42.86%] ··· Running groupby.GroupByMethods.time_cumsum ok
[ 42.86%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.29ms 4.26ms
float 1.34ms 5.62ms
======= ======== ========
[ 44.05%] ··· Running groupby.GroupByMethods.time_describe ok
[ 44.05%] ····
======= ======= =======
-- ngroups
------- ---------------
dtype 100 10000
======= ======= =======
int 296ms 28.8s
float 465ms 45.9s
======= ======= =======
[ 45.24%] ··· Running groupby.GroupByMethods.time_diff ok
[ 45.24%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 37.6ms 3.47s
float 57.0ms 5.50s
======= ======== =======
[ 46.43%] ··· Running groupby.GroupByMethods.time_first ok
[ 46.43%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.19ms 4.45ms
float 1.27ms 5.98ms
======= ======== ========
[ 47.62%] ··· Running groupby.GroupByMethods.time_head ok
[ 47.62%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.72ms 5.90ms
float 1.77ms 7.32ms
======= ======== ========
[ 48.81%] ··· Running groupby.GroupByMethods.time_last ok
[ 48.81%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.18ms 4.27ms
float 1.26ms 5.89ms
======= ======== ========
[ 50.00%] ··· Running groupby.GroupByMethods.time_mad ok
[ 50.00%] ····
======= ======= =======
-- ngroups
------- ---------------
dtype 100 10000
======= ======= =======
int 104ms 9.95s
float 157ms 15.6s
======= ======= =======
[ 51.19%] ··· Running groupby.GroupByMethods.time_max ok
[ 51.19%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.44ms 4.39ms
float 1.34ms 5.98ms
======= ======== ========
[ 52.38%] ··· Running groupby.GroupByMethods.time_mean ok
[ 52.38%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.39ms 4.64ms
float 1.71ms 6.86ms
======= ======== ========
[ 53.57%] ··· Running groupby.GroupByMethods.time_median ok
[ 53.57%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.39ms 5.81ms
float 1.66ms 7.52ms
======= ======== ========
[ 54.76%] ··· Running groupby.GroupByMethods.time_min ok
[ 54.76%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.21ms 4.54ms
float 1.26ms 5.99ms
======= ======== ========
[ 55.95%] ··· Running groupby.GroupByMethods.time_nunique ok
[ 55.95%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.17ms 8.68ms
float 1.20ms 10.2ms
======= ======== ========
[ 57.14%] ··· Running groupby.GroupByMethods.time_pct_change ok
[ 57.14%] ····
======= ======= =======
-- ngroups
------- ---------------
dtype 100 10000
======= ======= =======
int 118ms 11.4s
float 181ms 17.8s
======= ======= =======
[ 58.33%] ··· Running groupby.GroupByMethods.time_prod ok
[ 58.33%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.60ms 4.90ms
float 1.61ms 6.78ms
======= ======== ========
[ 59.52%] ··· Running groupby.GroupByMethods.time_rank ok
[ 59.52%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 42.1ms 3.95s
float 64.3ms 6.24s
======= ======== =======
[ 60.71%] ··· Running groupby.GroupByMethods.time_sem ok
[ 60.71%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 2.05ms 6.39ms
float 2.01ms 7.83ms
======= ======== ========
[ 61.90%] ··· Running groupby.GroupByMethods.time_shift ok
[ 61.90%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.03ms 4.64ms
float 1.08ms 5.89ms
======= ======== ========
[ 63.10%] ··· Running groupby.GroupByMethods.time_size ok
[ 63.10%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 978μs 3.80ms
float 1.06ms 5.38ms
======= ======== ========
[ 64.29%] ··· Running groupby.GroupByMethods.time_skew ok
[ 64.29%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 43.3ms 4.13s
float 66.6ms 6.52s
======= ======== =======
[ 65.48%] ··· Running groupby.GroupByMethods.time_std ok
[ 65.48%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.51ms 5.04ms
float 1.51ms 6.62ms
======= ======== ========
[ 66.67%] ··· Running groupby.GroupByMethods.time_sum ok
[ 66.67%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.89ms 4.99ms
float 1.66ms 6.69ms
======= ======== ========
[ 67.86%] ··· Running groupby.GroupByMethods.time_tail ok
[ 67.86%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.71ms 5.97ms
float 1.76ms 7.05ms
======= ======== ========
[ 69.05%] ··· Running groupby.GroupByMethods.time_unique ok
[ 69.05%] ····
======= ======== =======
-- ngroups
------- ----------------
dtype 100 10000
======= ======== =======
int 26.3ms 2.37s
float 40.3ms 3.77s
======= ======== =======
[ 70.24%] ··· Running groupby.GroupByMethods.time_value_counts ok
[ 70.24%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.75ms 13.6ms
float 1.81ms 15.3ms
======= ======== ========
[ 71.43%] ··· Running groupby.GroupByMethods.time_var ok
[ 71.43%] ····
======= ======== ========
-- ngroups
------- -----------------
dtype 100 10000
======= ======== ========
int 1.35ms 4.85ms
float 1.38ms 6.25ms
======= ======== ========
[ 72.62%] ··· Running groupby.GroupManyLabels.time_sum ok
[ 72.62%] ····
======= ========
ncols
------- --------
1 6.72ms
1000 14.2ms
======= ========
[ 73.81%] ··· Running groupby.GroupStrings.time_multi_columns 1.28s
[ 75.00%] ··· Running groupby.Groups.time_series_groups ok
[ 75.00%] ····
============== ========
ser
-------------- --------
int64_small 90.4ms
int64_large 320ms
object_small 140ms
object_large 457ms
============== ========
[ 76.19%] ··· Running groupby.Incidies.time_datetime_indicies 11.3ms
[ 77.38%] ··· Running groupby.Int64.time_overflow 615ms
[ 78.57%] ··· Running groupby.MultiColumn.time_col_select_lambda_sum 252ms
[ 79.76%] ··· Running groupby.MultiColumn.time_col_select_numpy_sum 24.7ms
[ 80.95%] ··· Running groupby.MultiColumn.time_cython_sum 29.0ms
[ 82.14%] ··· Running groupby.MultiColumn.time_lambda_sum 496ms
[ 83.33%] ··· Running groupby.PivotTable.time_pivot_table 37.2ms
[ 84.52%] ··· Running groupby.Size.time_category_size 13.1ms
[ 85.71%] ··· Running groupby.Size.time_dt_size 19.5ms
[ 86.90%] ··· Running groupby.Size.time_dt_timegrouper_size 51.4ms
[ 88.10%] ··· Running groupby.Size.time_multi_size 25.5ms
[ 89.29%] ··· Running groupby.SumBools.time_groupby_sum_booleans 3.22ms
[ 90.48%] ··· Running groupby.SumMultiLevel.time_groupby_sum_multiindex 2.65ms
[ 91.67%] ··· Running groupby.Transform.time_transform_lambda_max 735ms
[ 92.86%] ··· Running groupby.Transform.time_transform_multi_key1 15.9ms
[ 94.05%] ··· Running groupby.Transform.time_transform_multi_key2 13.5ms
[ 95.24%] ··· Running groupby.Transform.time_transform_multi_key3 12.6ms
[ 96.43%] ··· Running groupby.Transform.time_transform_multi_key4 6.76ms
[ 97.62%] ··· Running groupby.Transform.time_transform_ufunc_max 13.8ms
[ 98.81%] ··· Running groupby.TransformBools.time_transform_mean 8.44ms
[100.00%] ··· Running groupby.TransformNaN.time_first 5.82ms
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/18611 | 2017-12-03T18:30:06Z | 2017-12-05T09:18:56Z | 2017-12-05T09:18:56Z | 2017-12-05T17:47:43Z |
json_normalize: Make code more pythonic and avoid modification of meta if mutable | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 3d4850b334ff9..a9608594be547 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -90,6 +90,7 @@ I/O
- Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`).
- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`)
- Bug in :func:`read_json` not decoding when reading line deliminted JSON from S3 (:issue:`17200`)
+- Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`)
Plotting
^^^^^^^^
diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py
index d062e4f2830ff..595031b04e367 100644
--- a/pandas/io/json/normalize.py
+++ b/pandas/io/json/normalize.py
@@ -181,7 +181,7 @@ def _pull_field(js, spec):
return result
- if isinstance(data, list) and len(data) is 0:
+ if isinstance(data, list) and not data:
return DataFrame()
# A bit of a hackjob
@@ -207,9 +207,7 @@ def _pull_field(js, spec):
elif not isinstance(meta, list):
meta = [meta]
- for i, x in enumerate(meta):
- if not isinstance(x, list):
- meta[i] = [x]
+ meta = [m if isinstance(m, list) else [m] for m in meta]
# Disastrously inefficient for now
records = []
diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py
index 49b765b18d623..1cceae32cd748 100644
--- a/pandas/tests/io/json/test_normalize.py
+++ b/pandas/tests/io/json/test_normalize.py
@@ -173,6 +173,21 @@ def test_meta_name_conflict(self):
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
+ def test_meta_parameter_not_modified(self):
+ # GH 18610
+ data = [{'foo': 'hello',
+ 'bar': 'there',
+ 'data': [{'foo': 'something', 'bar': 'else'},
+ {'foo': 'something2', 'bar': 'else2'}]}]
+
+ COLUMNS = ['foo', 'bar']
+ result = json_normalize(data, 'data', meta=COLUMNS,
+ meta_prefix='meta')
+
+ assert COLUMNS == ['foo', 'bar']
+ for val in ['metafoo', 'metabar', 'foo', 'bar']:
+ assert val in result
+
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
| - [x] closes itself #18610
- [x] tests added / passed
- [x] passes `git diff master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
- Update json_normalize to prevent modifying `meta` parameter.
- Make the code more Pythonic too. | https://api.github.com/repos/pandas-dev/pandas/pulls/18610 | 2017-12-03T13:54:03Z | 2017-12-04T12:55:54Z | 2017-12-04T12:55:54Z | 2017-12-11T20:21:03Z |
ENH: Raise error for 'sheet' arg in read_excel | diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt
index 7362e11b22189..5d6ed50ca3f26 100644
--- a/doc/source/whatsnew/v0.24.0.txt
+++ b/doc/source/whatsnew/v0.24.0.txt
@@ -377,7 +377,7 @@ I/O
^^^
- :func:`read_html()` no longer ignores all-whitespace ``<tr>`` within ``<thead>`` when considering the ``skiprows`` and ``header`` arguments. Previously, users had to decrease their ``header`` and ``skiprows`` values on such tables to work around the issue. (:issue:`21641`)
--
+- :func:`read_excel()` will correctly show the deprecation warning for previously deprecated ``sheetname`` (:issue:`17994`)
-
Plotting
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 793a95ffb0ee7..fa3a1bd74eda5 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -303,6 +303,16 @@ def read_excel(io,
convert_float=True,
**kwds):
+ # Can't use _deprecate_kwarg since sheetname=None has a special meaning
+ if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
+ warnings.warn("The `sheetname` keyword is deprecated, use "
+ "`sheet_name` instead", FutureWarning, stacklevel=2)
+ sheet_name = kwds.pop("sheetname")
+
+ if 'sheet' in kwds:
+ raise TypeError("read_excel() got an unexpected keyword argument "
+ "`sheet`")
+
if not isinstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 1fda56dbff772..d1eab16e7c22c 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -219,6 +219,16 @@ def test_excel_passes_na(self, ext):
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
+ def test_deprecated_sheetname(self, ext):
+ # gh-17964
+ excel = self.get_excelfile('test1', ext)
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ read_excel(excel, sheetname='Sheet1')
+
+ with pytest.raises(TypeError):
+ read_excel(excel, sheet='Sheet1')
+
def test_excel_table_sheet_by_index(self, ext):
excel = self.get_excelfile('test1', ext)
| - [ ] closes #17994
I was also thinking about the possibility of raising a warning instead, and do something like:
```
if sheet in kwds:
sheet_name = sheet
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18604 | 2017-12-02T22:09:03Z | 2018-07-07T18:56:41Z | 2018-07-07T18:56:40Z | 2018-07-07T18:56:41Z |
DOC: Give python3 precedence over py2 in the install notes | diff --git a/doc/source/install.rst b/doc/source/install.rst
index 7c1fde119ceaa..95331271f95be 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -141,28 +141,24 @@ and can take a few minutes to complete.
Installing using your Linux distribution's package manager.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The commands in this table will install pandas for Python 2 from your distribution.
-To install pandas for Python 3 you may need to use the package ``python3-pandas``.
+The commands in this table will install pandas for Python 3 from your distribution.
+To install pandas for Python 2 you may need to use the package ``python-pandas``.
.. csv-table::
:header: "Distribution", "Status", "Download / Repository Link", "Install method"
:widths: 10, 10, 20, 50
- Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python-pandas``
- Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python-pandas``
- Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python-pandas``
- Ubuntu, unstable (daily builds), `PythonXY PPA <https://code.launchpad.net/~pythonxy/+archive/pythonxy-devel>`__; activate by: ``sudo add-apt-repository ppa:pythonxy/pythonxy-devel && sudo apt-get update``, ``sudo apt-get install python-pandas``
- OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python-pandas``
- Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python-pandas``
- Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python-pandas``
-
-
-
-
-
-
+ Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
+ Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas``
+ Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all§ion=all>`__ , ``sudo apt-get install python3-pandas``
+ OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python3-pandas``
+ Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas``
+ Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas``
+**However**, the packages in the linux package managers are often a few versions behind, so
+to get the newest version of pandas, it's recommended to install using the ``pip`` or ``conda``
+methods described above.
Installing from source
| Currently the install notes show python 2 more prominently than Python 3. I suggest reversing that.
The argument is that pandas has pledged to drop support for Python 2 in 2020, and that the general transition to python 3 is now well under way (Django has gone py3 only and numpy will only develop new feature for Python3 from 1. jan. 2019 etc). | https://api.github.com/repos/pandas-dev/pandas/pulls/18603 | 2017-12-02T20:12:36Z | 2017-12-06T23:20:36Z | 2017-12-06T23:20:36Z | 2017-12-11T20:19:37Z |
STYLE: Use decorator syntax instead of legacy syntax for defining properties in Cython | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index fa2e1271f4649..f8371d4855803 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -220,34 +220,31 @@ cdef class IndexEngine:
def __sizeof__(self):
return self.sizeof()
- property is_unique:
+ @property
+ def is_unique(self):
+ if self.need_unique_check:
+ self._do_unique_check()
- def __get__(self):
- if self.need_unique_check:
- self._do_unique_check()
-
- return self.unique == 1
+ return self.unique == 1
cdef inline _do_unique_check(self):
# this de-facto the same
self._ensure_mapping_populated()
- property is_monotonic_increasing:
-
- def __get__(self):
- if self.need_monotonic_check:
- self._do_monotonic_check()
+ @property
+ def is_monotonic_increasing(self):
+ if self.need_monotonic_check:
+ self._do_monotonic_check()
- return self.monotonic_inc == 1
+ return self.monotonic_inc == 1
- property is_monotonic_decreasing:
+ @property
+ def is_monotonic_decreasing(self):
+ if self.need_monotonic_check:
+ self._do_monotonic_check()
- def __get__(self):
- if self.need_monotonic_check:
- self._do_monotonic_check()
-
- return self.monotonic_dec == 1
+ return self.monotonic_dec == 1
cdef inline _do_monotonic_check(self):
cdef object is_unique
@@ -279,10 +276,9 @@ cdef class IndexEngine:
cdef _check_type(self, object val):
hash(val)
- property is_mapping_populated:
-
- def __get__(self):
- return self.mapping is not None
+ @property
+ def is_mapping_populated(self):
+ return self.mapping is not None
cdef inline _ensure_mapping_populated(self):
# this populates the mapping
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 7f3cc0a7e81dd..0257d13157acc 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -203,9 +203,9 @@ cdef class _TSObject:
# int64_t value # numpy dt64
# object tzinfo
- property value:
- def __get__(self):
- return self.value
+ @property
+ def value(self):
+ return self.value
cpdef int64_t pydt_to_i8(object pydt) except? -1:
| - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
See the [Cython docs](http://cython.readthedocs.io/en/latest/src/userguide/extension_types.html#properties) for a comparison of the decorator syntax and legacy syntax. I only found a few instances of the legacy syntax; looks like most of the Cython code is already using decorator syntax. | https://api.github.com/repos/pandas-dev/pandas/pulls/18602 | 2017-12-02T19:48:38Z | 2017-12-06T11:27:35Z | 2017-12-06T11:27:35Z | 2017-12-06T15:43:15Z |
DEPR/CLN: Remove freq parameters from df.rolling/expanding/ewm | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 495d0beaf3faa..67e428e096cdb 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -150,6 +150,8 @@ Removal of prior version deprecations/changes
- The ``SparseList`` class has been removed (:issue:`14007`)
- The ``pandas.io.wb`` and ``pandas.io.data`` stub modules have been removed (:issue:`13735`)
- ``Categorical.from_array`` has been removed (:issue:`13854`)
+- The ``freq`` parameter has been removed from the ``rolling``/``expanding``/``ewm`` methods of DataFrame
+ and Series (deprecated since v0.18). Instead, resample before calling the methods. (:issue:18601)
.. _whatsnew_0220.performance:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 83fd36f0a864f..ea4a645927d7b 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7357,31 +7357,31 @@ def _add_series_or_dataframe_operations(cls):
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
- def rolling(self, window, min_periods=None, freq=None, center=False,
+ def rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
- min_periods=min_periods, freq=freq,
+ min_periods=min_periods,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
- def expanding(self, min_periods=1, freq=None, center=False, axis=0):
+ def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
- return rwindow.expanding(self, min_periods=min_periods, freq=freq,
+ return rwindow.expanding(self, min_periods=min_periods,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
- min_periods=0, freq=None, adjust=True, ignore_na=False,
+ min_periods=0, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
- alpha=alpha, min_periods=min_periods, freq=freq,
+ alpha=alpha, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm
diff --git a/pandas/core/window.py b/pandas/core/window.py
index 345f9b035a36b..807f8bfa12674 100644
--- a/pandas/core/window.py
+++ b/pandas/core/window.py
@@ -58,19 +58,14 @@
class _Window(PandasObject, SelectionMixin):
- _attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
+ _attributes = ['window', 'min_periods', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
- def __init__(self, obj, window=None, min_periods=None, freq=None,
+ def __init__(self, obj, window=None, min_periods=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
- if freq is not None:
- warnings.warn("The freq kw is deprecated and will be removed in a "
- "future version. You can resample prior to passing "
- "to a window function", FutureWarning, stacklevel=3)
-
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
@@ -78,7 +73,6 @@ def __init__(self, obj, window=None, min_periods=None, freq=None,
self.closed = closed
self.window = window
self.min_periods = min_periods
- self.freq = freq
self.center = center
self.win_type = win_type
self.win_freq = None
@@ -117,16 +111,6 @@ def _convert_freq(self, how=None):
obj = self._selected_obj
index = None
- if (self.freq is not None and
- isinstance(obj, (ABCSeries, ABCDataFrame))):
- if how is not None:
- warnings.warn("The how kw argument is deprecated and removed "
- "in a future version. You can resample prior "
- "to passing to a window function", FutureWarning,
- stacklevel=6)
-
- obj = obj.resample(self.freq).aggregate(how or 'asfreq')
-
return obj, index
def _create_blocks(self, how):
@@ -374,14 +358,11 @@ class Window(_Window):
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
- freq : string or DateOffset object, optional (default None)
- .. deprecated:: 0.18.0
- Frequency to conform the data to before computing the statistic.
- Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
- Provide a window type. See the notes below.
+ Provide a window type. If ``None``, all points are evenly weighted.
+ See the notes below for further information.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
@@ -479,10 +460,6 @@ class Window(_Window):
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
- The `freq` keyword is used to conform time series data to a specified
- frequency by resampling the data. This is done with the default parameters
- of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
-
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
@@ -506,6 +483,11 @@ class Window(_Window):
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
+
+ See Also
+ --------
+ expanding : Provides expanding transformations.
+ ewm : Provides exponential weighted functions
"""
def validate(self):
@@ -876,8 +858,6 @@ def sum(self, *args, **kwargs):
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
- if self.freq is not None and how is None:
- how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
@@ -891,8 +871,6 @@ def max(self, how=None, *args, **kwargs):
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
- if self.freq is not None and how is None:
- how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
@@ -909,8 +887,6 @@ def mean(self, *args, **kwargs):
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
- if self.freq is not None and how is None:
- how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
@@ -1060,9 +1036,9 @@ def corr(self, other=None, pairwise=None, **kwargs):
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
- freq=self.freq, center=self.center)
+ center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
- freq=self.freq, center=self.center)
+ center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
@@ -1136,7 +1112,7 @@ def _validate_monotonic(self):
"monotonic".format(formatted))
def _validate_freq(self):
- """ validate & return our freq """
+ """ validate & return window frequency """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
@@ -1346,10 +1322,6 @@ class Expanding(_Rolling_and_Expanding):
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
- freq : string or DateOffset object, optional (default None)
- .. deprecated:: 0.18.0
- Frequency to conform the data to before computing the statistic.
- Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
@@ -1382,17 +1354,18 @@ class Expanding(_Rolling_and_Expanding):
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
- The `freq` keyword is used to conform time series data to a specified
- frequency by resampling the data. This is done with the default parameters
- of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
+ See Also
+ --------
+ rolling : Provides rolling window calculations
+ ewm : Provides exponential weighted functions
"""
- _attributes = ['min_periods', 'freq', 'center', 'axis']
+ _attributes = ['min_periods', 'center', 'axis']
- def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
+ def __init__(self, obj, min_periods=1, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
- freq=freq, center=center, axis=axis)
+ center=center, axis=axis)
@property
def _constructor(self):
@@ -1611,9 +1584,6 @@ class EWM(_Rolling):
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
- freq : None or string alias / date offset object, default=None
- .. deprecated:: 0.18.0
- Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
@@ -1651,10 +1621,6 @@ class EWM(_Rolling):
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
- The `freq` keyword is used to conform time series data to a specified
- frequency by resampling the data. This is done with the default parameters
- of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
-
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
@@ -1674,16 +1640,20 @@ class EWM(_Rolling):
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
+
+ See Also
+ --------
+ rolling : Provides rolling window calculations
+ expanding : Provides expanding transformations.
"""
- _attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
+ _attributes = ['com', 'min_periods', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
- min_periods=0, freq=None, adjust=True, ignore_na=False,
+ min_periods=0, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
- self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index a0e94aa0c8581..4290001fea405 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -208,6 +208,8 @@ def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs):
if value is not None:
kwds[k] = value
+ # TODO: the below is only in place temporary until this module is removed.
+ kwargs.pop('freq', None) # freq removed in 0.22
# how is a keyword that if not-None should be in kwds
how = kwargs.pop('how', None)
if how is not None:
@@ -680,7 +682,6 @@ def f(arg, min_periods=1, freq=None, **kwargs):
name,
arg,
min_periods=min_periods,
- freq=freq,
func_kw=func_kw,
**kwargs)
return f
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 8135e263f412f..db94cd08b0050 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -7,7 +7,6 @@
from datetime import datetime, timedelta
from numpy.random import randn
import numpy as np
-from distutils.version import LooseVersion
import pandas as pd
from pandas import (Series, DataFrame, bdate_range, isna,
@@ -284,33 +283,6 @@ def test_preserve_metadata(self):
assert s2.name == 'foo'
assert s3.name == 'foo'
- def test_how_compat(self):
- # in prior versions, we would allow how to be used in the resample
- # now that its deprecated, we need to handle this in the actual
- # aggregation functions
- s = Series(np.random.randn(20),
- index=pd.date_range('1/1/2000', periods=20, freq='12H'))
-
- for how in ['min', 'max', 'median']:
- for op in ['mean', 'sum', 'std', 'var', 'kurt', 'skew']:
- for t in ['rolling', 'expanding']:
-
- with catch_warnings(record=True):
-
- dfunc = getattr(pd, "{0}_{1}".format(t, op))
- if dfunc is None:
- continue
-
- if t == 'rolling':
- kwargs = {'window': 5}
- else:
- kwargs = {}
- result = dfunc(s, freq='D', how=how, **kwargs)
-
- expected = getattr(
- getattr(s, t)(freq='D', **kwargs), op)(how=how)
- tm.assert_series_equal(result, expected)
-
class TestWindow(Base):
@@ -1452,22 +1424,18 @@ def get_result(arr, window, min_periods=None, center=False):
def _check_structures(self, f, static_comp, name=None,
has_min_periods=True, has_time_rule=True,
has_center=True, fill_value=None, **kwargs):
- def get_result(obj, window, min_periods=None, freq=None, center=False):
+ def get_result(obj, window, min_periods=None, center=False):
# check via the API calls if name is provided
if name is not None:
-
- # catch a freq deprecation warning if freq is provided and not
- # None
- with catch_warnings(record=True):
- r = obj.rolling(window=window, min_periods=min_periods,
- freq=freq, center=center)
+ r = obj.rolling(window=window, min_periods=min_periods,
+ center=center)
return getattr(r, name)(**kwargs)
# check via the moments API
with catch_warnings(record=True):
return f(obj, window=window, min_periods=min_periods,
- freq=freq, center=center, **kwargs)
+ center=center, **kwargs)
series_result = get_result(self.series, window=50)
frame_result = get_result(self.frame, window=50)
@@ -1479,17 +1447,17 @@ def get_result(obj, window, min_periods=None, freq=None, center=False):
if has_time_rule:
win = 25
minp = 10
+ series = self.series[::2].resample('B').mean()
+ frame = self.frame[::2].resample('B').mean()
if has_min_periods:
- series_result = get_result(self.series[::2], window=win,
- min_periods=minp, freq='B')
- frame_result = get_result(self.frame[::2], window=win,
- min_periods=minp, freq='B')
+ series_result = get_result(series, window=win,
+ min_periods=minp)
+ frame_result = get_result(frame, window=win,
+ min_periods=minp)
else:
- series_result = get_result(self.series[::2], window=win,
- freq='B')
- frame_result = get_result(self.frame[::2], window=win,
- freq='B')
+ series_result = get_result(series, window=win)
+ frame_result = get_result(frame, window=win)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
@@ -2035,15 +2003,11 @@ class TestMomentsConsistency(Base):
(np.nanmax, 1, 'max'),
(np.nanmin, 1, 'min'),
(np.nansum, 1, 'sum'),
+ (np.nanmean, 1, 'mean'),
+ (lambda v: np.nanstd(v, ddof=1), 1, 'std'),
+ (lambda v: np.nanvar(v, ddof=1), 1, 'var'),
+ (np.nanmedian, 1, 'median'),
]
- if np.__version__ >= LooseVersion('1.8.0'):
- base_functions += [
- (np.nanmean, 1, 'mean'),
- (lambda v: np.nanstd(v, ddof=1), 1, 'std'),
- (lambda v: np.nanvar(v, ddof=1), 1, 'var'),
- ]
- if np.__version__ >= LooseVersion('1.9.0'):
- base_functions += [(np.nanmedian, 1, 'median'), ]
no_nan_functions = [
(np.max, None, 'max'),
(np.min, None, 'min'),
@@ -2597,9 +2561,9 @@ def test_expanding_apply(self):
ser = Series([])
tm.assert_series_equal(ser, ser.expanding().apply(lambda x: x.mean()))
- def expanding_mean(x, min_periods=1, freq=None):
+ def expanding_mean(x, min_periods=1):
return mom.expanding_apply(x, lambda x: x.mean(),
- min_periods=min_periods, freq=freq)
+ min_periods=min_periods)
self._check_expanding(expanding_mean, np.mean)
@@ -3052,8 +3016,7 @@ def test_rolling_max_gh6297(self):
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- x = series.rolling(window=1, freq='D').max()
+ x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_how_resample(self):
@@ -3071,24 +3034,21 @@ def test_rolling_max_how_resample(self):
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- x = series.rolling(window=1, freq='D').max()
+ x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- x = series.rolling(window=1, freq='D').max(how='median')
+ x = series.resample('D').median().rolling(window=1).max(how='median')
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- x = series.rolling(window=1, freq='D').max(how='mean')
- tm.assert_series_equal(expected, x)
+ x = series.resample('D').mean().rolling(window=1).max(how='mean')
+ tm.assert_series_equal(expected, x)
def test_rolling_min_how_resample(self):
@@ -3105,9 +3065,8 @@ def test_rolling_min_how_resample(self):
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- r = series.rolling(window=1, freq='D')
- tm.assert_series_equal(expected, r.min())
+ r = series.resample('D').min().rolling(window=1)
+ tm.assert_series_equal(expected, r.min())
def test_rolling_median_how_resample(self):
@@ -3124,9 +3083,8 @@ def test_rolling_median_how_resample(self):
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
- with catch_warnings(record=True):
- x = series.rolling(window=1, freq='D').median()
- tm.assert_series_equal(expected, x)
+ x = series.resample('D').median().rolling(window=1).median()
+ tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error(self):
# GH11722
| -- [x ] tests added / passed
- [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x ] whatsnew entry
The ``freq`` parameter of df.rolling/expanding/ewm was deprecated in 0.18 (#11603). This PR removes the parameter from the code base.
After this PR, I will remove the ``how`` parameter and lastly the ``pd.rolling_*``, ``pd.expanding_*`` and ``pd.ewm_*`` will be removed (AKA ``pd.stats.*``). By removing ``freq`` and ``how``before ``pd.stats`` I think it will be easier to clean up `` pandas/tests/test_window.py``, as ATM these three issues are not very cleanly separated in that test module.
In some test in ``test_window.py::TestMoments`` there is a bit of resampling going on, as I've moved ``freq`` stuff from ``rolling`` into a prior ``df.resample`` step. These are tests for ``how`` and will be removed once ``how`` is removed (unless the tests good for testing the windows functions, I'm not completely sure ATM, but will look into it when I reach that point).
Additionally (and unrelated), in `` pandas/tests/test_window.py`` there are checks for numpy>=1.8 and >=1.9. These checks are no longer necessary, as numpy 1.9 is the current minium version, so they're removed,. | https://api.github.com/repos/pandas-dev/pandas/pulls/18601 | 2017-12-02T18:19:55Z | 2017-12-06T01:13:53Z | 2017-12-06T01:13:53Z | 2017-12-11T08:08:49Z |
BUG: Fix initialization of DataFrame from dict with NaN as key | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 21b20cb123ed6..9def910df0bab 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -16,11 +16,11 @@ class FromDicts(object):
def setup(self):
N, K = 5000, 50
- index = tm.makeStringIndex(N)
- columns = tm.makeStringIndex(K)
- frame = DataFrame(np.random.randn(N, K), index=index, columns=columns)
+ self.index = tm.makeStringIndex(N)
+ self.columns = tm.makeStringIndex(K)
+ frame = DataFrame(np.random.randn(N, K), index=self.index,
+ columns=self.columns)
self.data = frame.to_dict()
- self.some_dict = list(self.data.values())[0]
self.dict_list = frame.to_dict(orient='records')
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
@@ -31,8 +31,14 @@ def time_list_of_dict(self):
def time_nested_dict(self):
DataFrame(self.data)
- def time_dict(self):
- Series(self.some_dict)
+ def time_nested_dict_index(self):
+ DataFrame(self.data, index=self.index)
+
+ def time_nested_dict_columns(self):
+ DataFrame(self.data, columns=self.columns)
+
+ def time_nested_dict_index_columns(self):
+ DataFrame(self.data, index=self.index, columns=self.columns)
def time_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index ce63cb2473bc4..21a2a66831fd3 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1135,6 +1135,10 @@ Reshaping
- Bug in :func:`DataFrame.unstack` which casts int to float if ``columns`` is a ``MultiIndex`` with unused levels (:issue:`17845`)
- Bug in :func:`DataFrame.unstack` which raises an error if ``index`` is a ``MultiIndex`` with unused labels on the unstacked level (:issue:`18562`)
- Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
+- Fixed construction of a :class:`DataFrame` from a ``dict`` containing ``NaN`` as key (:issue:`18455`)
+- Suppressed error in the construction of a :class:`DataFrame` from a ``dict`` containing scalar values when the corresponding keys are not included in the passed index (:issue:`18600`)
+
+- Fixed (changed from ``object`` to ``float64``) dtype of :class:`DataFrame` initialized with axes, no data, and ``dtype=int`` (:issue:`19646`)
- Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`)
- Bug in :func:`cut` which fails when using readonly arrays (:issue:`18773`)
- Bug in :func:`DataFrame.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 9626079660771..0f5e8a99eb685 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -27,6 +27,7 @@
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
+ construct_1d_arraylike_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
@@ -429,44 +430,27 @@ def _init_dict(self, data, index, columns, dtype=None):
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
- columns = _ensure_index(columns)
+ arrays = Series(data, index=columns, dtype=object)
+ data_names = arrays.index
- # GH10856
- # raise ValueError if only scalars in dict
+ missing = arrays.isnull()
if index is None:
- extract_index(list(data.values()))
-
- # prefilter if columns passed
- data = {k: v for k, v in compat.iteritems(data) if k in columns}
-
- if index is None:
- index = extract_index(list(data.values()))
-
+ # GH10856
+ # raise ValueError if only scalars in dict
+ index = extract_index(arrays[~missing])
else:
index = _ensure_index(index)
- arrays = []
- data_names = []
- for k in columns:
- if k not in data:
- # no obvious "empty" int column
- if dtype is not None and issubclass(dtype.type,
- np.integer):
- continue
-
- if dtype is None:
- # 1783
- v = np.empty(len(index), dtype=object)
- elif np.issubdtype(dtype, np.flexible):
- v = np.empty(len(index), dtype=object)
- else:
- v = np.empty(len(index), dtype=dtype)
-
- v.fill(np.nan)
+ # no obvious "empty" int column
+ if missing.any() and not is_integer_dtype(dtype):
+ if dtype is None or np.issubdtype(dtype, np.flexible):
+ # 1783
+ nan_dtype = object
else:
- v = data[k]
- data_names.append(k)
- arrays.append(v)
+ nan_dtype = dtype
+ v = construct_1d_arraylike_from_scalar(np.nan, len(index),
+ nan_dtype)
+ arrays.loc[missing] = [v] * missing.sum()
else:
keys = com._dict_keys_to_ordered_list(data)
@@ -7253,8 +7237,6 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
- else:
- index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index d5cd22732f0a9..cf14bb8f2e534 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -7341,7 +7341,6 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
- cond = cond.astype(bool, copy=False)
cond = -cond if inplace else cond
# try to align with other
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f5956aacf8646..b0a6086c450ef 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4841,7 +4841,7 @@ def form_blocks(arrays, names, axes):
items_dict = defaultdict(list)
extra_locs = []
- names_idx = Index(names)
+ names_idx = _ensure_index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f3630dc43fbd1..422d9c8f0ecf7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -24,6 +24,7 @@
is_extension_array_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
+ is_object_dtype,
is_list_like,
is_hashable,
is_iterator,
@@ -38,7 +39,8 @@
maybe_upcast, infer_dtype_from_scalar,
maybe_convert_platform,
maybe_cast_to_datetime, maybe_castable,
- construct_1d_arraylike_from_scalar)
+ construct_1d_arraylike_from_scalar,
+ construct_1d_object_array_from_listlike)
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
@@ -297,6 +299,7 @@ def _init_dict(self, data, index=None, dtype=None):
# raises KeyError), so we iterate the entire dict, and align
if data:
keys, values = zip(*compat.iteritems(data))
+ values = list(values)
else:
keys, values = [], []
@@ -4042,7 +4045,13 @@ def _try_cast(arr, take_fast_path):
try:
subarr = maybe_cast_to_datetime(arr, dtype)
- if not is_extension_type(subarr):
+ # Take care in creating object arrays (but iterators are not
+ # supported):
+ if is_object_dtype(dtype) and (is_list_like(subarr) and
+ not (is_iterator(subarr) or
+ isinstance(subarr, np.ndarray))):
+ subarr = construct_1d_object_array_from_listlike(subarr)
+ elif not is_extension_type(subarr):
subarr = np.array(subarr, dtype=dtype, copy=copy)
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 499751e864331..47b7d60e3b6e8 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -287,8 +287,50 @@ def test_constructor_dict(self):
with tm.assert_raises_regex(ValueError, msg):
DataFrame({'a': 0.7}, columns=['a'])
- with tm.assert_raises_regex(ValueError, msg):
- DataFrame({'a': 0.7}, columns=['b'])
+ @pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
+ def test_constructor_invalid_items_unused(self, scalar):
+ # No error if invalid (scalar) value is in fact not used:
+ result = DataFrame({'a': scalar}, columns=['b'])
+ expected = DataFrame(columns=['b'])
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
+ def test_constructor_dict_nan_key(self, value):
+ # GH 18455
+ cols = [1, value, 3]
+ idx = ['a', value]
+ values = [[0, 3], [1, 4], [2, 5]]
+ data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
+ result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
+ expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
+ index=idx, columns=cols)
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(data, index=idx).sort_values('a', axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(data, index=idx, columns=cols)
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("value", [np.nan, None, float('nan')])
+ def test_constructor_dict_nan_tuple_key(self, value):
+ # GH 18455
+ cols = Index([(11, 21), (value, 22), (13, value)])
+ idx = Index([('a', value), (value, 2)])
+ values = [[0, 3], [1, 4], [2, 5]]
+ data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
+ result = (DataFrame(data)
+ .sort_values((11, 21))
+ .sort_values(('a', value), axis=1))
+ expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
+ index=idx, columns=cols)
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
+ tm.assert_frame_equal(result, expected)
+
+ result = DataFrame(data, index=idx, columns=cols)
+ tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
@@ -753,7 +795,7 @@ def test_constructor_corner(self):
# does not error but ends up float
df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
- assert df.values.dtype == np.object_
+ assert df.values.dtype == np.dtype('float64')
# #1783 empty dtype object
df = DataFrame({}, columns=['foo', 'bar'])
@@ -761,7 +803,7 @@ def test_constructor_corner(self):
df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
dtype=int)
- assert df.values.dtype == np.object_
+ assert df.values.dtype == np.dtype('float64')
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 68df0982a1e3e..d89731dc09044 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -603,13 +603,7 @@ def test_unstack_unused_levels(self):
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
- # Broken (GH 18455):
- # tm.assert_frame_equal(result, expected)
- diff = result - expected
- assert(diff.sum().sum() == 0)
- assert((diff + 1).sum().sum() == 8)
-
- assert((result.columns.levels[1] == idx.levels[level]).all())
+ tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index cbb5932a890dc..5ef6dc07a5c22 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -461,7 +461,7 @@ def test_read_one_empty_col_with_header(self, ext):
)
expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
tm.assert_frame_equal(actual_header_none, expected_header_none)
- expected_header_zero = DataFrame(columns=[0], dtype='int64')
+ expected_header_zero = DataFrame(columns=[0])
tm.assert_frame_equal(actual_header_zero, expected_header_zero)
@td.skip_if_no('openpyxl')
| - [x] closes #18455
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This does not solve the [MI example](https://github.com/pandas-dev/pandas/issues/18455#issuecomment-346700652) in #18455, but that should be included in #18485 . | https://api.github.com/repos/pandas-dev/pandas/pulls/18600 | 2017-12-02T14:27:59Z | 2018-04-01T17:47:06Z | 2018-04-01T17:47:05Z | 2018-04-01T17:48:17Z |
BLD Added --strict and -r sxX to test scripts | diff --git a/test.bat b/test.bat
index 2424f62b8dbfe..e07c84f257a69 100644
--- a/test.bat
+++ b/test.bat
@@ -1,3 +1,3 @@
:: test on windows
-pytest --strict --skip-slow --skip-network pandas -n 2 %*
+pytest --skip-slow --skip-network pandas -n 2 -r sxX --strict %*
diff --git a/test.sh b/test.sh
index 23c7ff52d2ce9..1255a39816f78 100755
--- a/test.sh
+++ b/test.sh
@@ -1,4 +1,4 @@
#!/bin/sh
command -v coverage >/dev/null && coverage erase
command -v python-coverage >/dev/null && python-coverage erase
-pytest pandas --cov=pandas
+pytest pandas --cov=pandas -r sxX --strict
diff --git a/test_fast.bat b/test_fast.bat
index 17dc54b580137..81f30dd310e28 100644
--- a/test_fast.bat
+++ b/test_fast.bat
@@ -1,3 +1,3 @@
:: test on windows
set PYTHONHASHSEED=314159265
-pytest --skip-slow --skip-network -m "not single" -n 4 pandas
+pytest --skip-slow --skip-network -m "not single" -n 4 -r sXX --strict pandas
diff --git a/test_fast.sh b/test_fast.sh
index 9b984156a796c..1fb55e581d292 100755
--- a/test_fast.sh
+++ b/test_fast.sh
@@ -5,4 +5,4 @@
# https://github.com/pytest-dev/pytest/issues/1075
export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 4294967295))')
-pytest pandas --skip-slow --skip-network -m "not single" -n 4 "$@"
+pytest pandas --skip-slow --skip-network -m "not single" -n 4 -r sxX --strict "$@"
| - no related issue in issue tracker
- no tests added because it was a minor build script change)
- modified shell script, no python syntax check required
- [] whatsnew entry
Altered pytest output to show summaries for xFailed, xPassed and skipped
Added --strict to align these convenience scripts with ci
it will now output summaries below like this
```
...
.........................s........s..s...............s........................................... [ 99%]
....................s...............sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 99%]
sssssssssssssssssssssssssssssssssssssssssssssssss.. [100%]
======================================== short test summary info ========================================
SKIP [1] /Users/206673/open/pandas-nicku33/pandas/tests/io/test_packers.py:649: no blosc
SKIP [3] /Users/206673/open/pandas-nicku33/pandas/tests/io/parser/usecols.py:381: TODO: see gh-13253
SKIP [1] pandas/tests/plotting/test_series.py:667: Missing matplotlib dependency
SKIP [1] /Users/206673/open/pandas-nicku33/pandas/tests/tseries/offsets/test_offsets.py:164: cannot create out_of_range offset: TestBYearBegin object at 0x1259dbd50> year is out of range
...
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/18598 | 2017-12-01T19:12:57Z | 2017-12-01T19:15:23Z | 2017-12-01T19:15:23Z | 2017-12-01T19:15:25Z |
EHN: Add index parameter to to_json | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 32b548e5f32f1..f3dac20758441 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -136,6 +136,7 @@ Other Enhancements
- :func:`DataFrame.corrwith` now silently drops non-numeric columns when passed a Series. Before, an exception was raised (:issue:`18570`).
- :class:`IntervalIndex` now supports time zone aware ``Interval`` objects (:issue:`18537`, :issue:`18538`)
- :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`)
+- :func:``DataFrame.to_json`` and ``Series.to_json`` now accept an ``index`` argument which allows the user to exclude the index from the JSON output (:issue:`17394`)
.. _whatsnew_0220.api_breaking:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index ea4a645927d7b..79ba18140c651 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -1603,7 +1603,8 @@ def _repr_latex_(self):
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
- default_handler=None, lines=False, compression=None):
+ default_handler=None, lines=False, compression=None,
+ index=True):
"""
Convert the object to a JSON string.
@@ -1671,6 +1672,13 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
.. versionadded:: 0.21.0
+ index : boolean, default True
+ Whether to include the index values in the JSON string. Not
+ including the index (``index=False``) is only supported when
+ orient is 'split' or 'table'.
+
+ .. versionadded:: 0.22.0
+
Returns
-------
same type as input object with filtered info axis
@@ -1723,7 +1731,8 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
- lines=lines, compression=compression)
+ lines=lines, compression=compression,
+ index=index)
def to_hdf(self, path_or_buf, key, **kwargs):
"""Write the contained data to an HDF5 file using HDFStore.
diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py
index 21736673350d8..0e0aae0506809 100644
--- a/pandas/io/json/json.py
+++ b/pandas/io/json/json.py
@@ -28,7 +28,12 @@
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
- default_handler=None, lines=False, compression=None):
+ default_handler=None, lines=False, compression=None,
+ index=True):
+
+ if not index and orient not in ['split', 'table']:
+ raise ValueError("'index=False' is only valid when 'orient' is "
+ "'split' or 'table'")
path_or_buf = _stringify_path(path_or_buf)
if lines and orient != 'records':
@@ -49,7 +54,8 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch',
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
- date_unit=date_unit, default_handler=default_handler).write()
+ date_unit=date_unit, default_handler=default_handler,
+ index=index).write()
if lines:
s = _convert_to_line_delimits(s)
@@ -69,7 +75,7 @@ def to_json(path_or_buf, obj, orient=None, date_format='epoch',
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
- ensure_ascii, date_unit, default_handler=None):
+ ensure_ascii, date_unit, index, default_handler=None):
self.obj = obj
if orient is None:
@@ -81,6 +87,7 @@ def __init__(self, obj, orient, date_format, double_precision,
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
+ self.index = index
self.is_copy = None
self._format_axes()
@@ -89,14 +96,20 @@ def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
+ return self._write(self.obj, self.orient, self.double_precision,
+ self.ensure_ascii, self.date_unit,
+ self.date_format == 'iso', self.default_handler)
+
+ def _write(self, obj, orient, double_precision, ensure_ascii,
+ date_unit, iso_dates, default_handler):
return dumps(
- self.obj,
- orient=self.orient,
- double_precision=self.double_precision,
- ensure_ascii=self.ensure_ascii,
- date_unit=self.date_unit,
- iso_dates=self.date_format == 'iso',
- default_handler=self.default_handler
+ obj,
+ orient=orient,
+ double_precision=double_precision,
+ ensure_ascii=ensure_ascii,
+ date_unit=date_unit,
+ iso_dates=iso_dates,
+ default_handler=default_handler
)
@@ -108,6 +121,15 @@ def _format_axes(self):
raise ValueError("Series index must be unique for orient="
"'{orient}'".format(orient=self.orient))
+ def _write(self, obj, orient, double_precision, ensure_ascii,
+ date_unit, iso_dates, default_handler):
+ if not self.index and orient == 'split':
+ obj = {"name": obj.name, "data": obj.values}
+ return super(SeriesWriter, self)._write(obj, orient,
+ double_precision,
+ ensure_ascii, date_unit,
+ iso_dates, default_handler)
+
class FrameWriter(Writer):
_default_orient = 'columns'
@@ -123,12 +145,22 @@ def _format_axes(self):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
+ def _write(self, obj, orient, double_precision, ensure_ascii,
+ date_unit, iso_dates, default_handler):
+ if not self.index and orient == 'split':
+ obj = obj.to_dict(orient='split')
+ del obj["index"]
+ return super(FrameWriter, self)._write(obj, orient,
+ double_precision,
+ ensure_ascii, date_unit,
+ iso_dates, default_handler)
+
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
- ensure_ascii, date_unit, default_handler=None):
+ ensure_ascii, date_unit, index, default_handler=None):
"""
Adds a `schema` attribut with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
@@ -137,7 +169,7 @@ def __init__(self, obj, orient, date_format, double_precision,
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
- date_unit, default_handler=default_handler)
+ date_unit, index, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
@@ -146,7 +178,7 @@ def __init__(self, obj, orient, date_format, double_precision,
.format(fmt=date_format))
raise ValueError(msg)
- self.schema = build_table_schema(obj)
+ self.schema = build_table_schema(obj, index=self.index)
# NotImplementd on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
@@ -168,14 +200,24 @@ def __init__(self, obj, orient, date_format, double_precision,
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
- self.obj = obj.reset_index()
+ # exclude index from obj if index=False
+ if not self.index:
+ self.obj = obj.reset_index(drop=True)
+ else:
+ self.obj = obj.reset_index(drop=False)
self.date_format = 'iso'
self.orient = 'records'
-
- def write(self):
- data = super(JSONTableWriter, self).write()
+ self.index = index
+
+ def _write(self, obj, orient, double_precision, ensure_ascii,
+ date_unit, iso_dates, default_handler):
+ data = super(JSONTableWriter, self)._write(obj, orient,
+ double_precision,
+ ensure_ascii, date_unit,
+ iso_dates,
+ default_handler)
serialized = '{{"schema": {schema}, "data": {data}}}'.format(
- schema=dumps(self.schema), data=data)
+ schema=dumps(self.schema), data=data)
return serialized
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py
index fe447534efdc7..7cf3d6cd7b612 100644
--- a/pandas/tests/io/json/test_pandas.py
+++ b/pandas/tests/io/json/test_pandas.py
@@ -9,6 +9,7 @@
read_json, compat)
from datetime import timedelta
import pandas as pd
+import json
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
@@ -1147,3 +1148,64 @@ def test_data_frame_size_after_to_json(self):
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
+
+ @pytest.mark.parametrize('data, expected', [
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
+ {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo'),
+ {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
+ index=[['a', 'b'], ['c', 'd']]),
+ {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
+ (Series([1, 2, 3], name='A'),
+ {'name': 'A', 'data': [1, 2, 3]}),
+ (Series([1, 2, 3], name='A').rename_axis('foo'),
+ {'name': 'A', 'data': [1, 2, 3]}),
+ (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']]),
+ {'name': 'A', 'data': [1, 2]}),
+ ])
+ def test_index_false_to_json_split(self, data, expected):
+ # GH 17394
+ # Testing index=False in to_json with orient='split'
+
+ result = data.to_json(orient='split', index=False)
+ result = json.loads(result)
+
+ assert result == expected
+
+ @pytest.mark.parametrize('data', [
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])),
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo')),
+ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
+ index=[['a', 'b'], ['c', 'd']])),
+ (Series([1, 2, 3], name='A')),
+ (Series([1, 2, 3], name='A').rename_axis('foo')),
+ (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']])),
+ ])
+ def test_index_false_to_json_table(self, data):
+ # GH 17394
+ # Testing index=False in to_json with orient='table'
+
+ result = data.to_json(orient='table', index=False)
+ result = json.loads(result)
+
+ expected = {
+ 'schema': pd.io.json.build_table_schema(data, index=False),
+ 'data': DataFrame(data).to_dict(orient='records')
+ }
+
+ assert result == expected
+
+ @pytest.mark.parametrize('orient', [
+ 'records', 'index', 'columns', 'values'
+ ])
+ def test_index_false_error_to_json(self, orient):
+ # GH 17394
+ # Testing error message from to_json with index=False
+
+ df = pd.DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])
+
+ with tm.assert_raises_regex(ValueError, "'index=False' is only "
+ "valid when 'orient' is "
+ "'split' or 'table'"):
+ df.to_json(orient=orient, index=False)
| - [ ] closes #17394
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Examples:
```
In [1]: df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'b', 'c'])
In [2]: df.to_json(orient='split', index=True)
Out[2]:
{"columns":["a","b","c"],"index":[0,1],"data":[[1,2,3],[4,5,6]]}
In [3]: df.to_json(orient='split', index=False)
Out[3]:
{"columns":["a","b","c"],"data":[[1,2,3],[4,5,6]]}
In [4]: df.to_json(orient='table', index=True)
Out[4]:
{"schema": {"fields":[{"name":"index","type":"integer"},{"name":"a","type":"integer"},{"name":"b","type":"integer"},{"name":"c","type":"integer"}],"primaryKey":["index"],"pandas_version":"0.20.0"}, "data": [{"index":0,"a":1,"b":2,"c":3},{"index":1,"a":4,"b":5,"c":6}]}
In [5]: df.to_json(orient='table', index=False)
Out[5]:
{"schema": {"fields":[{"name":"a","type":"integer"},{"name":"b","type":"integer"},{"name":"c","type":"integer"}],"pandas_version":"0.20.0"}, "data": [{"a":1,"b":2,"c":3},{"a":4,"b":5,"c":6}]}
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18591 | 2017-12-01T14:33:43Z | 2017-12-10T15:26:29Z | 2017-12-10T15:26:29Z | 2017-12-10T16:13:21Z |
BUG: Unwanted conversion from timedelta to float (#18493) | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index bebfd0ab50e90..3d4850b334ff9 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -74,6 +74,7 @@ Indexing
- Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`)
- Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`)
- Bug in ``Index.putmask`` when an invalid mask passed (:issue:`18368`)
+- Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`)
-
I/O
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 4f25a19d437ca..1d1d71be16c00 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1956,7 +1956,8 @@ def _can_hold_element(self, element):
tipo = maybe_infer_dtype_type(element)
if tipo is not None:
return issubclass(tipo.type, np.timedelta64)
- return isinstance(element, (timedelta, np.timedelta64))
+ return is_integer(element) or isinstance(
+ element, (timedelta, np.timedelta64))
def fillna(self, value, **kwargs):
diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py
index 32609362e49af..3ad3b771b2ab2 100644
--- a/pandas/tests/indexing/test_timedelta.py
+++ b/pandas/tests/indexing/test_timedelta.py
@@ -2,6 +2,7 @@
import pandas as pd
from pandas.util import testing as tm
+import numpy as np
class TestTimedeltaIndexing(object):
@@ -47,3 +48,23 @@ def test_string_indexing(self):
expected = df.iloc[0]
sliced = df.loc['0 days']
tm.assert_series_equal(sliced, expected)
+
+ @pytest.mark.parametrize(
+ "value",
+ [None, pd.NaT, np.nan])
+ def test_masked_setitem(self, value):
+ # issue (#18586)
+ series = pd.Series([0, 1, 2], dtype='timedelta64[ns]')
+ series[series == series[0]] = value
+ expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]')
+ tm.assert_series_equal(series, expected)
+
+ @pytest.mark.parametrize(
+ "value",
+ [None, pd.NaT, np.nan])
+ def test_listlike_setitem(self, value):
+ # issue (#18586)
+ series = pd.Series([0, 1, 2], dtype='timedelta64[ns]')
+ series.iloc[0] = value
+ expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]')
+ tm.assert_series_equal(series, expected)
| - [ ] closes #18493
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/18586 | 2017-12-01T08:50:04Z | 2017-12-02T17:43:02Z | 2017-12-02T17:43:01Z | 2017-12-11T20:21:11Z |
CLN/DOC: Interval and IntervalIndex classes | diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 39b26c61172ed..822df1ce2b968 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -14,30 +14,46 @@ import numbers
_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
-cdef class IntervalMixin:
- property closed_left:
- def __get__(self):
- return self.closed == 'left' or self.closed == 'both'
-
- property closed_right:
- def __get__(self):
- return self.closed == 'right' or self.closed == 'both'
-
- property open_left:
- def __get__(self):
- return not self.closed_left
-
- property open_right:
- def __get__(self):
- return not self.closed_right
-
- property mid:
- def __get__(self):
- try:
- return 0.5 * (self.left + self.right)
- except TypeError:
- # datetime safe version
- return self.left + 0.5 * (self.right - self.left)
+cdef class IntervalMixin(object):
+
+ @property
+ def closed_left(self):
+ """
+ Return True if the Interval is closed on the left-side, else False
+ """
+ return self.closed in ('left', 'both')
+
+ @property
+ def closed_right(self):
+ """
+ Return True if the Interval is closed on the right-side, else False
+ """
+ return self.closed in ('right', 'both')
+
+ @property
+ def open_left(self):
+ """
+ Return True if the Interval is open on the left-side, else False
+ """
+ return not self.closed_left
+
+ @property
+ def open_right(self):
+ """
+ Return True if the Interval is open on the right-side, else False
+ """
+ return not self.closed_right
+
+ @property
+ def mid(self):
+ """
+ Return the midpoint of the Interval
+ """
+ try:
+ return 0.5 * (self.left + self.right)
+ except TypeError:
+ # datetime safe version
+ return self.left + 0.5 * (self.right - self.left)
cdef _interval_like(other):
@@ -55,12 +71,12 @@ cdef class Interval(IntervalMixin):
Parameters
----------
left : value
- Left bound for interval.
+ Left bound for the interval
right : value
- Right bound for interval.
- closed : {'left', 'right', 'both', 'neither'}
+ Right bound for the interval
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the interval is closed on the left-side, right-side, both or
- neither. Defaults to 'right'.
+ neither
Examples
--------
@@ -77,20 +93,30 @@ cdef class Interval(IntervalMixin):
See Also
--------
- IntervalIndex : an Index of ``interval`` s that are all closed on the same
- side.
- cut, qcut : convert arrays of continuous data into categoricals/series of
- ``Interval``.
+ IntervalIndex : An Index of Interval objects that are all closed on the
+ same side.
+ cut, qcut : Convert arrays of continuous data into Categoricals/Series of
+ Interval.
"""
- cdef readonly object left, right
+ cdef readonly object left
+ """Left bound for the interval"""
+
+ cdef readonly object right
+ """Right bound for the interval"""
+
cdef readonly str closed
+ """
+ Whether the interval is closed on the left-side, right-side, both or
+ neither
+ """
def __init__(self, left, right, str closed='right'):
# note: it is faster to just do these checks than to use a special
# constructor (__cinit__/__new__) to avoid them
if closed not in _VALID_CLOSED:
- raise ValueError("invalid option for 'closed': %s" % closed)
+ msg = "invalid option for 'closed': {closed}".format(closed=closed)
+ raise ValueError(msg)
if not left <= right:
raise ValueError('left side of interval must be <= right side')
self.left = left
@@ -122,10 +148,11 @@ cdef class Interval(IntervalMixin):
if op == Py_EQ or op == Py_NE:
return NotImplemented
else:
+ name = type(self).__name__
+ other = type(other).__name__
op_str = {Py_LT: '<', Py_LE: '<=', Py_GT: '>', Py_GE: '>='}[op]
- raise TypeError(
- 'unorderable types: %s() %s %s()' %
- (type(self).__name__, op_str, type(other).__name__))
+ raise TypeError('unorderable types: {name}() {op} {other}()'
+ .format(name=name, op=op_str, other=other))
def __reduce__(self):
args = (self.left, self.right, self.closed)
@@ -145,15 +172,18 @@ cdef class Interval(IntervalMixin):
def __repr__(self):
left, right = self._repr_base()
- return ('%s(%r, %r, closed=%r)' %
- (type(self).__name__, left, right, self.closed))
+ name = type(self).__name__
+ repr_str = '{name}({left!r}, {right!r}, closed={closed!r})'.format(
+ name=name, left=left, right=right, closed=self.closed)
+ return repr_str
def __str__(self):
left, right = self._repr_base()
start_symbol = '[' if self.closed_left else '('
end_symbol = ']' if self.closed_right else ')'
- return '%s%s, %s%s' % (start_symbol, left, right, end_symbol)
+ return '{start}{left}, {right}{end}'.format(
+ start=start_symbol, left=left, right=right, end=end_symbol)
def __add__(self, y):
if isinstance(y, numbers.Number):
@@ -222,8 +252,8 @@ cpdef intervals_to_interval_bounds(ndarray intervals):
continue
if not isinstance(interval, Interval):
- raise TypeError("type {} with value {} is not an interval".format(
- type(interval), interval))
+ raise TypeError("type {typ} with value {iv} is not an interval"
+ .format(typ=type(interval), iv=interval))
left[i] = interval.left
right[i] = interval.right
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 3f74694880533..02ac74e619fa4 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -61,8 +61,8 @@ def _get_next_label(label):
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
- raise TypeError('cannot determine next label for type %r'
- % type(label))
+ raise TypeError('cannot determine next label for type {typ!r}'
+ .format(typ=type(label)))
def _get_prev_label(label):
@@ -76,8 +76,8 @@ def _get_prev_label(label):
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
- raise TypeError('cannot determine next label for type %r'
- % type(label))
+ raise TypeError('cannot determine next label for type {typ!r}'
+ .format(typ=type(label)))
def _get_interval_closed_bounds(interval):
@@ -94,17 +94,18 @@ def _get_interval_closed_bounds(interval):
def _new_IntervalIndex(cls, d):
- """ This is called upon unpickling,
- rather than the default which doesn't
- have arguments and breaks __new__ """
-
+ """
+ This is called upon unpickling, rather than the default which doesn't have
+ arguments and breaks __new__
+ """
return cls.from_arrays(**d)
class IntervalIndex(IntervalMixin, Index):
"""
Immutable Index implementing an ordered, sliceable set. IntervalIndex
- represents an Index of intervals that are all closed on the same side.
+ represents an Index of Interval objects that are all closed on the same
+ side.
.. versionadded:: 0.20.0
@@ -117,9 +118,9 @@ class IntervalIndex(IntervalMixin, Index):
----------
left, right : array-like (1-dimensional)
Left and right bounds for each interval.
- closed : {'left', 'right', 'both', 'neither'}, optional
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both or
- neither. Defaults to 'right'.
+ neither.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
@@ -146,7 +147,7 @@ class IntervalIndex(IntervalMixin, Index):
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
- methods :meth:`IntervalIndex.from_arrays`,
+ methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_intervals`
and :meth:`IntervalIndex.from_tuples`.
@@ -162,12 +163,10 @@ class IntervalIndex(IntervalMixin, Index):
See Also
--------
Index : The base pandas Index type
- Interval : A bounded slice-like interval
- interval_range : Function to create a fixed frequency
- IntervalIndex, IntervalIndex.from_arrays, IntervalIndex.from_breaks,
- IntervalIndex.from_intervals, IntervalIndex.from_tuples
- cut, qcut : convert arrays of continuous data into categoricals/series of
- ``Interval``.
+ Interval : A bounded slice-like interval; the elements of an IntervalIndex
+ interval_range : Function to create a fixed frequency IntervalIndex
+ cut, qcut : Convert arrays of continuous data into Categoricals/Series of
+ Intervals
"""
_typ = 'intervalindex'
_comparables = ['name']
@@ -232,9 +231,9 @@ def _simple_new(cls, left, right, closed=None, name=None,
left = left.astype(right.dtype)
if type(left) != type(right):
- raise ValueError("must not have differing left [{}] "
- "and right [{}] types".format(
- type(left), type(right)))
+ raise ValueError("must not have differing left [{left}] "
+ "and right [{right}] types"
+ .format(left=type(left), right=type(right)))
if isinstance(left, ABCPeriodIndex):
raise ValueError("Period dtypes are not supported, "
@@ -279,7 +278,8 @@ def _validate(self):
Verify that the IntervalIndex is valid.
"""
if self.closed not in _VALID_CLOSED:
- raise ValueError("invalid options for 'closed': %s" % self.closed)
+ raise ValueError("invalid options for 'closed': {closed}"
+ .format(closed=self.closed))
if len(self.left) != len(self.right):
raise ValueError('left and right must have the same length')
left_mask = notna(self.left)
@@ -293,12 +293,15 @@ def _validate(self):
@cache_readonly
def hasnans(self):
- """ return if I have any nans; enables various perf speedups """
+ """
+ Return if the IntervalIndex has any nans; enables various performance
+ speedups
+ """
return self._isnan.any()
@cache_readonly
def _isnan(self):
- """ return if each value is nan"""
+ """Return a mask indicating if each value is NA"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@@ -335,7 +338,7 @@ def __contains__(self, key):
def contains(self, key):
"""
- return a boolean if this key is IN the index
+ Return a boolean indicating if the key is IN the index
We accept / allow keys to be not *just* actual
objects.
@@ -363,9 +366,9 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False):
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
- closed : {'left', 'right', 'both', 'neither'}, optional
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither. Defaults to 'right'.
+ or neither.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
@@ -404,9 +407,9 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False):
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
- closed : {'left', 'right', 'both', 'neither'}, optional
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither. Defaults to 'right'.
+ or neither.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
@@ -491,9 +494,9 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
----------
data : array-like (1-dimensional)
Array of tuples
- closed : {'left', 'right', 'both', 'neither'}, optional
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
- or neither. Defaults to 'right'.
+ or neither.
name : object, optional
Name to be stored in the index.
copy : boolean, default False
@@ -521,15 +524,12 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
left = right = data
for d in data:
-
if isna(d):
- left.append(np.nan)
- right.append(np.nan)
- continue
-
- l, r = d
- left.append(l)
- right.append(r)
+ lhs = rhs = np.nan
+ else:
+ lhs, rhs = d
+ left.append(lhs)
+ right.append(rhs)
# TODO
# if we have nulls and we previous had *only*
@@ -538,6 +538,7 @@ def from_tuples(cls, data, closed='right', name=None, copy=False):
return cls.from_arrays(left, right, closed, name=name, copy=False)
def to_tuples(self):
+ """Return an Index of tuples of the form (left, right)"""
return Index(_asarray_tuplesafe(zip(self.left, self.right)))
@cache_readonly
@@ -547,14 +548,26 @@ def _multiindex(self):
@property
def left(self):
+ """
+ Return the left endpoints of each Interval in the IntervalIndex as
+ an Index
+ """
return self._left
@property
def right(self):
+ """
+ Return the right endpoints of each Interval in the IntervalIndex as
+ an Index
+ """
return self._right
@property
def closed(self):
+ """
+ Whether the intervals are closed on the left-side, right-side, both or
+ neither
+ """
return self._closed
def __len__(self):
@@ -563,7 +576,7 @@ def __len__(self):
@cache_readonly
def values(self):
"""
- Returns the IntervalIndex's data as a numpy array of Interval
+ Return the IntervalIndex's data as a numpy array of Interval
objects (with dtype='object')
"""
left = self.left
@@ -615,14 +628,17 @@ def astype(self, dtype, copy=True):
elif is_categorical_dtype(dtype):
from pandas import Categorical
return Categorical(self, ordered=True)
- raise ValueError('Cannot cast IntervalIndex to dtype %s' % dtype)
+ raise ValueError('Cannot cast IntervalIndex to dtype {dtype}'
+ .format(dtype=dtype))
@cache_readonly
def dtype(self):
+ """Return the dtype object of the underlying data"""
return IntervalDtype.construct_from_string(str(self.left.dtype))
@property
def inferred_type(self):
+ """Return a string of the type inferred from the values"""
return 'interval'
@Appender(Index.memory_usage.__doc__)
@@ -634,7 +650,8 @@ def memory_usage(self, deep=False):
@cache_readonly
def mid(self):
- """Returns the mid-point of each interval in the index as an array
+ """
+ Return the midpoint of each Interval in the IntervalIndex as an Index
"""
try:
return Index(0.5 * (self.left.values + self.right.values))
@@ -645,22 +662,42 @@ def mid(self):
@cache_readonly
def is_monotonic(self):
+ """
+ Return True if the IntervalIndex is monotonic increasing (only equal or
+ increasing values), else False
+ """
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
+ """
+ Return True if the IntervalIndex is monotonic increasing (only equal or
+ increasing values), else False
+ """
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
+ """
+ Return True if the IntervalIndex is monotonic decreasing (only equal or
+ decreasing values), else False
+ """
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
+ """
+ Return True if the IntervalIndex contains unique elements, else False
+ """
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
+ """
+ Return True if the IntervalIndex is non-overlapping (no Intervals share
+ points) and is either monotonic increasing or monotonic decreasing,
+ else False
+ """
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
@@ -725,9 +762,8 @@ def _check_method(self, method):
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
- raise NotImplementedError(
- 'method {} not yet implemented for '
- 'IntervalIndex'.format(method))
+ msg = 'method {method} not yet implemented for IntervalIndex'
+ raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
@@ -866,17 +902,14 @@ def get_value(self, series, key):
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
- raise ValueError("cannot support not-default "
- "step in a slice")
+ raise ValueError("cannot support not-default step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
-
- # we didn't find exact intervals
- # or are non-unique
- raise ValueError("unable to slice with "
- "this key: {}".format(key))
+ # we didn't find exact intervals or are non-unique
+ msg = "unable to slice with this key: {key}".format(key=key)
+ raise ValueError(msg)
else:
loc = self.get_loc(key)
@@ -929,31 +962,31 @@ def _get_reindexer(self, target):
indexer = []
n = len(self)
- for i, (l, r) in enumerate(zip(lindexer, rindexer)):
+ for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
- if (l != -1 and
+ if (lhs != -1 and
self.closed == 'right' and
- target_value.left == self[l].right):
- l += 1
+ target_value.left == self[lhs].right):
+ lhs += 1
# matching on the lhs bound
- if (r != -1 and
+ if (rhs != -1 and
self.closed == 'left' and
- target_value.right == self[r].left):
- r -= 1
+ target_value.right == self[rhs].left):
+ rhs -= 1
# not found
- if l == -1 and r == -1:
+ if lhs == -1 and rhs == -1:
indexer.append(np.array([-1]))
- elif r == -1:
+ elif rhs == -1:
- indexer.append(np.arange(l, n))
+ indexer.append(np.arange(lhs, n))
- elif l == -1:
+ elif lhs == -1:
# care about left/right closed here
value = self[i]
@@ -976,10 +1009,10 @@ def _get_reindexer(self, target):
indexer.append(np.array([-1]))
continue
- indexer.append(np.arange(0, r + 1))
+ indexer.append(np.arange(0, rhs + 1))
else:
- indexer.append(np.arange(l, r + 1))
+ indexer.append(np.arange(lhs, rhs + 1))
return np.concatenate(indexer)
@@ -996,11 +1029,32 @@ def where(self, cond, other=None):
return self._shallow_copy(values)
def delete(self, loc):
+ """
+ Return a new IntervalIndex with passed location(-s) deleted
+
+ Returns
+ -------
+ new_index : IntervalIndex
+ """
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
+ """
+ Return a new IntervalIndex inserting new item at location. Follows
+ Python list.append semantics for negative values. Only Interval
+ objects and NA can be inserted into an IntervalIndex
+
+ Parameters
+ ----------
+ loc : int
+ item : object
+
+ Returns
+ -------
+ new_index : IntervalIndex
+ """
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
@@ -1108,23 +1162,23 @@ def _format_data(self, name=None):
summary = '[]'
elif n == 1:
first = formatter(self[0])
- summary = '[{}]'.format(first)
+ summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
- summary = '[{}, {}]'.format(first, last)
+ summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
- summary = '[{} ... {}]'.format(', '.join(head),
- ', '.join(tail))
+ summary = '[{head} ... {tail}]'.format(
+ head=', '.join(head), tail=', '.join(tail))
else:
head = []
tail = [formatter(x) for x in self]
- summary = '[{}]'.format(', '.join(tail))
+ summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + self._format_space()
@@ -1132,17 +1186,20 @@ def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
- attrs.append(('dtype', "'%s'" % self.dtype))
+ attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
- return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
+ space = ' ' * (len(self.__class__.__name__) + 1)
+ return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
-
+ """
+ Determines if two IntervalIndex objects contain the same elements
+ """
if self.is_(other):
return True
@@ -1216,8 +1273,9 @@ def interval_range(start=None, end=None, periods=None, freq=None,
for numeric and 'D' (calendar daily) for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
- closed : string, default 'right'
- options are: 'left', 'right', 'both', 'neither'
+ closed : {'left', 'right', 'both', 'neither'}, default 'right'
+ Whether the intervals are closed on the left-side, right-side, both
+ or neither.
Notes
-----
| Progress towards #16130
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
Changes shouldn't impact the behavior of `Interval` or `IntervalIndex`:
- Restructured the `IntervalMixin` class, as it was using legacy syntax for defining properties ([see here](http://cython.readthedocs.io/en/latest/src/userguide/extension_types.html#properties))
- Added docstrings to various `Interval` and `IntervalIndex` methods and attributes
- Replaced `%` syntax with `.format`
- Minor variable renaming for additional clarity, e.g. `l` -> `lhs`
- My linter was complaining PEP 8 (E741): ambiguous variable name 'l'
| https://api.github.com/repos/pandas-dev/pandas/pulls/18585 | 2017-12-01T05:12:58Z | 2017-12-01T11:30:30Z | 2017-12-01T11:30:30Z | 2017-12-01T18:06:14Z |
API/BUG: .apply will correctly infer output shape when axis=1 | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index 18da53506f018..fb9e5a6cc75cb 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -793,8 +793,14 @@ The :meth:`~DataFrame.apply` method will also dispatch on a string method name.
df.apply('mean')
df.apply('mean', axis=1)
-Depending on the return type of the function passed to :meth:`~DataFrame.apply`,
-the result will either be of lower dimension or the same dimension.
+The return type of the function passed to :meth:`~DataFrame.apply` affects the
+type of the ultimate output from DataFrame.apply
+
+* If the applied function returns a ``Series``, the ultimate output is a ``DataFrame``.
+ The columns match the index of the ``Series`` returned by the applied function.
+* If the applied function returns any other type, the ultimate output is a ``Series``.
+* A ``result_type`` kwarg is accepted with the options: ``reduce``, ``broadcast``, and ``expand``.
+ These will determine how list-likes return results expand (or not) to a ``DataFrame``.
:meth:`~DataFrame.apply` combined with some cleverness can be used to answer many questions
about a data set. For example, suppose we wanted to extract the date where the
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 7322bd9fe3327..8d3dad5a6fe28 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -142,7 +142,7 @@ Previous Behavior:
4 NaN
dtype: float64
-Current Behavior
+Current Behavior:
.. ipython:: python
@@ -167,7 +167,7 @@ Previous Behavior:
3 2.5
dtype: float64
-Current Behavior
+Current Behavior:
.. ipython:: python
@@ -332,6 +332,73 @@ Convert to an xarray DataArray
p.to_xarray()
+.. _whatsnew_0230.api_breaking.apply:
+
+Apply Changes
+~~~~~~~~~~~~~
+
+:func:`DataFrame.apply` was inconsistent when applying an arbitrary user-defined-function that returned a list-like with ``axis=1``. Several bugs and inconsistencies
+are resolved. If the applied function returns a Series, then pandas will return a DataFrame; otherwise a Series will be returned, this includes the case
+where a list-like (e.g. ``tuple`` or ``list`` is returned), (:issue:`16353`, :issue:`17437`, :issue:`17970`, :issue:`17348`, :issue:`17892`, :issue:`18573`,
+:issue:`17602`, :issue:`18775`, :issue:`18901`, :issue:`18919`)
+
+.. ipython:: python
+
+ df = pd.DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1, columns=['A', 'B', 'C'])
+ df
+
+Previous Behavior. If the returned shape happened to match the original columns, this would return a ``DataFrame``.
+If the return shape did not match, a ``Series`` with lists was returned.
+
+.. code-block:: python
+
+ In [3]: df.apply(lambda x: [1, 2, 3], axis=1)
+ Out[3]:
+ A B C
+ 0 1 2 3
+ 1 1 2 3
+ 2 1 2 3
+ 3 1 2 3
+ 4 1 2 3
+ 5 1 2 3
+
+ In [4]: df.apply(lambda x: [1, 2], axis=1)
+ Out[4]:
+ 0 [1, 2]
+ 1 [1, 2]
+ 2 [1, 2]
+ 3 [1, 2]
+ 4 [1, 2]
+ 5 [1, 2]
+ dtype: object
+
+
+New Behavior. The behavior is consistent. These will *always* return a ``Series``.
+
+.. ipython:: python
+
+ df.apply(lambda x: [1, 2, 3], axis=1)
+ df.apply(lambda x: [1, 2], axis=1)
+
+To have expanded columns, you can use ``result_type='expand'``
+
+.. ipython:: python
+
+ df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
+
+To have broadcast the result across, you can use ``result_type='broadcast'``. The shape
+must match the original columns.
+
+.. ipython:: python
+
+ df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
+
+Returning a ``Series`` allows one to control the exact return structure and column names:
+
+.. ipython:: python
+
+ df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
+
.. _whatsnew_0230.api_breaking.build_changes:
@@ -456,6 +523,8 @@ Deprecations
- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`).
- ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`)
- :func:``DataFrame.from_items`` is deprecated. Use :func:``DataFrame.from_dict()`` instead, or :func:``DataFrame.from_dict(OrderedDict())`` if you wish to preserve the key order (:issue:`17320`)
+- The ``broadcast`` parameter of ``.apply()`` is removed in favor of ``result_type='broadcast'`` (:issue:`18577`)
+- The ``reduce`` parameter of ``.apply()`` is removed in favor of ``result_type='reduce'`` (:issue:`18577`)
.. _whatsnew_0230.prior_deprecations:
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 4cdec54b9a07a..c65943fbbb201 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,15 +1,20 @@
+import warnings
import numpy as np
from pandas import compat
from pandas._libs import reduction
+from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
is_sequence)
+from pandas.util._decorators import cache_readonly
from pandas.io.formats.printing import pprint_thing
-def frame_apply(obj, func, axis=0, broadcast=False,
- raw=False, reduce=None, args=(), **kwds):
+def frame_apply(obj, func, axis=0, broadcast=None,
+ raw=False, reduce=None, result_type=None,
+ ignore_failures=False,
+ args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
@@ -19,20 +24,49 @@ def frame_apply(obj, func, axis=0, broadcast=False,
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
- raw=raw, reduce=reduce, args=args, kwds=kwds)
+ raw=raw, reduce=reduce, result_type=result_type,
+ ignore_failures=ignore_failures,
+ args=args, kwds=kwds)
class FrameApply(object):
- def __init__(self, obj, func, broadcast, raw, reduce, args, kwds):
+ def __init__(self, obj, func, broadcast, raw, reduce, result_type,
+ ignore_failures, args, kwds):
self.obj = obj
- self.broadcast = broadcast
self.raw = raw
- self.reduce = reduce
- self.args = args
-
- self.ignore_failures = kwds.pop('ignore_failures', False)
- self.kwds = kwds
+ self.ignore_failures = ignore_failures
+ self.args = args or ()
+ self.kwds = kwds or {}
+
+ if result_type not in [None, 'reduce', 'broadcast', 'expand']:
+ raise ValueError("invalid value for result_type, must be one "
+ "of {None, 'reduce', 'broadcast', 'expand'}")
+
+ if broadcast is not None:
+ warnings.warn("The broadcast argument is deprecated and will "
+ "be removed in a future version. You can specify "
+ "result_type='broadcast' to broadcast the result "
+ "to the original dimensions",
+ FutureWarning, stacklevel=4)
+ if broadcast:
+ result_type = 'broadcast'
+
+ if reduce is not None:
+ warnings.warn("The reduce argument is deprecated and will "
+ "be removed in a future version. You can specify "
+ "result_type='reduce' to try to reduce the result "
+ "to the original dimensions",
+ FutureWarning, stacklevel=4)
+ if reduce:
+
+ if result_type is not None:
+ raise ValueError(
+ "cannot pass both reduce=True and result_type")
+
+ result_type = 'reduce'
+
+ self.result_type = result_type
# curry if needed
if kwds or args and not isinstance(func, np.ufunc):
@@ -43,6 +77,11 @@ def f(x):
self.f = f
+ # results
+ self.result = None
+ self.res_index = None
+ self.res_columns = None
+
@property
def columns(self):
return self.obj.columns
@@ -51,10 +90,14 @@ def columns(self):
def index(self):
return self.obj.index
- @property
+ @cache_readonly
def values(self):
return self.obj.values
+ @cache_readonly
+ def dtypes(self):
+ return self.obj.dtypes
+
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
@@ -68,8 +111,7 @@ def get_result(self):
# string dispatch
if isinstance(self.f, compat.string_types):
- if self.axis:
- self.kwds['axis'] = self.axis
+ self.kwds['axis'] = self.axis
return getattr(self.obj, self.f)(*self.args, **self.kwds)
# ufunc
@@ -80,25 +122,37 @@ def get_result(self):
columns=self.columns, copy=False)
# broadcasting
- if self.broadcast:
+ if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
- if not all(self.obj.shape):
+ elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
- if self.raw and not self.obj._is_mixed_type:
+ elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
- from pandas import Series
- reduce = self.reduce
+ """
+ we have an empty result; at least 1 axis is 0
+
+ we will try to apply the function to an empty
+ series in order to see if this is a reduction function
+ """
+
+ # we are not asked to reduce or infer reduction
+ # so just return a copy of the existing object
+ if self.result_type not in ['reduce', None]:
+ return self.obj.copy()
+
+ # we may need to infer
+ reduce = self.result_type == 'reduce'
- if reduce is None:
- reduce = False
+ from pandas import Series
+ if not reduce:
EMPTY_SERIES = Series([])
try:
@@ -113,6 +167,8 @@ def apply_empty_result(self):
return self.obj.copy()
def apply_raw(self):
+ """ apply to the values as a numpy array """
+
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
@@ -125,49 +181,70 @@ def apply_raw(self):
else:
return Series(result, index=self.agg_axis)
- def apply_standard(self):
- from pandas import Series
+ def apply_broadcast(self, target):
+ result_values = np.empty_like(target.values)
+
+ # axis which we want to compare compliance
+ result_compare = target.shape[0]
+
+ for i, col in enumerate(target.columns):
+ res = self.f(target[col])
+ ares = np. asarray(res).ndim
+
+ # must be a scalar or 1d
+ if ares > 1:
+ raise ValueError("too many dims to broadcast")
+ elif ares == 1:
+
+ # must match return dim
+ if result_compare != len(res):
+ raise ValueError("cannot broadcast result")
- reduce = self.reduce
- if reduce is None:
- reduce = True
+ result_values[:, i] = res
+
+ # we *always* preserve the original index / columns
+ result = self.obj._constructor(result_values,
+ index=target.index,
+ columns=target.columns)
+ return result
+
+ def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
- if reduce:
- values = self.values
- # we cannot reduce using non-numpy dtypes,
- # as demonstrated in gh-12244
- if not is_extension_type(values):
+ # we cannot reduce using non-numpy dtypes,
+ # as demonstrated in gh-12244
+ if (self.result_type in ['reduce', None] and
+ not self.dtypes.apply(is_extension_type).any()):
- # Create a dummy Series from an empty array
- index = self.obj._get_axis(self.axis)
- empty_arr = np.empty(len(index), dtype=values.dtype)
-
- dummy = Series(empty_arr, index=index, dtype=values.dtype)
+ # Create a dummy Series from an empty array
+ from pandas import Series
+ values = self.values
+ index = self.obj._get_axis(self.axis)
+ labels = self.agg_axis
+ empty_arr = np.empty(len(index), dtype=values.dtype)
+ dummy = Series(empty_arr, index=index, dtype=values.dtype)
- try:
- labels = self.agg_axis
- result = reduction.reduce(values, self.f,
- axis=self.axis,
- dummy=dummy,
- labels=labels)
- return Series(result, index=labels)
- except Exception:
- pass
+ try:
+ result = reduction.reduce(values, self.f,
+ axis=self.axis,
+ dummy=dummy,
+ labels=labels)
+ return Series(result, index=labels)
+ except Exception:
+ pass
# compute the result using the series generator
- results, res_index, res_columns = self._apply_series_generator()
+ self.apply_series_generator()
# wrap results
- return self.wrap_results(results, res_index, res_columns)
+ return self.wrap_results()
- def _apply_series_generator(self):
+ def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
- res_columns = self.result_columns
i = None
keys = []
@@ -201,40 +278,23 @@ def _apply_series_generator(self):
pprint_thing(k), )
raise
- return results, res_index, res_columns
+ self.results = results
+ self.res_index = res_index
+ self.res_columns = self.result_columns
- def wrap_results(self, results, res_index, res_columns):
- from pandas import Series
+ def wrap_results(self):
+ results = self.results
+ # see if we can infer the results
if len(results) > 0 and is_sequence(results[0]):
- if not isinstance(results[0], Series):
- index = res_columns
- else:
- index = None
- result = self.obj._constructor(data=results, index=index)
- result.columns = res_index
+ return self.wrap_results_for_axis()
- if self.axis == 1:
- result = result.T
- result = result._convert(
- datetime=True, timedelta=True, copy=False)
-
- else:
-
- result = Series(results)
- result.index = res_index
-
- return result
-
- def _apply_broadcast(self, target):
- result_values = np.empty_like(target.values)
- columns = target.columns
- for i, col in enumerate(columns):
- result_values[:, i] = self.f(target[col])
+ # dict of scalars
+ from pandas import Series
+ result = Series(results)
+ result.index = self.res_index
- result = self.obj._constructor(result_values, index=target.index,
- columns=target.columns)
return result
@@ -251,7 +311,7 @@ def get_result(self):
return super(FrameRowApply, self).get_result()
def apply_broadcast(self):
- return self._apply_broadcast(self.obj)
+ return super(FrameRowApply, self).apply_broadcast(self.obj)
@property
def series_generator(self):
@@ -266,29 +326,37 @@ def result_index(self):
def result_columns(self):
return self.index
+ def wrap_results_for_axis(self):
+ """ return the results for the rows """
-class FrameColumnApply(FrameApply):
- axis = 1
+ results = self.results
+ result = self.obj._constructor(data=results)
- def __init__(self, obj, func, broadcast, raw, reduce, args, kwds):
- super(FrameColumnApply, self).__init__(obj, func, broadcast,
- raw, reduce, args, kwds)
+ if not isinstance(results[0], ABCSeries):
+ try:
+ result.index = self.res_columns
+ except ValueError:
+ pass
- # skip if we are mixed datelike and trying reduce across axes
- # GH6125
- if self.reduce:
- if self.obj._is_mixed_type and self.obj._is_datelike_mixed_type:
- self.reduce = False
+ try:
+ result.columns = self.res_index
+ except ValueError:
+ pass
+
+ return result
+
+
+class FrameColumnApply(FrameApply):
+ axis = 1
def apply_broadcast(self):
- return self._apply_broadcast(self.obj.T).T
+ result = super(FrameColumnApply, self).apply_broadcast(self.obj.T)
+ return result.T
@property
def series_generator(self):
- from pandas import Series
- dtype = object if self.obj._is_mixed_type else None
- return (Series._from_array(arr, index=self.columns, name=name,
- dtype=dtype)
+ constructor = self.obj._constructor_sliced
+ return (constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values,
self.index)))
@@ -299,3 +367,39 @@ def result_index(self):
@property
def result_columns(self):
return self.columns
+
+ def wrap_results_for_axis(self):
+ """ return the results for the columns """
+ results = self.results
+
+ # we have requested to expand
+ if self.result_type == 'expand':
+ result = self.infer_to_same_shape()
+
+ # we have a non-series and don't want inference
+ elif not isinstance(results[0], ABCSeries):
+ from pandas import Series
+
+ result = Series(results)
+ result.index = self.res_index
+
+ # we may want to infer results
+ else:
+ result = self.infer_to_same_shape()
+
+ return result
+
+ def infer_to_same_shape(self):
+ """ infer the results to the same shape as the input object """
+ results = self.results
+
+ result = self.obj._constructor(data=results)
+ result = result.T
+
+ # set the index
+ result.index = self.res_index
+
+ # infer dtypes
+ result = result.infer_objects()
+
+ return result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 3d1983f65d70d..8de429fe5f4b9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4833,8 +4833,8 @@ def aggregate(self, func, axis=0, *args, **kwargs):
agg = aggregate
- def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
- args=(), **kwds):
+ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
+ result_type=None, args=(), **kwds):
"""Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
@@ -4849,9 +4849,14 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
- broadcast : boolean, default False
+ broadcast : boolean, optional
For aggregation functions, return object of same size with values
propagated
+
+ .. deprecated:: 0.23.0
+ This argument will be removed in a future version, replaced
+ by result_type='broadcast'.
+
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
@@ -4865,6 +4870,24 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
+
+ .. deprecated:: 0.23.0
+ This argument will be removed in a future version, replaced
+ by result_type='reduce'.
+
+ result_type : {'expand', 'reduce', 'broadcast, None}
+ These only act when axis=1 {columns}
+ * 'expand' : list-like results will be turned into columns.
+ * 'reduce' : return a Series if possible rather than expanding
+ list-like results. This is the opposite to 'expand'.
+ * 'broadcast' : results will be broadcast to the original shape
+ of the frame, the original index & columns will be retained.
+ * None : list-like results will be returned as a list
+ in a single column. However if the apply function
+ returns a Series these are expanded to columns.
+
+ .. versionadded:: 0.23.0
+
args : tuple
Positional arguments to pass to function in addition to the
array/series
@@ -4880,9 +4903,96 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
Examples
--------
- >>> df.apply(numpy.sqrt) # returns DataFrame
- >>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
- >>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
+
+ We use this DataFrame to illustrate
+
+ >>> df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
+ ... columns=['A', 'B', 'C'])
+ >>> df
+ A B C
+ 0 1 2 3
+ 1 1 2 3
+ 2 1 2 3
+ 3 1 2 3
+ 4 1 2 3
+ 5 1 2 3
+
+ Using a ufunc
+
+ >>> df.apply(np.sqrt)
+ A B C
+ 0 1.0 1.414214 1.732051
+ 1 1.0 1.414214 1.732051
+ 2 1.0 1.414214 1.732051
+ 3 1.0 1.414214 1.732051
+ 4 1.0 1.414214 1.732051
+ 5 1.0 1.414214 1.732051
+
+ Using a reducing function on either axis
+
+ >>> df.apply(np.sum, axis=0)
+ A 6
+ B 12
+ C 18
+ dtype: int64
+
+ >>> df.apply(np.sum, axis=1)
+ 0 6
+ 1 6
+ 2 6
+ 3 6
+ 4 6
+ 5 6
+ dtype: int64
+
+ Retuning a list-like will result in a Series
+
+ >>> df.apply(lambda x: [1, 2], axis=1)
+ 0 [1, 2]
+ 1 [1, 2]
+ 2 [1, 2]
+ 3 [1, 2]
+ 4 [1, 2]
+ 5 [1, 2]
+
+ Passing result_type='expand' will expand list-like results
+ to columns of a Dataframe
+
+ >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
+ 0 1
+ 0 1 2
+ 1 1 2
+ 2 1 2
+ 3 1 2
+ 4 1 2
+ 5 1 2
+
+ Return a Series inside the function is similar to passing
+ Passing result_type='expand'. The resulting column names
+ will be the Series index.
+
+ >>> df.apply(lambda x: Series([1, 2], index=['foo', 'bar']), axis=1)
+ foo bar
+ 0 1 2
+ 1 1 2
+ 2 1 2
+ 3 1 2
+ 4 1 2
+ 5 1 2
+
+
+ Passing result_type='broadcast' will take a same shape
+ result, whether list-like or scalar and broadcast it
+ along the axis. The resulting column names will be the originals.
+
+ >>> df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
+ A B C
+ 0 1 2 3
+ 1 1 2 3
+ 2 1 2 3
+ 3 1 2 3
+ 4 1 2 3
+ 5 1 2 3
See also
--------
@@ -4901,7 +5011,9 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
broadcast=broadcast,
raw=raw,
reduce=reduce,
- args=args, **kwds)
+ result_type=result_type,
+ args=args,
+ kwds=kwds)
return op.get_result()
def applymap(self, func):
@@ -5605,12 +5717,16 @@ def f(x):
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
- result = self.apply(f, reduce=False,
- ignore_failures=True)
+ from pandas.core.apply import frame_apply
+ opa = frame_apply(self,
+ func=f,
+ result_type='expand',
+ ignore_failures=True)
+ result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
- except:
+ except Exception:
pass
if filter_type is None or filter_type == 'numeric':
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py
index 91dc44e3f185e..e696fa2e55131 100644
--- a/pandas/core/sparse/frame.py
+++ b/pandas/core/sparse/frame.py
@@ -835,7 +835,8 @@ def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
- def apply(self, func, axis=0, broadcast=False, reduce=False):
+ def apply(self, func, axis=0, broadcast=None, reduce=None,
+ result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
@@ -848,6 +849,35 @@ def apply(self, func, axis=0, broadcast=False, reduce=False):
For aggregation functions, return object of same size with values
propagated
+ .. deprecated:: 0.23.0
+ This argument will be removed in a future version, replaced
+ by result_type='broadcast'.
+
+ reduce : boolean or None, default None
+ Try to apply reduction procedures. If the DataFrame is empty,
+ apply will use reduce to determine whether the result should be a
+ Series or a DataFrame. If reduce is None (the default), apply's
+ return value will be guessed by calling func an empty Series (note:
+ while guessing, exceptions raised by func will be ignored). If
+ reduce is True a Series will always be returned, and if False a
+ DataFrame will always be returned.
+
+ .. deprecated:: 0.23.0
+ This argument will be removed in a future version, replaced
+ by result_type='reduce'.
+
+ result_type : {'expand', 'reduce', 'broadcast, None}
+ These only act when axis=1 {columns}
+ * 'expand' : list-like results will be turned into columns
+ * 'reduce' : return a Series if possible rather than expanding
+ list-like results. This is the opposite to 'expand'
+ * 'broadcast' : scalar results will be broadcast to all columns
+ * None : list-like results will be returned as a list
+ in a single column. However if the apply function
+ returns a Series these are expanded to columns.
+
+ .. versionadded:: 0.23.0
+
Returns
-------
applied : Series or SparseDataFrame
@@ -871,12 +901,10 @@ def apply(self, func, axis=0, broadcast=False, reduce=False):
op = frame_apply(self,
func=func,
axis=axis,
- reduce=reduce)
-
- if broadcast:
- return op.apply_broadcast()
-
- return op.apply_standard()
+ reduce=reduce,
+ broadcast=broadcast,
+ result_type=result_type)
+ return op.get_result()
def applymap(self, func):
"""
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 20e72dd6bde91..525f487d8aa39 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -509,7 +509,9 @@ def _apply(self, func, axis=0, subset=None, **kwargs):
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
- result = data.apply(func, axis=axis, **kwargs)
+ result = data.apply(func, axis=axis,
+ result_type='expand', **kwargs)
+ result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py
index d69ddcd8f14d4..d1ad9f71e6350 100644
--- a/pandas/tests/frame/test_apply.py
+++ b/pandas/tests/frame/test_apply.py
@@ -82,24 +82,30 @@ def test_apply_empty(self):
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
+ def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
- result = self.empty.apply(x.append, axis=1, reduce=False)
+ result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
- result = self.empty.apply(x.append, axis=1, reduce=True)
+ result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
- result = empty_with_cols.apply(x.append, axis=1, reduce=False)
+ result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
- result = empty_with_cols.apply(x.append, axis=1, reduce=True)
+ result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
+ def test_apply_deprecate_reduce(self):
+ with warnings.catch_warnings(record=True):
+ x = []
+ self.empty.apply(x.append, axis=1, result_type='reduce')
+
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
@@ -121,17 +127,79 @@ def test_with_string_args(self):
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
+ def test_apply_broadcast_deprecated(self):
+ with tm.assert_produces_warning(FutureWarning):
+ self.frame.apply(np.mean, broadcast=True)
+
def test_apply_broadcast(self):
- broadcasted = self.frame.apply(np.mean, broadcast=True)
- agged = self.frame.apply(np.mean)
- for col, ts in compat.iteritems(broadcasted):
- assert (ts == agged[col]).all()
+ # scalars
+ result = self.frame.apply(np.mean, result_type='broadcast')
+ expected = DataFrame([self.frame.mean()], index=self.frame.index)
+ tm.assert_frame_equal(result, expected)
+
+ result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
+ m = self.frame.mean(axis=1)
+ expected = DataFrame({c: m for c in self.frame.columns})
+ tm.assert_frame_equal(result, expected)
+
+ # lists
+ result = self.frame.apply(
+ lambda x: list(range(len(self.frame.columns))),
+ axis=1,
+ result_type='broadcast')
+ m = list(range(len(self.frame.columns)))
+ expected = DataFrame([m] * len(self.frame.index),
+ dtype='float64',
+ index=self.frame.index,
+ columns=self.frame.columns)
+ tm.assert_frame_equal(result, expected)
- broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
- agged = self.frame.apply(np.mean, axis=1)
- for idx in broadcasted.index:
- assert (broadcasted.xs(idx) == agged[idx]).all()
+ result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
+ result_type='broadcast')
+ m = list(range(len(self.frame.index)))
+ expected = DataFrame({c: m for c in self.frame.columns},
+ dtype='float64',
+ index=self.frame.index)
+ tm.assert_frame_equal(result, expected)
+
+ # preserve columns
+ df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
+ columns=list('ABC'))
+ result = df.apply(lambda x: [1, 2, 3],
+ axis=1,
+ result_type='broadcast')
+ tm.assert_frame_equal(result, df)
+
+ df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
+ columns=list('ABC'))
+ result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
+ axis=1,
+ result_type='broadcast')
+ expected = df.copy()
+ tm.assert_frame_equal(result, expected)
+
+ def test_apply_broadcast_error(self):
+ df = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['A', 'B', 'C'])
+
+ # > 1 ndim
+ with pytest.raises(ValueError):
+ df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
+ axis=1,
+ result_type='broadcast')
+
+ # cannot broadcast
+ with pytest.raises(ValueError):
+ df.apply(lambda x: [1, 2],
+ axis=1,
+ result_type='broadcast')
+
+ with pytest.raises(ValueError):
+ df.apply(lambda x: Series([1, 2]),
+ axis=1,
+ result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
@@ -208,7 +276,7 @@ def _checkit(axis=0, raw=False):
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
- result = no_cols.apply(lambda x: x.mean(), broadcast=True)
+ result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
@@ -350,33 +418,37 @@ def test_apply_attach_name(self):
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
- expected = DataFrame(np.tile(self.frame.index,
- (len(self.frame.columns), 1)).T,
- index=self.frame.index,
- columns=self.frame.columns)
- assert_frame_equal(result, expected)
+ expected = Series(np.repeat(t[0], len(self.frame.columns))
+ for t in self.frame.itertuples())
+ expected.index = self.frame.index
+ assert_series_equal(result, expected)
def test_apply_multi_index(self):
- s = DataFrame([[1, 2], [3, 4], [5, 6]])
- s.index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
- s.columns = ['col1', 'col2']
- res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
- assert isinstance(res.index, MultiIndex)
+ index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
+ s = DataFrame([[1, 2], [3, 4], [5, 6]],
+ index=index,
+ columns=['col1', 'col2'])
+ result = s.apply(
+ lambda x: Series({'min': min(x), 'max': max(x)}), 1)
+ expected = DataFrame([[1, 2], [3, 4], [5, 6]],
+ index=index,
+ columns=['min', 'max'])
+ assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
- A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
- dict([(0, 'bar'), (1, 'eggs')])])
+ A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
+ dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
- B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
+ B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
- reduce_true = df.apply(fn, reduce=True)
- reduce_false = df.apply(fn, reduce=False)
- reduce_none = df.apply(fn, reduce=None)
+ reduce_true = df.apply(fn, result_type='reduce')
+ reduce_false = df.apply(fn, result_type='expand')
+ reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
@@ -465,8 +537,8 @@ def test_frame_apply_dont_convert_datetime64(self):
assert df.x1.dtype == 'M8[ns]'
- # See gh-12244
def test_apply_non_numpy_dtype(self):
+ # See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
@@ -482,6 +554,256 @@ def test_apply_non_numpy_dtype(self):
assert_frame_equal(result, df)
+class TestInferOutputShape(object):
+ # the user has supplied an opaque UDF where
+ # they are transforming the input that requires
+ # us to infer the output
+
+ def test_infer_row_shape(self):
+ # gh-17437
+ # if row shape is changing, infer it
+ df = pd.DataFrame(np.random.rand(10, 2))
+ result = df.apply(np.fft.fft, axis=0)
+ assert result.shape == (10, 2)
+
+ result = df.apply(np.fft.rfft, axis=0)
+ assert result.shape == (6, 2)
+
+ def test_with_dictlike_columns(self):
+ # gh 17602
+ df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
+ result = df.apply(lambda x: {'s': x['a'] + x['b']},
+ axis=1)
+ expected = Series([{'s': 3} for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
+ pd.Timestamp('2017-05-02 00:00:00')]
+ result = df.apply(lambda x: {'s': x['a'] + x['b']},
+ axis=1)
+ assert_series_equal(result, expected)
+
+ # compose a series
+ result = (df['a'] + df['b']).apply(lambda x: {'s': x})
+ expected = Series([{'s': 3}, {'s': 3}])
+ assert_series_equal(result, expected)
+
+ # gh-18775
+ df = DataFrame()
+ df["author"] = ["X", "Y", "Z"]
+ df["publisher"] = ["BBC", "NBC", "N24"]
+ df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
+ '13-05-2011 08:20:35',
+ '15-01-2013 09:09:09'])
+ result = df.apply(lambda x: {}, axis=1)
+ expected = Series([{}, {}, {}])
+ assert_series_equal(result, expected)
+
+ def test_with_dictlike_columns_with_infer(self):
+ # gh 17602
+ df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
+ result = df.apply(lambda x: {'s': x['a'] + x['b']},
+ axis=1, result_type='expand')
+ expected = DataFrame({'s': [3, 3]})
+ assert_frame_equal(result, expected)
+
+ df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
+ pd.Timestamp('2017-05-02 00:00:00')]
+ result = df.apply(lambda x: {'s': x['a'] + x['b']},
+ axis=1, result_type='expand')
+ assert_frame_equal(result, expected)
+
+ def test_with_listlike_columns(self):
+ # gh-17348
+ df = DataFrame({'a': Series(np.random.randn(4)),
+ 'b': ['a', 'list', 'of', 'words'],
+ 'ts': date_range('2016-10-01', periods=4, freq='H')})
+
+ result = df[['a', 'b']].apply(tuple, axis=1)
+ expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
+ assert_series_equal(result, expected)
+
+ result = df[['a', 'ts']].apply(tuple, axis=1)
+ expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
+ assert_series_equal(result, expected)
+
+ # gh-18919
+ df = DataFrame({'x': Series([['a', 'b'], ['q']]),
+ 'y': Series([['z'], ['q', 't']])})
+ df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
+
+ result = df.apply(
+ lambda row: [el for el in row['x'] if el in row['y']],
+ axis=1)
+ expected = Series([[], ['q']], index=df.index)
+ assert_series_equal(result, expected)
+
+ def test_infer_output_shape_columns(self):
+ # gh-18573
+
+ df = DataFrame({'number': [1., 2.],
+ 'string': ['foo', 'bar'],
+ 'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
+ pd.Timestamp('2017-11-29 03:45:00')]})
+ result = df.apply(lambda row: (row.number, row.string), axis=1)
+ expected = Series([t[2:] for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ def test_infer_output_shape_listlike_columns(self):
+ # gh-16353
+
+ df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
+
+ result = df.apply(lambda x: [1, 2, 3], axis=1)
+ expected = Series([[1, 2, 3] for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ result = df.apply(lambda x: [1, 2], axis=1)
+ expected = Series([[1, 2] for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ # gh-17970
+ df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
+
+ result = df.apply(lambda row: np.ones(1), axis=1)
+ expected = Series([np.ones(1) for t in df.itertuples()],
+ index=df.index)
+ assert_series_equal(result, expected)
+
+ result = df.apply(lambda row: np.ones(2), axis=1)
+ expected = Series([np.ones(2) for t in df.itertuples()],
+ index=df.index)
+ assert_series_equal(result, expected)
+
+ # gh-17892
+ df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
+ pd.Timestamp('2010-02-04'),
+ pd.Timestamp('2010-02-05'),
+ pd.Timestamp('2010-02-06')],
+ 'b': [9, 5, 4, 3],
+ 'c': [5, 3, 4, 2],
+ 'd': [1, 2, 3, 4]})
+
+ def fun(x):
+ return (1, 2)
+
+ result = df.apply(fun, axis=1)
+ expected = Series([(1, 2) for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ def test_consistent_coerce_for_shapes(self):
+ # we want column names to NOT be propagated
+ # just because the shape matches the input shape
+ df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
+
+ result = df.apply(lambda x: [1, 2, 3], axis=1)
+ expected = Series([[1, 2, 3] for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ result = df.apply(lambda x: [1, 2], axis=1)
+ expected = Series([[1, 2] for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ def test_consistent_names(self):
+ # if a Series is returned, we should use the resulting index names
+ df = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['A', 'B', 'C'])
+
+ result = df.apply(lambda x: Series([1, 2, 3],
+ index=['test', 'other', 'cols']),
+ axis=1)
+ expected = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['test', 'other', 'cols'])
+ assert_frame_equal(result, expected)
+
+ result = df.apply(
+ lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
+ expected = DataFrame(
+ np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['test', 'other'])
+ assert_frame_equal(result, expected)
+
+ def test_result_type(self):
+ # result_type should be consistent no matter which
+ # path we take in the code
+ df = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['A', 'B', 'C'])
+
+ result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
+ expected = df.copy()
+ expected.columns = [0, 1, 2]
+ assert_frame_equal(result, expected)
+
+ result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
+ expected = df[['A', 'B']].copy()
+ expected.columns = [0, 1]
+ assert_frame_equal(result, expected)
+
+ # broadcast result
+ result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
+ expected = df.copy()
+ assert_frame_equal(result, expected)
+
+ columns = ['other', 'col', 'names']
+ result = df.apply(
+ lambda x: pd.Series([1, 2, 3],
+ index=columns),
+ axis=1,
+ result_type='broadcast')
+ expected = df.copy()
+ assert_frame_equal(result, expected)
+
+ # series result
+ result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
+ expected = df.copy()
+ assert_frame_equal(result, expected)
+
+ # series result with other index
+ columns = ['other', 'col', 'names']
+ result = df.apply(
+ lambda x: pd.Series([1, 2, 3], index=columns),
+ axis=1)
+ expected = df.copy()
+ expected.columns = columns
+ assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("result_type", ['foo', 1])
+ def test_result_type_error(self, result_type):
+ # allowed result_type
+ df = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['A', 'B', 'C'])
+
+ with pytest.raises(ValueError):
+ df.apply(lambda x: [1, 2, 3],
+ axis=1,
+ result_type=result_type)
+
+ @pytest.mark.parametrize(
+ "box",
+ [lambda x: list(x),
+ lambda x: tuple(x),
+ lambda x: np.array(x, dtype='int64')],
+ ids=['list', 'tuple', 'array'])
+ def test_consistency_for_boxed(self, box):
+ # passing an array or list should not affect the output shape
+ df = DataFrame(
+ np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
+ columns=['A', 'B', 'C'])
+
+ result = df.apply(lambda x: box([1, 2]), axis=1)
+ expected = Series([box([1, 2]) for t in df.itertuples()])
+ assert_series_equal(result, expected)
+
+ result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
+ expected = DataFrame(
+ np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
+ assert_frame_equal(result, expected)
+
+
def zip_frames(*frames):
"""
take a list of frames, zip the columns together for each
@@ -657,13 +979,13 @@ def test_non_callable_aggregates(self):
# Function aggregate
result = df.agg({'A': 'count'})
- expected = pd.Series({'A': 2})
+ expected = Series({'A': 2})
assert_series_equal(result, expected)
# Non-function aggregate
result = df.agg({'A': 'size'})
- expected = pd.Series({'A': 3})
+ expected = Series({'A': 3})
assert_series_equal(result, expected)
diff --git a/pandas/tests/sparse/frame/test_apply.py b/pandas/tests/sparse/frame/test_apply.py
new file mode 100644
index 0000000000000..07e4b1bf7c913
--- /dev/null
+++ b/pandas/tests/sparse/frame/test_apply.py
@@ -0,0 +1,92 @@
+import pytest
+import numpy as np
+from pandas import SparseDataFrame, DataFrame, Series, bdate_range
+from pandas.core import nanops
+from pandas.util import testing as tm
+
+
+@pytest.fixture
+def dates():
+ return bdate_range('1/1/2011', periods=10)
+
+
+@pytest.fixture
+def empty():
+ return SparseDataFrame()
+
+
+@pytest.fixture
+def frame(dates):
+ data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
+ 'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
+ 'C': np.arange(10, dtype=np.float64),
+ 'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
+
+ return SparseDataFrame(data, index=dates)
+
+
+@pytest.fixture
+def fill_frame(frame):
+ values = frame.values.copy()
+ values[np.isnan(values)] = 2
+
+ return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
+ default_fill_value=2,
+ index=frame.index)
+
+
+def test_apply(frame):
+ applied = frame.apply(np.sqrt)
+ assert isinstance(applied, SparseDataFrame)
+ tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
+
+ # agg / broadcast
+ with tm.assert_produces_warning(FutureWarning):
+ broadcasted = frame.apply(np.sum, broadcast=True)
+ assert isinstance(broadcasted, SparseDataFrame)
+
+ with tm.assert_produces_warning(FutureWarning):
+ exp = frame.to_dense().apply(np.sum, broadcast=True)
+ tm.assert_frame_equal(broadcasted.to_dense(), exp)
+
+ applied = frame.apply(np.sum)
+ tm.assert_series_equal(applied,
+ frame.to_dense().apply(nanops.nansum))
+
+
+def test_apply_fill(fill_frame):
+ applied = fill_frame.apply(np.sqrt)
+ assert applied['A'].fill_value == np.sqrt(2)
+
+
+def test_apply_empty(empty):
+ assert empty.apply(np.sqrt) is empty
+
+
+def test_apply_nonuq():
+ orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=['a', 'a', 'c'])
+ sparse = orig.to_sparse()
+ res = sparse.apply(lambda s: s[0], axis=1)
+ exp = orig.apply(lambda s: s[0], axis=1)
+
+ # dtype must be kept
+ assert res.dtype == np.int64
+
+ # ToDo: apply must return subclassed dtype
+ assert isinstance(res, Series)
+ tm.assert_series_equal(res.to_dense(), exp)
+
+ # df.T breaks
+ sparse = orig.T.to_sparse()
+ res = sparse.apply(lambda s: s[0], axis=0) # noqa
+ exp = orig.T.apply(lambda s: s[0], axis=0)
+
+ # TODO: no non-unique columns supported in sparse yet
+ # tm.assert_series_equal(res.to_dense(), exp)
+
+
+def test_applymap(frame):
+ # just test that it works
+ result = frame.applymap(lambda x: x * 2)
+ assert isinstance(result, SparseDataFrame)
diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py
index 54f567bcd2a8c..29fad3c8eefaf 100644
--- a/pandas/tests/sparse/frame/test_frame.py
+++ b/pandas/tests/sparse/frame/test_frame.py
@@ -621,52 +621,6 @@ def test_append(self):
tm.assert_sp_frame_equal(appended.iloc[:, :3], self.frame.iloc[:, :3],
exact_indices=False)
- def test_apply(self):
- applied = self.frame.apply(np.sqrt)
- assert isinstance(applied, SparseDataFrame)
- tm.assert_almost_equal(applied.values, np.sqrt(self.frame.values))
-
- applied = self.fill_frame.apply(np.sqrt)
- assert applied['A'].fill_value == np.sqrt(2)
-
- # agg / broadcast
- broadcasted = self.frame.apply(np.sum, broadcast=True)
- assert isinstance(broadcasted, SparseDataFrame)
-
- exp = self.frame.to_dense().apply(np.sum, broadcast=True)
- tm.assert_frame_equal(broadcasted.to_dense(), exp)
-
- assert self.empty.apply(np.sqrt) is self.empty
-
- from pandas.core import nanops
- applied = self.frame.apply(np.sum)
- tm.assert_series_equal(applied,
- self.frame.to_dense().apply(nanops.nansum))
-
- def test_apply_nonuq(self):
- orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
- index=['a', 'a', 'c'])
- sparse = orig.to_sparse()
- res = sparse.apply(lambda s: s[0], axis=1)
- exp = orig.apply(lambda s: s[0], axis=1)
- # dtype must be kept
- assert res.dtype == np.int64
- # ToDo: apply must return subclassed dtype
- assert isinstance(res, pd.Series)
- tm.assert_series_equal(res.to_dense(), exp)
-
- # df.T breaks
- sparse = orig.T.to_sparse()
- res = sparse.apply(lambda s: s[0], axis=0) # noqa
- exp = orig.T.apply(lambda s: s[0], axis=0)
- # TODO: no non-unique columns supported in sparse yet
- # tm.assert_series_equal(res.to_dense(), exp)
-
- def test_applymap(self):
- # just test that it works
- result = self.frame.applymap(lambda x: x * 2)
- assert isinstance(result, SparseDataFrame)
-
def test_astype(self):
sparse = pd.SparseDataFrame({'A': SparseArray([1, 2, 3, 4],
dtype=np.int64),
| closes #16353
closes #17348
closes #17437
closes #18573
closes #17970
closes #17892
closes #17602
closes #15628
closes #18775
closes #18901
closes #18919
This fixes apply to work correctly when the returned shape mismatches the original. It will try to set the indices if it possible. Setting to a list-like with ``axis=1`` is now disallowed (but still possible if you operate *row-wise*). We were applying this inconsitently. This is of course a discouraged practice anyhow.
Prob should add some examples / update the doc-string a bit.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18577 | 2017-11-30T14:08:17Z | 2018-02-07T13:06:48Z | 2018-02-07T13:06:47Z | 2018-02-07T14:20:44Z |
BUG: Series.rank modifies inplace with NaT | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index c2da0c420f643..3e9e2bf329674 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -315,7 +315,7 @@ Reshaping
- Bug in :func:`DataFrame.stack` which fails trying to sort mixed type levels under Python 3 (:issue:`18310`)
- Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
-
+- Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`)
-
Numeric
@@ -336,4 +336,3 @@ Other
^^^^^
- Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`)
--
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in
index 0e46530e20d1c..8ccc6e036da80 100644
--- a/pandas/_libs/algos_rank_helper.pxi.in
+++ b/pandas/_libs/algos_rank_helper.pxi.in
@@ -84,6 +84,11 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True,
mask = np.isnan(values)
{{elif dtype == 'int64'}}
mask = values == iNaT
+
+ # create copy in case of iNaT
+ # values are mutated inplace
+ if mask.any():
+ values = values.copy()
{{endif}}
# double sort first by mask and then by values to ensure nan values are
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 4bba6d7601ae8..7014929db4c2d 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -2214,3 +2214,12 @@ def test_series_broadcasting(self):
df_nan.clip_lower(s, axis=0)
for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
getattr(df, op)(s_nan, axis=0)
+
+ def test_series_nat_conversion(self):
+ # GH 18521
+ # Check rank does not mutate DataFrame
+ df = DataFrame(np.random.randn(10, 3), dtype='float64')
+ expected = df.copy()
+ df.rank()
+ result = df
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py
index 311d14e928caa..bccc46f1e0ca8 100644
--- a/pandas/tests/series/test_rank.py
+++ b/pandas/tests/series/test_rank.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-from pandas import compat
+from pandas import compat, Timestamp
import pytest
@@ -368,3 +368,13 @@ def test_rank_object_bug(self):
# smoke tests
Series([np.nan] * 32).astype(object).rank(ascending=True)
Series([np.nan] * 32).astype(object).rank(ascending=False)
+
+ def test_rank_modify_inplace(self):
+ # GH 18521
+ # Check rank does not mutate series
+ s = Series([Timestamp('2017-01-05 10:20:27.569000'), NaT])
+ expected = s.copy()
+
+ s.rank()
+ result = s
+ assert_series_equal(result, expected)
| - [x] closes #18521
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Looks like `_ensure_data` converts NaT to -9223372036854775808 then that gets converted back to datetime64. | https://api.github.com/repos/pandas-dev/pandas/pulls/18576 | 2017-11-30T13:20:00Z | 2017-12-14T11:30:35Z | 2017-12-14T11:30:35Z | 2017-12-14T11:30:45Z |
CLN: ASV remove uncessary selfs and add setups | diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
index 7ffb180b49e09..45d62163ae80b 100644
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -11,6 +11,8 @@
except:
pass
+from .pandas_vb_common import setup # noqa
+
class Factorize(object):
@@ -21,7 +23,6 @@ class Factorize(object):
def setup(self, sort):
N = 10**5
- np.random.seed(1234)
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
@@ -45,7 +46,6 @@ class Duplicated(object):
def setup(self, keep):
N = 10**5
- np.random.seed(1234)
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
@@ -79,7 +79,6 @@ class Match(object):
goal_time = 0.2
def setup(self):
- np.random.seed(1234)
self.uniques = tm.makeStringIndex(1000).values
self.all = self.uniques.repeat(10)
@@ -92,7 +91,6 @@ class Hashing(object):
goal_time = 0.2
def setup_cache(self):
- np.random.seed(1234)
N = 10**5
df = pd.DataFrame(
diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py
index 14169ced4b71f..cc8766e1fa39c 100644
--- a/asv_bench/benchmarks/binary_ops.py
+++ b/asv_bench/benchmarks/binary_ops.py
@@ -6,6 +6,8 @@
except ImportError:
import pandas.computation.expressions as expr
+from .pandas_vb_common import setup # noqa
+
class Ops(object):
@@ -15,7 +17,6 @@ class Ops(object):
param_names = ['use_numexpr', 'threads']
def setup(self, use_numexpr, threads):
- np.random.seed(1234)
self.df = DataFrame(np.random.randn(20000, 100))
self.df2 = DataFrame(np.random.randn(20000, 100))
@@ -47,7 +48,6 @@ class Ops2(object):
def setup(self):
N = 10**3
- np.random.seed(1234)
self.df = DataFrame(np.random.randn(N, N))
self.df2 = DataFrame(np.random.randn(N, N))
@@ -89,14 +89,12 @@ class Timeseries(object):
param_names = ['tz']
def setup(self, tz):
- self.N = 10**6
- self.halfway = ((self.N // 2) - 1)
- self.s = Series(date_range('20010101', periods=self.N, freq='T',
- tz=tz))
- self.ts = self.s[self.halfway]
+ N = 10**6
+ halfway = (N // 2) - 1
+ self.s = Series(date_range('20010101', periods=N, freq='T', tz=tz))
+ self.ts = self.s[halfway]
- self.s2 = Series(date_range('20010101', periods=self.N, freq='s',
- tz=tz))
+ self.s2 = Series(date_range('20010101', periods=N, freq='s', tz=tz))
def time_series_timestamp_compare(self, tz):
self.s <= self.ts
@@ -131,7 +129,6 @@ class AddOverflowArray(object):
goal_time = 0.2
def setup(self):
- np.random.seed(1234)
N = 10**6
self.arr = np.arange(N)
self.arr_rev = np.arange(-N, 0)
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index df41a2afad1f8..1613ca1b97f4b 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -9,6 +9,8 @@
except ImportError:
pass
+from .pandas_vb_common import setup # noqa
+
class Concat(object):
@@ -76,7 +78,6 @@ class ValueCounts(object):
def setup(self, dropna):
n = 5 * 10**5
- np.random.seed(2718281)
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
@@ -101,7 +102,6 @@ class SetCategories(object):
def setup(self):
n = 5 * 10**5
- np.random.seed(2718281)
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype('category')
@@ -116,7 +116,6 @@ class Rank(object):
def setup(self):
N = 10**5
ncats = 100
- np.random.seed(1234)
self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
self.s_str_cat = self.s_str.astype('category')
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index 2c9c382e2db86..6276dc324ca0d 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -1,6 +1,8 @@
import numpy as np
from pandas import DataFrame, Series, Index, DatetimeIndex, Timestamp
+from .pandas_vb_common import setup # noqa
+
class Constructors(object):
@@ -8,7 +10,6 @@ class Constructors(object):
def setup(self):
N = 10**2
- np.random.seed(1234)
self.arr = np.random.randn(N, N)
self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index fd18b3f21cf45..8e581dcf22b4c 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -5,6 +5,8 @@
except ImportError:
import pandas.computation.expressions as expr
+from .pandas_vb_common import setup # noqa
+
class Eval(object):
@@ -14,7 +16,6 @@ class Eval(object):
param_names = ['engine', 'threads']
def setup(self, engine, threads):
- np.random.seed(1234)
self.df = pd.DataFrame(np.random.randn(20000, 100))
self.df2 = pd.DataFrame(np.random.randn(20000, 100))
self.df3 = pd.DataFrame(np.random.randn(20000, 100))
@@ -45,17 +46,16 @@ class Query(object):
goal_time = 0.2
def setup(self):
- np.random.seed(1234)
- self.N = 10**6
- self.halfway = (self.N // 2) - 1
- self.index = pd.date_range('20010101', periods=self.N, freq='T')
- self.s = pd.Series(self.index)
- self.ts = self.s.iloc[self.halfway]
- self.df = pd.DataFrame({'a': np.random.randn(self.N), 'dates': self.s},
- index=self.index)
- self.data = np.random.randn(self.N)
- self.min_val = self.data.min()
- self.max_val = self.data.max()
+ N = 10**6
+ halfway = (N // 2) - 1
+ index = pd.date_range('20010101', periods=N, freq='T')
+ s = pd.Series(index)
+ self.ts = s.iloc[halfway]
+ self.df = pd.DataFrame({'a': np.random.randn(N), 'dates': s},
+ index=index)
+ data = np.random.randn(N)
+ self.min_val = data.min()
+ self.max_val = data.max()
def time_query_datetime_index(self):
self.df.query('index < @self.ts')
diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index d577ebc20a31c..5f465a91d38d3 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -4,27 +4,23 @@
try:
from pandas.tseries import offsets
except:
- from pandas.core.datetools import *
+ from pandas.core.datetools import * # noqa
+from .pandas_vb_common import setup # noqa
-# ----------------------------------------------------------------------
-# Creation from nested dict
class FromDicts(object):
goal_time = 0.2
def setup(self):
- np.random.seed(1234)
N, K = 5000, 50
- self.index = tm.makeStringIndex(N)
- self.columns = tm.makeStringIndex(K)
- self.frame = DataFrame(np.random.randn(N, K),
- index=self.index,
- columns=self.columns)
- self.data = self.frame.to_dict()
+ index = tm.makeStringIndex(N)
+ columns = tm.makeStringIndex(K)
+ frame = DataFrame(np.random.randn(N, K), index=index, columns=columns)
+ self.data = frame.to_dict()
self.some_dict = list(self.data.values())[0]
- self.dict_list = self.frame.to_dict(orient='records')
+ self.dict_list = frame.to_dict(orient='records')
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
@@ -42,14 +38,13 @@ def time_frame_ctor_nested_dict_int64(self):
DataFrame(self.data2)
-# from a mi-series
-
class FromSeries(object):
+
goal_time = 0.2
def setup(self):
- self.mi = MultiIndex.from_product([range(100), range(100)])
- self.s = Series(np.random.randn(10000), index=self.mi)
+ mi = MultiIndex.from_product([range(100), range(100)])
+ self.s = Series(np.random.randn(10000), index=mi)
def time_frame_from_mi_series(self):
DataFrame(self.s)
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 7ed341425e561..2b48168238ee8 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -3,7 +3,8 @@
import pandas.util.testing as tm
from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
isnull, NaT)
-from .pandas_vb_common import setup
+
+from .pandas_vb_common import setup # noqa
class GetNumericData(object):
| @jorisvandenbossche Added `setup` with #noqa to the other benchmarks I edited and removed uncessesary `self`s | https://api.github.com/repos/pandas-dev/pandas/pulls/18575 | 2017-11-30T04:34:56Z | 2017-11-30T08:13:59Z | 2017-11-30T08:13:59Z | 2017-11-30T18:28:20Z |
DEPR: deprecate .asobject property | diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py
index 7697c3b9d3840..a607168ea0457 100644
--- a/asv_bench/benchmarks/index_object.py
+++ b/asv_bench/benchmarks/index_object.py
@@ -12,7 +12,7 @@ def setup(self):
if (self.rng.dtype == object):
self.idx_rng = self.rng.view(Index)
else:
- self.idx_rng = self.rng.asobject
+ self.idx_rng = self.rng.astype(object)
self.idx_rng2 = self.idx_rng[:(-1)]
# other datetime
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 304ccd1f9350b..77503b4653437 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -124,7 +124,7 @@ Deprecations
- ``Series.from_array`` and ``SparseSeries.from_array`` are deprecated. Use the normal constructor ``Series(..)`` and ``SparseSeries(..)`` instead (:issue:`18213`).
- ``DataFrame.as_matrix`` is deprecated. Use ``DataFrame.values`` instead (:issue:`18458`).
--
+- ``Series.asobject``, ``DatetimeIndex.asobject``, ``PeriodIndex.asobject`` and ``TimeDeltaIndex.asobject`` have been deprecated. Use '.astype(object)' instead (:issue:`18572`)
.. _whatsnew_0220.prior_deprecations:
diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in
index 336dd77ea9a89..0d3f6664da9e3 100644
--- a/pandas/_libs/algos_common_helper.pxi.in
+++ b/pandas/_libs/algos_common_helper.pxi.in
@@ -552,8 +552,8 @@ cpdef ensure_object(object arr):
return arr
else:
return arr.astype(np.object_)
- elif hasattr(arr, 'asobject'):
- return arr.asobject
+ elif hasattr(arr, '_box_values_as_index'):
+ return arr._box_values_as_index()
else:
return np.array(arr, dtype=np.object_)
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py
index 7a2da9655cc4a..53ead5e8f74a3 100644
--- a/pandas/core/accessor.py
+++ b/pandas/core/accessor.py
@@ -10,7 +10,7 @@
class DirNamesMixin(object):
_accessors = frozenset([])
- _deprecations = frozenset([])
+ _deprecations = frozenset(['asobject'])
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 9f712a1cf039b..0ceb8966fd3c8 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -369,7 +369,7 @@ def unique(values):
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
- uniques = uniques.asobject.values
+ uniques = uniques.astype(object).values
return uniques
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py
index c1ba018adbcec..cd98064dee86e 100644
--- a/pandas/core/dtypes/concat.py
+++ b/pandas/core/dtypes/concat.py
@@ -401,7 +401,7 @@ def convert_to_pydatetime(x, axis):
# if dtype is of datetimetz or timezone
if x.dtype.kind == _NS_DTYPE.kind:
if getattr(x, 'tz', None) is not None:
- x = x.asobject.values
+ x = x.astype(object).values
else:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel(),
@@ -479,7 +479,7 @@ def _concat_index_asobject(to_concat, name=None):
"""
klasses = ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex
- to_concat = [x.asobject if isinstance(x, klasses) else x
+ to_concat = [x.astype(object) if isinstance(x, klasses) else x
for x in to_concat]
from pandas import Index
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ff42e39d9dbdd..90d1ab8d0e242 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3330,7 +3330,7 @@ class max type
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
- values = index.asobject.values
+ values = index.astype(object).values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
@@ -5077,7 +5077,7 @@ def applymap(self, func):
def infer(x):
if x.empty:
return lib.map_infer(x, func)
- return lib.map_infer(x.asobject, func)
+ return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index c2fc983c983a6..5c96e4eeff69d 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -242,6 +242,13 @@ def _box_values(self, values):
"""
return lib.map_infer(values, self._box_func)
+ def _box_values_as_index(self):
+ """
+ return object Index which contains boxed values
+ """
+ from pandas.core.index import Index
+ return Index(self._box_values(self.asi8), name=self.name, dtype=object)
+
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
@@ -360,7 +367,7 @@ def map(self, f):
raise TypeError('The map function must return an Index object')
return result
except Exception:
- return self.asobject.map(f)
+ return self.astype(object).map(f)
def sort_values(self, return_indexer=False, ascending=True):
"""
@@ -424,13 +431,15 @@ def _isnan(self):
@property
def asobject(self):
- """
+ """DEPRECATED: Use ``astype(object)`` instead.
+
return object Index which contains boxed values
*this is an internal non-public method*
"""
- from pandas.core.index import Index
- return Index(self._box_values(self.asi8), name=self.name, dtype=object)
+ warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
+ " instead", FutureWarning, stacklevel=2)
+ return self.astype(object)
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance, box=False))
@@ -468,7 +477,7 @@ def tolist(self):
"""
return a list of the underlying data
"""
- return list(self.asobject)
+ return list(self.astype(object))
def min(self, axis=None, *args, **kwargs):
"""
@@ -746,7 +755,7 @@ def isin(self, values):
try:
values = type(self)(values)
except ValueError:
- return self.asobject.isin(values)
+ return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 1578ae924c9bb..55c6063b74286 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -907,7 +907,7 @@ def to_datetime(self, dayfirst=False):
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
- return self.asobject
+ return self._box_values_as_index()
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), name=self.name,
dtype='i8')
@@ -1679,7 +1679,7 @@ def time(self):
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
return self._maybe_mask_results(libalgos.arrmap_object(
- self.asobject.values,
+ self.astype(object).values,
lambda x: np.nan if x is libts.NaT else x.time()))
@property
@@ -1789,7 +1789,7 @@ def insert(self, loc, item):
# fall back to object index
if isinstance(item, compat.string_types):
- return self.asobject.insert(loc, item)
+ return self.astype(object).insert(loc, item)
raise TypeError(
"cannot insert DatetimeIndex with incompatible label")
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index ac9b511606066..cb0c4a9ce2a86 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -418,7 +418,7 @@ def _int64index(self):
@property
def values(self):
- return self.asobject.values
+ return self.astype(object).values
@property
def _values(self):
@@ -428,7 +428,7 @@ def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
- return self.asobject.values
+ return self.astype(object).values
def __array_wrap__(self, result, context=None):
"""
@@ -476,7 +476,7 @@ def _to_embed(self, keep_tz=False, dtype=None):
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
- return self.asobject.values
+ return self.astype(object).values
@property
def _formatter_func(self):
@@ -506,7 +506,7 @@ def asof_locs(self, where, mask):
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
- return self.asobject
+ return self._box_values_as_index()
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
@@ -656,7 +656,7 @@ def end_time(self):
def _mpl_repr(self):
# how to represent ourselves to matplotlib
- return self.asobject.values
+ return self.astype(object).values
def to_timestamp(self, freq=None, how='start'):
"""
@@ -971,7 +971,7 @@ def _convert_tolerance(self, tolerance, target):
def insert(self, loc, item):
if not isinstance(item, Period) or self.freq != item.freq:
- return self.asobject.insert(loc, item)
+ return self.astype(object).insert(loc, item)
idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),
self[loc:].asi8))
@@ -1018,7 +1018,7 @@ def _apply_meta(self, rawarr):
def _format_native_types(self, na_rep=u('NaT'), date_format=None,
**kwargs):
- values = self.asobject.values
+ values = self.astype(object).values
if date_format:
formatter = lambda dt: dt.strftime(date_format)
diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py
index 97f6ca2e5d642..77e05ccf4db22 100644
--- a/pandas/core/indexes/timedeltas.py
+++ b/pandas/core/indexes/timedeltas.py
@@ -482,7 +482,7 @@ def astype(self, dtype, copy=True):
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
- return self.asobject
+ return self._box_values_as_index()
elif is_timedelta64_ns_dtype(dtype):
if copy is True:
return self.copy()
@@ -883,7 +883,7 @@ def insert(self, loc, item):
# fall back to object index
if isinstance(item, compat.string_types):
- return self.asobject.insert(loc, item)
+ return self.astype(object).insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 90733fa6d68d1..c6642657e386e 100755
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -405,7 +405,8 @@ def _setitem_with_indexer(self, indexer, value):
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
- new_values = np.concatenate([self.obj.asobject,
+ as_obj = self.obj.astype(object)
+ new_values = np.concatenate([as_obj,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 1d1d71be16c00..e5db5679c43f6 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2191,7 +2191,7 @@ def _try_coerce_args(self, values, other):
if isinstance(other, ABCDatetimeIndex):
# to store DatetimeTZBlock as object
- other = other.asobject.values
+ other = other.astype(object).values
return values, False, other, False
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 934570602c99d..2fb0cbb14c225 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -850,7 +850,7 @@ def wrapper(self, other, axis=None):
# tested in test_nat_comparisons
# (pandas.tests.series.test_operators.TestSeriesOperators)
return self._constructor(na_op(self.values,
- other.asobject.values),
+ other.astype(object).values),
index=self.index)
return self._constructor(na_op(self.values, np.asarray(other)),
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 5d0e6907a6595..15550de16e5d2 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -149,7 +149,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame):
_metadata = ['name']
_accessors = frozenset(['dt', 'cat', 'str'])
_deprecations = generic.NDFrame._deprecations | frozenset(
- ['sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv'])
+ ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value',
+ 'from_csv'])
_allow_index_ops = True
def __init__(self, data=None, index=None, dtype=None, name=None,
@@ -449,12 +450,15 @@ def get_values(self):
@property
def asobject(self):
- """
+ """DEPRECATED: Use ``astype(object)`` instead.
+
return object Series which contains boxed values
*this is an internal non-public method*
"""
- return self._data.asobject
+ warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
+ " instead", FutureWarning, stacklevel=2)
+ return self.astype(object).values
# ops
def ravel(self, order='C'):
@@ -1322,7 +1326,7 @@ def unique(self):
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
- result = result.asobject.values
+ result = result.astype(object).values
return result
@@ -2549,7 +2553,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds):
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
- values = self.asobject
+ values = self.astype(object).values
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
@@ -3125,7 +3129,7 @@ def _sanitize_index(data, index, copy=False):
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, PeriodIndex):
- data = data.asobject
+ data = data.astype(object).values
elif isinstance(data, DatetimeIndex):
data = data._to_embed(keep_tz=True)
elif isinstance(data, np.ndarray):
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index e116635c99264..8f25eb3af70cd 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -2231,7 +2231,7 @@ class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
- values = self.values.asobject
+ values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index 9daee918b9f30..2ced5f653825d 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -363,7 +363,8 @@ def __call__(self):
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
- all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
+ all_dates = date_range(start=st, end=ed,
+ freq=freq, tz=tz).astype(object)
try:
if len(all_dates) > 0:
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b6090a13c8d38..876e0ea7ea0b3 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -501,8 +501,8 @@ def test_constructor_period(self):
assert df['b'].dtype == 'object'
# list of periods
- df = pd.DataFrame({'a': a.asobject.tolist(),
- 'b': b.asobject.tolist()})
+ df = pd.DataFrame({'a': a.astype(object).tolist(),
+ 'b': b.astype(object).tolist()})
assert df['a'].dtype == 'object'
assert df['b'].dtype == 'object'
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index ad76d17c93c41..7d01a2a70145d 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -76,3 +76,10 @@ def test_map_dictlike(self, mapper):
expected = pd.Index([np.nan] * len(self.index))
result = self.index.map(mapper([], []))
tm.assert_index_equal(result, expected)
+
+ def test_asobject_deprecated(self):
+ # GH18572
+ d = self.create_index()
+ with tm.assert_produces_warning(FutureWarning):
+ i = d.asobject
+ assert isinstance(i, pd.Index)
diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py
index 0db26652eb191..41d0dd38cd5f6 100644
--- a/pandas/tests/indexes/datetimes/test_ops.py
+++ b/pandas/tests/indexes/datetimes/test_ops.py
@@ -51,7 +51,7 @@ def test_ops_properties_basic(self):
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
- def test_asobject_tolist(self):
+ def test_astype_object(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
@@ -59,7 +59,7 @@ def test_asobject_tolist(self):
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
@@ -74,7 +74,7 @@ def test_asobject_tolist(self):
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
@@ -87,7 +87,7 @@ def test_asobject_tolist(self):
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
@@ -389,26 +389,27 @@ def test_comp_nat(self):
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
- for l, r in [(left, right), (left.asobject, right.asobject)]:
- result = l == r
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
- result = l != r
+ result = lhs != rhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == r, expected)
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(l != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != l, expected)
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > l, expected)
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_value_counts_unique(self):
# GH 7735
@@ -636,9 +637,9 @@ def test_equals(self):
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
- assert idx.equals(idx.asobject)
- assert idx.asobject.equals(idx)
- assert idx.asobject.equals(idx.asobject)
+ assert idx.equals(idx.astype(object))
+ assert idx.astype(object).equals(idx)
+ assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
@@ -646,8 +647,8 @@ def test_equals(self):
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
- assert not idx.equals(idx2.asobject)
- assert not idx.asobject.equals(idx2)
+ assert not idx.equals(idx2.astype(object))
+ assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
@@ -656,8 +657,8 @@ def test_equals(self):
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
- assert not idx.equals(idx3.asobject)
- assert not idx.asobject.equals(idx3)
+ assert not idx.equals(idx3.astype(object))
+ assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index 1d77de0d2d8f3..a78bc6fc577b8 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -27,7 +27,7 @@ def test_ops_properties(self):
self.check_ops_properties(PeriodIndex._object_ops, f)
self.check_ops_properties(PeriodIndex._bool_ops, f)
- def test_asobject_tolist(self):
+ def test_astype_object(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
@@ -35,7 +35,7 @@ def test_asobject_tolist(self):
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
@@ -49,7 +49,7 @@ def test_asobject_tolist(self):
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
@@ -290,26 +290,27 @@ def test_comp_nat(self):
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
- for l, r in [(left, right), (left.asobject, right.asobject)]:
- result = l == r
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = lhs == rhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
- result = l != r
+ result = lhs != rhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == r, expected)
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(l != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != l, expected)
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > l, expected)
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_value_counts_unique(self):
# GH 7735
@@ -614,9 +615,9 @@ def test_equals(self):
freq=freq)
assert idx.equals(idx)
assert idx.equals(idx.copy())
- assert idx.equals(idx.asobject)
- assert idx.asobject.equals(idx)
- assert idx.asobject.equals(idx.asobject)
+ assert idx.equals(idx.astype(object))
+ assert idx.astype(object).equals(idx)
+ assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
@@ -624,8 +625,8 @@ def test_equals(self):
freq='H')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
- assert not idx.equals(idx2.asobject)
- assert not idx.asobject.equals(idx2)
+ assert not idx.equals(idx2.astype(object))
+ assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
@@ -634,8 +635,8 @@ def test_equals(self):
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
- assert not idx.equals(idx3.asobject)
- assert not idx.asobject.equals(idx3)
+ assert not idx.equals(idx3.astype(object))
+ assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py
index 13a63de22169e..48378233dd638 100644
--- a/pandas/tests/indexes/period/test_period.py
+++ b/pandas/tests/indexes/period/test_period.py
@@ -24,7 +24,7 @@ def setup_method(self, method):
def create_index(self):
return period_range('20130101', periods=5, freq='D')
- def test_astype(self):
+ def test_astype_conversion(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
@@ -380,23 +380,23 @@ def test_factorize(self):
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
- def test_asobject_like(self):
+ def test_astype_object(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
- tm.assert_numpy_array_equal(idx.asobject.values, exp)
+ tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
- tm.assert_numpy_array_equal(idx.asobject.values, exp)
+ tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
- tm.assert_numpy_array_equal(idx.asobject.values, exp)
+ tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
def test_is_(self):
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 9ef7a43b2193a..72b312f29a793 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -122,7 +122,7 @@ def test_constructor_from_index_datetimetz(self):
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
- result = pd.Index(idx.asobject)
+ result = pd.Index(idx.astype(object))
tm.assert_index_equal(result, idx)
assert result.tz == idx.tz
@@ -131,7 +131,7 @@ def test_constructor_from_index_timedelta(self):
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
- result = pd.Index(idx.asobject)
+ result = pd.Index(idx.astype(object))
tm.assert_index_equal(result, idx)
def test_constructor_from_index_period(self):
@@ -139,7 +139,7 @@ def test_constructor_from_index_period(self):
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
- result = pd.Index(idx.asobject)
+ result = pd.Index(idx.astype(object))
tm.assert_index_equal(result, idx)
def test_constructor_from_series_datetimetz(self):
diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py
index 67238665a2e8a..fac3745ba4fb4 100644
--- a/pandas/tests/indexes/timedeltas/test_ops.py
+++ b/pandas/tests/indexes/timedeltas/test_ops.py
@@ -25,12 +25,12 @@ def test_ops_properties(self):
self.check_ops_properties(TimedeltaIndex._field_ops, f)
self.check_ops_properties(TimedeltaIndex._object_ops, f)
- def test_asobject_tolist(self):
+ def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
@@ -43,7 +43,7 @@ def test_asobject_tolist(self):
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
- result = idx.asobject
+ result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
@@ -217,26 +217,27 @@ def test_comp_nat(self):
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
- for l, r in [(left, right), (left.asobject, right.asobject)]:
- result = l == r
+ for lhs, rhs in [(left, right),
+ (left.astype(object), right.astype(object))]:
+ result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
- result = l != r
+ result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l == pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT == r, expected)
+ tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
- tm.assert_numpy_array_equal(l != pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT != l, expected)
+ tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
- tm.assert_numpy_array_equal(l < pd.NaT, expected)
- tm.assert_numpy_array_equal(pd.NaT > l, expected)
+ tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
+ tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_value_counts_unique(self):
# GH 7735
@@ -473,18 +474,18 @@ def test_equals(self):
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
- assert idx.equals(idx.asobject)
- assert idx.asobject.equals(idx)
- assert idx.asobject.equals(idx.asobject)
+ assert idx.equals(idx.astype(object))
+ assert idx.astype(object).equals(idx)
+ assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
- assert not idx.equals(idx2.asobject)
- assert not idx.asobject.equals(idx2)
- assert not idx.asobject.equals(idx2.asobject)
+ assert not idx.equals(idx2.astype(object))
+ assert not idx.astype(object).equals(idx2)
+ assert not idx.astype(object).equals(idx2.astype(object))
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index f1a478581e730..8f237a7f810c3 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -272,7 +272,7 @@ def test_irreg_hf(self):
_, ax = self.plt.subplots()
df2 = df.copy()
- df2.index = df.index.asobject
+ df2.index = df.index.astype(object)
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
@@ -712,9 +712,9 @@ def test_mixed_freq_irregular_first(self):
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
- tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
+ tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
- tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
+ tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
@@ -744,9 +744,9 @@ def test_mixed_freq_irregular_first_df(self):
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
- tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
+ tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
- tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
+ tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
@@ -1019,7 +1019,7 @@ def test_irreg_dtypes(self):
# np.datetime64
idx = date_range('1/1/2000', periods=10)
- idx = idx[[0, 2, 5, 9]].asobject
+ idx = idx[[0, 2, 5, 9]].astype(object)
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index a57385a9cf690..c814cade77e5c 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -632,7 +632,7 @@ def test_constructor_periodindex(self):
pi = period_range('20130101', periods=5, freq='D')
s = Series(pi)
- expected = Series(pi.asobject)
+ expected = Series(pi.astype(object))
assert_series_equal(s, expected)
assert s.dtype == 'object'
diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py
index e810eadd2dee9..b79d8def905af 100644
--- a/pandas/tests/series/test_datetime_values.py
+++ b/pandas/tests/series/test_datetime_values.py
@@ -228,7 +228,7 @@ def get_dir(s):
results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101', periods=5,
- freq='D', name='xxx').asobject)
+ freq='D', name='xxx').astype(object))
results = get_dir(s)
tm.assert_almost_equal(
results, list(sorted(set(ok_for_period + ok_for_period_methods))))
@@ -387,7 +387,7 @@ def test_sub_of_datetime_from_TimeSeries(self):
assert result.dtype == 'timedelta64[ns]'
def test_between(self):
- s = Series(bdate_range('1/1/2000', periods=20).asobject)
+ s = Series(bdate_range('1/1/2000', periods=20).astype(object))
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index ad6d019b5287e..163950b75bc34 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -37,6 +37,12 @@ def test_astype(self, dtype):
assert as_typed.dtype == dtype
assert as_typed.name == s.name
+ def test_asobject_deprecated(self):
+ s = Series(np.random.randn(5), name='foo')
+ with tm.assert_produces_warning(FutureWarning):
+ o = s.asobject
+ assert isinstance(o, np.ndarray)
+
def test_dtype(self):
assert self.ts.dtype == np.dtype('float64')
diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py
index c1ef70bba8634..b0d0e2a51b5f4 100644
--- a/pandas/tests/series/test_timeseries.py
+++ b/pandas/tests/series/test_timeseries.py
@@ -134,13 +134,13 @@ def test_shift_dst(self):
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(1)
- exp_vals = [NaT] + dates.asobject.values.tolist()[:9]
+ exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(-2)
- exp_vals = dates.asobject.values.tolist()[2:] + [NaT, NaT]
+ exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index 31f4ca146040e..df76390d7ce7a 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -437,7 +437,7 @@ def test_value_counts_unique_nunique(self):
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
- orig._values.asobject.values)
+ orig._values.astype(object).values)
else:
tm.assert_numpy_array_equal(result, orig.values)
@@ -525,8 +525,8 @@ def test_value_counts_unique_nunique_null(self):
Index(values[1:], name='a'))
elif is_datetimetz(o):
# unable to compare NaT / nan
- tm.assert_numpy_array_equal(result[1:],
- values[2:].asobject.values)
+ vals = values[2:].astype(object).values
+ tm.assert_numpy_array_equal(result[1:], vals)
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py
index 9666a4c154c63..beea6df086b72 100644
--- a/pandas/tests/tseries/test_frequencies.py
+++ b/pandas/tests/tseries/test_frequencies.py
@@ -720,15 +720,15 @@ def _check_generated_range(self, start, freq):
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
- rng = Index(rng.to_timestamp('D', how='e').asobject)
+ rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-DEC'
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
- rng = Index(rng.to_timestamp('D', how='e').asobject)
+ rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-NOV'
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
- rng = Index(rng.to_timestamp('D', how='e').asobject)
+ rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-OCT'
def test_infer_freq_tz(self):
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index a01166daf6be1..5fd2089d234c1 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -688,7 +688,7 @@ def test_index_astype_asobject_tzinfos(self):
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern'))
- objs = rng.asobject
+ objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
@@ -1552,8 +1552,8 @@ def test_append_aware_naive(self):
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
- assert ts_result.index.equals(ts1.index.asobject.append(
- ts2.index.asobject))
+ assert ts_result.index.equals(ts1.index.astype(object).append(
+ ts2.index.astype(object)))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
@@ -1561,7 +1561,7 @@ def test_append_aware_naive(self):
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
- assert ts_result.index.equals(ts1.index.asobject.append(
+ assert ts_result.index.equals(ts1.index.astype(object).append(
ts2.index))
def test_equal_join_ensure_utc(self):
| This PR supersedes #18477.
closes #18237
- [ x] xref #18262
- [x ] tests added / passed
- [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ x] whatsnew entry
Deprecate ``Series.asobject`` and ``DatetimeIndexOpsMixin.asobject`` as per discussion in #18262. ``DatetimeIndexOpsMixin`` is a mixin for ``DatetimeIndex``, ``PeriodIndex`` and ``TimeDeltaIndex``, so all the ``.asobject`` property will be deprecated all those classes.
Internal references to ``asobject`` have been cleaned up, so a eventual removal will be easy when that time comes. | https://api.github.com/repos/pandas-dev/pandas/pulls/18572 | 2017-11-30T01:09:08Z | 2017-12-04T10:49:25Z | 2017-12-04T10:49:25Z | 2017-12-04T11:23:32Z |
remove unused args etal from np_datetime.c | diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c
index cb4f9d3efdcd0..edc9c0f8f903d 100644
--- a/pandas/_libs/src/datetime/np_datetime.c
+++ b/pandas/_libs/src/datetime/np_datetime.c
@@ -318,26 +318,19 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a,
/*
*
* Tests for and converts a Python datetime.datetime or datetime.date
- * object into a NumPy pandas_datetimestruct.
+ * object into a NumPy pandas_datetimestruct. Uses tzinfo (if present)
+ * to convert to UTC time.
*
* While the C API has PyDate_* and PyDateTime_* functions, the following
* implementation just asks for attributes, and thus supports
* datetime duck typing. The tzinfo time zone conversion would require
* this style of access anyway.
*
- * 'out_bestunit' gives a suggested unit based on whether the object
- * was a datetime.date or datetime.datetime object.
- *
- * If 'apply_tzinfo' is 1, this function uses the tzinfo to convert
- * to UTC time, otherwise it returns the struct with the local time.
- *
* Returns -1 on error, 0 on success, and 1 (with no error set)
* if obj doesn't have the neeeded date or datetime attributes.
*/
int convert_pydatetime_to_datetimestruct(PyObject *obj,
- pandas_datetimestruct *out,
- PANDAS_DATETIMEUNIT *out_bestunit,
- int apply_tzinfo) {
+ pandas_datetimestruct *out) {
PyObject *tmp;
int isleap;
@@ -404,10 +397,6 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj,
!PyObject_HasAttrString(obj, "minute") ||
!PyObject_HasAttrString(obj, "second") ||
!PyObject_HasAttrString(obj, "microsecond")) {
- /* The best unit for date is 'D' */
- if (out_bestunit != NULL) {
- *out_bestunit = PANDAS_FR_D;
- }
return 0;
}
@@ -465,7 +454,7 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj,
}
/* Apply the time zone offset if it exists */
- if (apply_tzinfo && PyObject_HasAttrString(obj, "tzinfo")) {
+ if (PyObject_HasAttrString(obj, "tzinfo")) {
tmp = PyObject_GetAttrString(obj, "tzinfo");
if (tmp == NULL) {
return -1;
@@ -506,11 +495,6 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj,
}
}
- /* The resolution of Python's datetime is 'us' */
- if (out_bestunit != NULL) {
- *out_bestunit = PANDAS_FR_us;
- }
-
return 0;
invalid_date:
@@ -529,51 +513,34 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj,
npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr,
pandas_datetimestruct *d) {
- pandas_datetime_metadata meta;
npy_datetime result = PANDAS_DATETIME_NAT;
- meta.base = fr;
- meta.num = 1;
-
- convert_datetimestruct_to_datetime(&meta, d, &result);
+ convert_datetimestruct_to_datetime(fr, d, &result);
return result;
}
void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr,
pandas_datetimestruct *result) {
- pandas_datetime_metadata meta;
-
- meta.base = fr;
- meta.num = 1;
-
- convert_datetime_to_datetimestruct(&meta, val, result);
+ convert_datetime_to_datetimestruct(fr, val, result);
}
void pandas_timedelta_to_timedeltastruct(npy_timedelta val,
PANDAS_DATETIMEUNIT fr,
pandas_timedeltastruct *result) {
- pandas_datetime_metadata meta;
-
- meta.base = fr;
- meta.num = 1;
-
- convert_timedelta_to_timedeltastruct(&meta, val, result);
+ convert_timedelta_to_timedeltastruct(fr, val, result);
}
/*
* Converts a datetime from a datetimestruct to a datetime based
- * on some metadata. The date is assumed to be valid.
- *
- * TODO: If meta->num is really big, there could be overflow
+ * on a metadata unit. The date is assumed to be valid.
*
* Returns 0 on success, -1 on failure.
*/
-int convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
+int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base,
const pandas_datetimestruct *dts,
npy_datetime *out) {
npy_datetime ret;
- PANDAS_DATETIMEUNIT base = meta->base;
if (base == PANDAS_FR_Y) {
/* Truncate to the year */
@@ -665,15 +632,6 @@ int convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
}
}
- /* Divide by the multiplier */
- if (meta->num > 1) {
- if (ret >= 0) {
- ret /= meta->num;
- } else {
- ret = (ret - meta->num + 1) / meta->num;
- }
- }
-
*out = ret;
return 0;
@@ -682,7 +640,7 @@ int convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
/*
* Converts a datetime based on the given metadata into a datetimestruct
*/
-int convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
+int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base,
npy_datetime dt,
pandas_datetimestruct *out) {
npy_int64 perday;
@@ -693,14 +651,11 @@ int convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
out->month = 1;
out->day = 1;
- /* TODO: Change to a mechanism that avoids the potential overflow */
- dt *= meta->num;
-
/*
* Note that care must be taken with the / and % operators
* for negative values.
*/
- switch (meta->base) {
+ switch (base) {
case PANDAS_FR_Y:
out->year = 1970 + dt;
break;
@@ -902,11 +857,11 @@ int convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
/*
* Converts a timedelta from a timedeltastruct to a timedelta based
- * on some metadata. The timedelta is assumed to be valid.
+ * on a metadata unit. The timedelta is assumed to be valid.
*
* Returns 0 on success, -1 on failure.
*/
-int convert_timedelta_to_timedeltastruct(pandas_timedelta_metadata *meta,
+int convert_timedelta_to_timedeltastruct(PANDAS_DATETIMEUNIT base,
npy_timedelta td,
pandas_timedeltastruct *out) {
npy_int64 frac;
@@ -918,7 +873,7 @@ int convert_timedelta_to_timedeltastruct(pandas_timedelta_metadata *meta,
/* Initialize the output to all zeros */
memset(out, 0, sizeof(pandas_timedeltastruct));
- switch (meta->base) {
+ switch (base) {
case PANDAS_FR_ns:
// put frac in seconds
diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h
index 980c66218f7e6..b6c0852bfe764 100644
--- a/pandas/_libs/src/datetime/np_datetime.h
+++ b/pandas/_libs/src/datetime/np_datetime.h
@@ -40,8 +40,6 @@ typedef enum {
#define PANDAS_DATETIME_NUMUNITS 13
-#define PANDAS_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1)
-
#define PANDAS_DATETIME_NAT NPY_MIN_INT64
typedef struct {
@@ -54,13 +52,6 @@ typedef struct {
npy_int32 hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds;
} pandas_timedeltastruct;
-typedef struct {
- PANDAS_DATETIMEUNIT base;
- int num;
-} pandas_datetime_metadata;
-
-typedef pandas_datetime_metadata pandas_timedelta_metadata;
-
extern const pandas_datetimestruct _NS_MIN_DTS;
extern const pandas_datetimestruct _NS_MAX_DTS;
@@ -68,9 +59,7 @@ extern const pandas_datetimestruct _NS_MAX_DTS;
// ----------------------------------------------------------------------------
int convert_pydatetime_to_datetimestruct(PyObject *obj,
- pandas_datetimestruct *out,
- PANDAS_DATETIMEUNIT *out_bestunit,
- int apply_tzinfo);
+ pandas_datetimestruct *out);
npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr,
pandas_datetimestruct *d);
@@ -91,19 +80,6 @@ extern const int days_per_month_table[2][12];
int is_leapyear(npy_int64 year);
-/*
- * Converts a datetime from a datetimestruct to a datetime based
- * on some metadata. The date is assumed to be valid.
- *
- * TODO: If meta->num is really big, there could be overflow
- *
- * Returns 0 on success, -1 on failure.
- */
-int
-convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
- const pandas_datetimestruct *dts,
- npy_datetime *out);
-
/*
* Calculates the days offset from the 1970 epoch.
*/
@@ -127,14 +103,8 @@ add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes);
int
-convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
+convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base,
npy_datetime dt,
pandas_datetimestruct *out);
-int
-convert_timedelta_to_timedeltastruct(pandas_timedelta_metadata *meta,
- npy_timedelta td,
- pandas_timedeltastruct *out);
-
-
#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c
index 1ff4f08cf3c9d..92f030b5fea2b 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/src/datetime/np_datetime_strings.c
@@ -279,14 +279,9 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (len == 3 && tolower(str[0]) == 'n' && tolower(str[1]) == 'o' &&
tolower(str[2]) == 'w') {
NPY_TIME_T rawtime = 0;
- pandas_datetime_metadata meta;
time(&rawtime);
- /* Set up a dummy metadata for the conversion */
- meta.base = PANDAS_FR_s;
- meta.num = 1;
-
bestunit = PANDAS_FR_s;
/*
@@ -304,7 +299,7 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
*out_special = 1;
}
- return convert_datetime_to_datetimestruct(&meta, rawtime, out);
+ return convert_datetime_to_datetimestruct(PANDAS_FR_s, rawtime, out);
}
/* Anything else isn't a special value */
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index c8a29cd949c3c..7c64db69f0c46 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -493,7 +493,7 @@ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue,
PRINTMARK();
- if (!convert_pydatetime_to_datetimestruct(obj, &dts, NULL, 1)) {
+ if (!convert_pydatetime_to_datetimestruct(obj, &dts)) {
PRINTMARK();
return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen);
} else {
| What #18565 does for np_datetime_strings.c, this does for np_datetime.c
Getting rid of `meta` also gets rid of a potential problem noted in a comment that `meta->num` could potentially cause an overflow. Since `meta->num` is 1 in all pandas usages, that pitfall doesn't apply. | https://api.github.com/repos/pandas-dev/pandas/pulls/18567 | 2017-11-29T17:36:03Z | 2017-12-04T11:37:27Z | 2017-12-04T11:37:27Z | 2017-12-08T19:38:20Z |
Remove unused arguments from np_datetime_strings | diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd
index 6e5d8b82c118f..d919fca09c006 100644
--- a/pandas/_libs/src/datetime.pxd
+++ b/pandas/_libs/src/datetime.pxd
@@ -7,9 +7,6 @@ from cpython cimport PyUnicode_Check, PyUnicode_AsASCIIString
cdef extern from "numpy/ndarrayobject.h":
ctypedef int64_t npy_datetime
-cdef extern from "numpy/npy_common.h":
- ctypedef unsigned char npy_bool
-
cdef extern from "datetime/np_datetime.h":
ctypedef enum PANDAS_DATETIMEUNIT:
PANDAS_FR_Y
@@ -37,11 +34,9 @@ cdef extern from "datetime/np_datetime.h":
cdef extern from "datetime/np_datetime_strings.h":
- int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
+ int parse_iso_8601_datetime(char *str, int len,
pandas_datetimestruct *out,
- int *out_local, int *out_tzoffset,
- PANDAS_DATETIMEUNIT *out_bestunit,
- npy_bool *out_special)
+ int *out_local, int *out_tzoffset)
cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts,
int* out_local, int* out_tzoffset) except? -1:
@@ -62,11 +57,8 @@ cdef inline int _cstring_to_dts(char *val, int length,
pandas_datetimestruct* dts,
int* out_local, int* out_tzoffset) except? -1:
cdef:
- npy_bool special
- PANDAS_DATETIMEUNIT out_bestunit
int result
- result = parse_iso_8601_datetime(val, length, PANDAS_FR_ns,
- dts, out_local, out_tzoffset,
- &out_bestunit, &special)
+ result = parse_iso_8601_datetime(val, length,
+ dts, out_local, out_tzoffset)
return result
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c
index 92f030b5fea2b..a047650f4c88d 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/src/datetime/np_datetime_strings.c
@@ -36,24 +36,6 @@ This file implements string parsing and creation for NumPy datetime.
/* Platform-specific time_t typedef */
typedef time_t NPY_TIME_T;
-/* We *do* want these symbols, but for Cython, not for C.
- Fine in Mac OSX, but Linux complains.
-
-static void _suppress_unused_variable_warning(void) {
- int x = days_per_month_table[0][0];
- x = x;
-
- int y = _month_offset[0][0];
- y = y;
-
- char *z = _datetime_strings[0];
- z = z;
-} */
-
-/* Exported as DATETIMEUNITS in multiarraymodule.c */
-static char *_datetime_strings[PANDAS_DATETIME_NUMUNITS] = {
- "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
-};
/*
* Wraps `localtime` functionality for multiple platforms. This
* converts a time value to a time structure in the local timezone.
@@ -100,72 +82,6 @@ static int get_localtime(NPY_TIME_T *ts, struct tm *tms) {
}
-/*
- * Converts a datetimestruct in UTC to a datetimestruct in local time,
- * also returning the timezone offset applied.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int convert_datetimestruct_utc_to_local(
- pandas_datetimestruct *out_dts_local, const pandas_datetimestruct *dts_utc,
- int *out_timezone_offset) {
- NPY_TIME_T rawtime = 0, localrawtime;
- struct tm tm_;
- npy_int64 year_correction = 0;
-
- /* Make a copy of the input 'dts' to modify */
- *out_dts_local = *dts_utc;
-
- /* HACK: Use a year < 2038 for later years for small time_t */
- if (sizeof(NPY_TIME_T) == 4 && out_dts_local->year >= 2038) {
- if (is_leapyear(out_dts_local->year)) {
- /* 2036 is a leap year */
- year_correction = out_dts_local->year - 2036;
- out_dts_local->year -= year_correction;
- } else {
- /* 2037 is not a leap year */
- year_correction = out_dts_local->year - 2037;
- out_dts_local->year -= year_correction;
- }
- }
-
- /*
- * Convert everything in 'dts' to a time_t, to minutes precision.
- * This is POSIX time, which skips leap-seconds, but because
- * we drop the seconds value from the pandas_datetimestruct, everything
- * is ok for this operation.
- */
- rawtime = (time_t)get_datetimestruct_days(out_dts_local) * 24 * 60 * 60;
- rawtime += dts_utc->hour * 60 * 60;
- rawtime += dts_utc->min * 60;
-
- /* localtime converts a 'time_t' into a local 'struct tm' */
- if (get_localtime(&rawtime, &tm_) < 0) {
- return -1;
- }
-
- /* Copy back all the values except seconds */
- out_dts_local->min = tm_.tm_min;
- out_dts_local->hour = tm_.tm_hour;
- out_dts_local->day = tm_.tm_mday;
- out_dts_local->month = tm_.tm_mon + 1;
- out_dts_local->year = tm_.tm_year + 1900;
-
- /* Extract the timezone offset that was applied */
- rawtime /= 60;
- localrawtime = (time_t)get_datetimestruct_days(out_dts_local) * 24 * 60;
- localrawtime += out_dts_local->hour * 60;
- localrawtime += out_dts_local->min;
-
- *out_timezone_offset = localrawtime - rawtime;
-
- /* Reapply the year 2038 year correction HACK */
- out_dts_local->year += year_correction;
-
- return 0;
-}
-
-
/*
* Parses (almost) standard ISO 8601 date strings. The differences are:
*
@@ -182,8 +98,6 @@ static int convert_datetimestruct_utc_to_local(
* omitted, each component must be 2 digits if it appears. (GH-10041)
*
* 'str' must be a NULL-terminated string, and 'len' must be its length.
- * 'unit' should contain -1 if the unit is unknown, or the unit
- * which will be used if it is.
*
* 'out' gets filled with the parsed date-time.
* 'out_local' gets set to 1 if the parsed time contains timezone,
@@ -193,24 +107,15 @@ static int convert_datetimestruct_utc_to_local(
* to 0 otherwise. The values 'now' and 'today' don't get counted
* as local, and neither do UTC +/-#### timezone offsets, because
* they aren't using the computer's local timezone offset.
- * 'out_bestunit' gives a suggested unit based on the amount of
- * resolution provided in the string, or -1 for NaT.
- * 'out_special' gets set to 1 if the parsed time was 'today',
- * 'now', or ''/'NaT'. For 'today', the unit recommended is
- * 'D', for 'now', the unit recommended is 's', and for 'NaT'
- * the unit recommended is 'Y'.
*
* Returns 0 on success, -1 on failure.
*/
-int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
+int parse_iso_8601_datetime(char *str, int len,
pandas_datetimestruct *out,
- int *out_local, int *out_tzoffset,
- PANDAS_DATETIMEUNIT *out_bestunit,
- npy_bool *out_special) {
+ int *out_local, int *out_tzoffset) {
int year_leap = 0;
int i, numdigits;
char *substr, sublen;
- PANDAS_DATETIMEUNIT bestunit;
/* If year-month-day are separated by a valid separator,
* months/days without leading zeroes will be parsed
@@ -256,8 +161,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
out->month = tm_.tm_mon + 1;
out->day = tm_.tm_mday;
- bestunit = PANDAS_FR_D;
-
/*
* Indicate that this was a special value, and
* is a date (unit 'D').
@@ -265,12 +168,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (out_local != NULL) {
*out_local = 0;
}
- if (out_bestunit != NULL) {
- *out_bestunit = bestunit;
- }
- if (out_special != NULL) {
- *out_special = 1;
- }
return 0;
}
@@ -282,8 +179,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
time(&rawtime);
- bestunit = PANDAS_FR_s;
-
/*
* Indicate that this was a special value, and
* use 's' because the time() function has resolution
@@ -292,21 +187,10 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (out_local != NULL) {
*out_local = 0;
}
- if (out_bestunit != NULL) {
- *out_bestunit = bestunit;
- }
- if (out_special != NULL) {
- *out_special = 1;
- }
return convert_datetime_to_datetimestruct(PANDAS_FR_s, rawtime, out);
}
- /* Anything else isn't a special value */
- if (out_special != NULL) {
- *out_special = 0;
- }
-
substr = str;
sublen = len;
@@ -349,7 +233,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (out_local != NULL) {
*out_local = 0;
}
- bestunit = PANDAS_FR_Y;
goto finish;
}
@@ -400,7 +283,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (out_local != NULL) {
*out_local = 0;
}
- bestunit = PANDAS_FR_M;
goto finish;
}
@@ -441,7 +323,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (out_local != NULL) {
*out_local = 0;
}
- bestunit = PANDAS_FR_D;
goto finish;
}
@@ -477,7 +358,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (!hour_was_2_digits) {
goto parse_error;
}
- bestunit = PANDAS_FR_h;
goto finish;
}
@@ -493,7 +373,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
if (!hour_was_2_digits) {
goto parse_error;
}
- bestunit = PANDAS_FR_h;
goto parse_timezone;
}
@@ -517,7 +396,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
if (sublen == 0) {
- bestunit = PANDAS_FR_m;
goto finish;
}
@@ -532,7 +410,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
} else if (!has_hms_sep && isdigit(*substr)) {
} else {
- bestunit = PANDAS_FR_m;
goto parse_timezone;
}
@@ -560,7 +437,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
++substr;
--sublen;
} else {
- bestunit = PANDAS_FR_s;
goto parse_timezone;
}
@@ -577,11 +453,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
if (sublen == 0 || !isdigit(*substr)) {
- if (numdigits > 3) {
- bestunit = PANDAS_FR_us;
- } else {
- bestunit = PANDAS_FR_ms;
- }
goto parse_timezone;
}
@@ -598,11 +469,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
if (sublen == 0 || !isdigit(*substr)) {
- if (numdigits > 3) {
- bestunit = PANDAS_FR_ps;
- } else {
- bestunit = PANDAS_FR_ns;
- }
goto parse_timezone;
}
@@ -618,12 +484,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
}
- if (numdigits > 3) {
- bestunit = PANDAS_FR_as;
- } else {
- bestunit = PANDAS_FR_fs;
- }
-
parse_timezone:
/* trim any whitepsace between time/timeezone */
while (sublen > 0 && isspace(*substr)) {
@@ -740,10 +600,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
}
finish:
- if (out_bestunit != NULL) {
- *out_bestunit = bestunit;
- }
-
return 0;
parse_error:
@@ -814,38 +670,23 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) {
/*
* Converts an pandas_datetimestruct to an (almost) ISO 8601
- * NULL-terminated string. If the string fits in the space exactly,
- * it leaves out the NULL terminator and returns success.
+ * NULL-terminated string using timezone Z (UTC). If the string fits in
+ * the space exactly, it leaves out the NULL terminator and returns success.
*
* The differences from ISO 8601 are the 'NaT' string, and
* the number of year digits is >= 4 instead of strictly 4.
*
- * If 'local' is non-zero, it produces a string in local time with
- * a +-#### timezone offset, otherwise it uses timezone Z (UTC).
- *
* 'base' restricts the output to that unit. Set 'base' to
* -1 to auto-detect a base after which all the values are zero.
*
- * 'tzoffset' is used if 'local' is enabled, and 'tzoffset' is
- * set to a value other than -1. This is a manual override for
- * the local time zone to use, as an offset in minutes.
- *
* Returns 0 on success, -1 on failure (for example if the output
* string was too short).
*/
int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
- int local, PANDAS_DATETIMEUNIT base, int tzoffset) {
- pandas_datetimestruct dts_local;
- int timezone_offset = 0;
-
+ PANDAS_DATETIMEUNIT base) {
char *substr = outstr, sublen = outlen;
int tmplen;
- /* Only do local time within a reasonable year range */
- if ((dts->year <= 1800 || dts->year >= 10000) && tzoffset == -1) {
- local = 0;
- }
-
/*
* Print weeks with the same precision as days.
*
@@ -856,26 +697,6 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
base = PANDAS_FR_D;
}
- /* Use the C API to convert from UTC to local time */
- if (local && tzoffset == -1) {
- if (convert_datetimestruct_utc_to_local(&dts_local, dts,
- &timezone_offset) < 0) {
- return -1;
- }
-
- /* Set dts to point to our local time instead of the UTC time */
- dts = &dts_local;
- } else if (local) {
- // Use the manually provided tzoffset.
- // Make a copy of the pandas_datetimestruct we can modify.
- dts_local = *dts;
- dts = &dts_local;
-
- /* Set and apply the required timezone offset */
- timezone_offset = tzoffset;
- add_minutes_to_datetimestruct(dts, timezone_offset);
- }
-
/* YEAR */
/*
* Can't use PyOS_snprintf, because it always produces a '\0'
@@ -1139,48 +960,13 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
sublen -= 3;
add_time_zone:
- if (local) {
- /* Add the +/- sign */
- if (sublen < 1) {
- goto string_too_short;
- }
- if (timezone_offset < 0) {
- substr[0] = '-';
- timezone_offset = -timezone_offset;
- } else {
- substr[0] = '+';
- }
- substr += 1;
- sublen -= 1;
-
- /* Add the timezone offset */
- if (sublen < 1) {
- goto string_too_short;
- }
- substr[0] = (char)((timezone_offset / (10 * 60)) % 10 + '0');
- if (sublen < 2) {
- goto string_too_short;
- }
- substr[1] = (char)((timezone_offset / 60) % 10 + '0');
- if (sublen < 3) {
- goto string_too_short;
- }
- substr[2] = (char)(((timezone_offset % 60) / 10) % 10 + '0');
- if (sublen < 4) {
- goto string_too_short;
- }
- substr[3] = (char)((timezone_offset % 60) % 10 + '0');
- substr += 4;
- sublen -= 4;
- } else {
- /* UTC "Zulu" time */
- if (sublen < 1) {
- goto string_too_short;
- }
- substr[0] = 'Z';
- substr += 1;
- sublen -= 1;
+ /* UTC "Zulu" time */
+ if (sublen < 1) {
+ goto string_too_short;
}
+ substr[0] = 'Z';
+ substr += 1;
+ sublen -= 1;
/* Add a NULL terminator, and return */
if (sublen > 0) {
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h
index 4c248129b68c3..ef7fe200aa58e 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.h
+++ b/pandas/_libs/src/datetime/np_datetime_strings.h
@@ -38,8 +38,6 @@ This file implements string parsing and creation for NumPy datetime.
* day according to local time) and "Now" (current time in UTC).
*
* 'str' must be a NULL-terminated string, and 'len' must be its length.
- * 'unit' should contain -1 if the unit is unknown, or the unit
- * which will be used if it is.
*
* 'out' gets filled with the parsed date-time.
* 'out_local' gets whether returned value contains timezone. 0 for UTC, 1 for local time.
@@ -48,23 +46,14 @@ This file implements string parsing and creation for NumPy datetime.
* to 0 otherwise. The values 'now' and 'today' don't get counted
* as local, and neither do UTC +/-#### timezone offsets, because
* they aren't using the computer's local timezone offset.
- * 'out_bestunit' gives a suggested unit based on the amount of
- * resolution provided in the string, or -1 for NaT.
- * 'out_special' gets set to 1 if the parsed time was 'today',
- * 'now', or ''/'NaT'. For 'today', the unit recommended is
- * 'D', for 'now', the unit recommended is 's', and for 'NaT'
- * the unit recommended is 'Y'.
*
* Returns 0 on success, -1 on failure.
*/
int
parse_iso_8601_datetime(char *str, int len,
- PANDAS_DATETIMEUNIT unit,
pandas_datetimestruct *out,
int *out_local,
- int *out_tzoffset,
- PANDAS_DATETIMEUNIT *out_bestunit,
- npy_bool *out_special);
+ int *out_tzoffset);
/*
* Provides a string length to use for converting datetime
@@ -75,23 +64,16 @@ get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base);
/*
* Converts an pandas_datetimestruct to an (almost) ISO 8601
- * NULL-terminated string.
- *
- * If 'local' is non-zero, it produces a string in local time with
- * a +-#### timezone offset, otherwise it uses timezone Z (UTC).
+ * NULL-terminated string using timezone Z (UTC).
*
* 'base' restricts the output to that unit. Set 'base' to
* -1 to auto-detect a base after which all the values are zero.
*
- * 'tzoffset' is used if 'local' is enabled, and 'tzoffset' is
- * set to a value other than -1. This is a manual override for
- * the local time zone to use, as an offset in minutes.
- *
* Returns 0 on success, -1 on failure (for example if the output
* string was too short).
*/
int
make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
- int local, PANDAS_DATETIMEUNIT base, int tzoffset);
+ PANDAS_DATETIMEUNIT base);
#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index 7c64db69f0c46..61e3752a49639 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -455,8 +455,7 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts,
return NULL;
}
- if (!make_iso_8601_datetime(dts, GET_TC(tc)->cStr, *_outLen, 0, base,
- -1)) {
+ if (!make_iso_8601_datetime(dts, GET_TC(tc)->cStr, *_outLen, base)) {
PRINTMARK();
*_outLen = strlen(GET_TC(tc)->cStr);
return GET_TC(tc)->cStr;
| Same deal as #18546. This completes the pure-cleaning phase for np_datetime_strings.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18565 | 2017-11-29T16:56:17Z | 2017-12-06T01:39:25Z | 2017-12-06T01:39:24Z | 2017-12-08T19:38:16Z |
ENH: Support TZ Aware IntervalIndex | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 4c716bf15d923..f55c6f696544e 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -132,6 +132,7 @@ Other Enhancements
- :func:`pandas.read_clipboard` updated to use qtpy, falling back to PyQt5 and then PyQt4, adding compatibility with Python3 and multiple python-qt bindings (:issue:`17722`)
- Improved wording of ``ValueError`` raised in :func:`read_csv` when the ``usecols`` argument cannot match all columns. (:issue:`17301`)
- :func:`DataFrame.corrwith` now silently drops non-numeric columns when passed a Series. Before, an exception was raised (:issue:`18570`).
+- :class:`IntervalIndex` now supports time zone aware ``Interval`` objects (:issue:`18537`, :issue:`18538`)
.. _whatsnew_0220.api_breaking:
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 822df1ce2b968..480ea5cb4fa80 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -6,6 +6,7 @@ cimport cython
import cython
from numpy cimport ndarray
from tslib import Timestamp
+from tslibs.timezones cimport get_timezone
from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE,
PyObject_RichCompare)
@@ -119,6 +120,13 @@ cdef class Interval(IntervalMixin):
raise ValueError(msg)
if not left <= right:
raise ValueError('left side of interval must be <= right side')
+ if (isinstance(left, Timestamp) and
+ get_timezone(left.tzinfo) != get_timezone(right.tzinfo)):
+ # GH 18538
+ msg = ("left and right must have the same time zone, got "
+ "'{left_tz}' and '{right_tz}'")
+ raise ValueError(msg.format(left_tz=left.tzinfo,
+ right_tz=right.tzinfo))
self.left = left
self.right = right
self.closed = closed
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 02ac74e619fa4..a32e79920db41 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -3,13 +3,14 @@
import numpy as np
from pandas.core.dtypes.missing import notna, isna
-from pandas.core.dtypes.generic import ABCPeriodIndex
+from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas.core.dtypes.cast import maybe_convert_platform
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
+ is_datetime64tz_dtype,
is_integer_dtype,
is_object_dtype,
is_categorical_dtype,
@@ -54,7 +55,7 @@ def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
- if is_datetime_or_timedelta_dtype(dtype):
+ if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
@@ -69,7 +70,7 @@ def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
- if is_datetime_or_timedelta_dtype(dtype):
+ if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
@@ -227,17 +228,22 @@ def _simple_new(cls, left, right, closed=None, name=None,
# coerce dtypes to match if needed
if is_float_dtype(left) and is_integer_dtype(right):
right = right.astype(left.dtype)
- if is_float_dtype(right) and is_integer_dtype(left):
+ elif is_float_dtype(right) and is_integer_dtype(left):
left = left.astype(right.dtype)
if type(left) != type(right):
- raise ValueError("must not have differing left [{left}] "
- "and right [{right}] types"
- .format(left=type(left), right=type(right)))
-
- if isinstance(left, ABCPeriodIndex):
- raise ValueError("Period dtypes are not supported, "
- "use a PeriodIndex instead")
+ msg = ('must not have differing left [{ltype}] and right '
+ '[{rtype}] types')
+ raise ValueError(msg.format(ltype=type(left).__name__,
+ rtype=type(right).__name__))
+ elif isinstance(left, ABCPeriodIndex):
+ msg = 'Period dtypes are not supported, use a PeriodIndex instead'
+ raise ValueError(msg)
+ elif (isinstance(left, ABCDatetimeIndex) and
+ str(left.tz) != str(right.tz)):
+ msg = ("left and right must have the same time zone, got "
+ "'{left_tz}' and '{right_tz}'")
+ raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz))
result._left = left
result._right = right
@@ -657,8 +663,8 @@ def mid(self):
return Index(0.5 * (self.left.values + self.right.values))
except TypeError:
# datetime safe version
- delta = self.right.values - self.left.values
- return Index(self.left.values + 0.5 * delta)
+ delta = self.right - self.left
+ return self.left + 0.5 * delta
@cache_readonly
def is_monotonic(self):
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index dc06e51c6d8e7..1850ff2795a24 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -42,24 +42,37 @@ def create_index_with_nan(self, closed='right'):
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
- def test_constructors(self, closed, name):
- left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
+ @pytest.mark.parametrize('data', [
+ Index([0, 1, 2, 3, 4]),
+ Index(list('abcde')),
+ date_range('2017-01-01', periods=5),
+ date_range('2017-01-01', periods=5, tz='US/Eastern'),
+ timedelta_range('1 day', periods=5)])
+ def test_constructors(self, data, closed, name):
+ left, right = data[:-1], data[1:]
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
+ # validate expected
+ assert expected.closed == closed
+ assert expected.name == name
+ assert expected.dtype.subtype == data.dtype
+ tm.assert_index_equal(expected.left, data[:-1])
+ tm.assert_index_equal(expected.right, data[1:])
+
+ # validated constructors
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
- result = IntervalIndex.from_breaks(
- np.arange(5), closed=closed, name=name)
+ result = IntervalIndex.from_breaks(data, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
- left.values, right.values, closed=closed, name=name)
+ left, right, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
@@ -186,6 +199,9 @@ def test_constructors_errors(self):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
+ with tm.assert_raises_regex(ValueError, msg):
+ IntervalIndex([Interval(0, 1), Interval(2, 3, closed='left')])
+
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
@@ -209,26 +225,24 @@ def test_constructors_errors(self):
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
- def test_constructors_datetimelike(self, closed):
+ @pytest.mark.parametrize('tz_left, tz_right', [
+ (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
+ def test_constructors_errors_tz(self, tz_left, tz_right):
+ # GH 18537
+ left = date_range('2017-01-01', periods=4, tz=tz_left)
+ right = date_range('2017-01-02', periods=4, tz=tz_right)
- # DTI / TDI
- for idx in [pd.date_range('20130101', periods=5),
- pd.timedelta_range('1 day', periods=5)]:
- result = IntervalIndex.from_breaks(idx, closed=closed)
- expected = IntervalIndex.from_breaks(idx.values, closed=closed)
- tm.assert_index_equal(result, expected)
-
- expected_scalar_type = type(idx[0])
- i = result[0]
- assert isinstance(i.left, expected_scalar_type)
- assert isinstance(i.right, expected_scalar_type)
+ # don't need to check IntervalIndex(...) or from_intervals, since
+ # mixed tz are disallowed at the Interval level
+ with pytest.raises(ValueError):
+ IntervalIndex.from_arrays(left, right)
- def test_constructors_error(self):
+ with pytest.raises(ValueError):
+ IntervalIndex.from_tuples(lzip(left, right))
- # non-intervals
- def f():
- IntervalIndex.from_intervals([0.997, 4.0])
- pytest.raises(TypeError, f)
+ with pytest.raises(ValueError):
+ breaks = left.tolist() + [right[-1]]
+ IntervalIndex.from_breaks(breaks)
def test_properties(self, closed):
index = self.create_index(closed=closed)
@@ -964,23 +978,46 @@ def test_sort_values(self, closed):
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
- def test_datetime(self):
- dates = date_range('2000', periods=3)
- idx = IntervalIndex.from_breaks(dates)
-
- tm.assert_index_equal(idx.left, dates[:2])
- tm.assert_index_equal(idx.right, dates[-2:])
-
- expected = date_range('2000-01-01T12:00', periods=2)
- tm.assert_index_equal(idx.mid, expected)
-
- assert Timestamp('2000-01-01T12') not in idx
- assert Timestamp('2000-01-01T12') not in idx
-
- target = date_range('1999-12-31T12:00', periods=7, freq='12H')
- actual = idx.get_indexer(target)
+ @pytest.mark.parametrize('tz', [None, 'US/Eastern'])
+ def test_datetime(self, tz):
+ start = Timestamp('2000-01-01', tz=tz)
+ dates = date_range(start=start, periods=10)
+ index = IntervalIndex.from_breaks(dates)
+
+ # test mid
+ start = Timestamp('2000-01-01T12:00', tz=tz)
+ expected = date_range(start=start, periods=9)
+ tm.assert_index_equal(index.mid, expected)
+
+ # __contains__ doesn't check individual points
+ assert Timestamp('2000-01-01', tz=tz) not in index
+ assert Timestamp('2000-01-01T12', tz=tz) not in index
+ assert Timestamp('2000-01-02', tz=tz) not in index
+ iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
+ Timestamp('2000-01-01T18', tz=tz))
+ iv_false = Interval(Timestamp('1999-12-31', tz=tz),
+ Timestamp('2000-01-01', tz=tz))
+ assert iv_true in index
+ assert iv_false not in index
+
+ # .contains does check individual points
+ assert not index.contains(Timestamp('2000-01-01', tz=tz))
+ assert index.contains(Timestamp('2000-01-01T12', tz=tz))
+ assert index.contains(Timestamp('2000-01-02', tz=tz))
+ assert index.contains(iv_true)
+ assert not index.contains(iv_false)
+
+ # test get_indexer
+ start = Timestamp('1999-12-31T12:00', tz=tz)
+ target = date_range(start=start, periods=7, freq='12H')
+ actual = index.get_indexer(target)
+ expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
+ tm.assert_numpy_array_equal(actual, expected)
- expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
+ start = Timestamp('2000-01-08T18:00', tz=tz)
+ target = date_range(start=start, periods=7, freq='6H')
+ actual = index.get_indexer(target)
+ expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
@@ -1079,9 +1116,11 @@ def test_construction_from_numeric(self, closed, name):
closed=closed)
tm.assert_index_equal(result, expected)
- def test_construction_from_timestamp(self, closed, name):
+ @pytest.mark.parametrize('tz', [None, 'US/Eastern'])
+ def test_construction_from_timestamp(self, closed, name, tz):
# combinations of start/end/periods without freq
- start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
+ start = Timestamp('2017-01-01', tz=tz)
+ end = Timestamp('2017-01-06', tz=tz)
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
@@ -1099,7 +1138,8 @@ def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods with fixed freq
freq = '2D'
- start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
+ start = Timestamp('2017-01-01', tz=tz)
+ end = Timestamp('2017-01-07', tz=tz)
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
@@ -1116,14 +1156,15 @@ def test_construction_from_timestamp(self, closed, name):
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
- end = Timestamp('2017-01-08')
+ end = Timestamp('2017-01-08', tz=tz)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
- start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
+ start = Timestamp('2017-01-01', tz=tz)
+ end = Timestamp('2017-12-31', tz=tz)
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
@@ -1140,7 +1181,7 @@ def test_construction_from_timestamp(self, closed, name):
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
- end = Timestamp('2018-01-15')
+ end = Timestamp('2018-01-15', tz=tz)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
@@ -1308,6 +1349,13 @@ def test_errors(self):
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
+ # mixed tz
+ start = Timestamp('2017-01-01', tz='US/Eastern')
+ end = Timestamp('2017-01-07', tz='US/Pacific')
+ msg = 'Start and end cannot both be tz-aware with different timezones'
+ with tm.assert_raises_regex(TypeError, msg):
+ interval_range(start=start, end=end)
+
class TestIntervalTree(object):
def setup_method(self, method):
diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py
index d431db0b4ca4f..533a79656f174 100644
--- a/pandas/tests/scalar/test_interval.py
+++ b/pandas/tests/scalar/test_interval.py
@@ -1,6 +1,7 @@
from __future__ import division
-from pandas import Interval
+from pandas import Interval, Timestamp
+from pandas.core.common import _any_none
import pytest
import pandas.util.testing as tm
@@ -137,3 +138,22 @@ def test_math_div(self, interval):
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
+
+ def test_constructor_errors(self):
+ msg = "invalid option for 'closed': foo"
+ with tm.assert_raises_regex(ValueError, msg):
+ Interval(0, 1, closed='foo')
+
+ msg = 'left side of interval must be <= right side'
+ with tm.assert_raises_regex(ValueError, msg):
+ Interval(1, 0)
+
+ @pytest.mark.parametrize('tz_left, tz_right', [
+ (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
+ def test_constructor_errors_tz(self, tz_left, tz_right):
+ # GH 18538
+ left = Timestamp('2017-01-01', tz=tz_left)
+ right = Timestamp('2017-01-02', tz=tz_right)
+ error = TypeError if _any_none(tz_left, tz_right) else ValueError
+ with pytest.raises(error):
+ Interval(left, right)
| - [X] closes #18537
- [X] closes #18538
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Updates to `Interval`:
- Disallowed `Interval` objects with mixed time zones
- Minor docstring updates
Updates to `IntervalIndex`:
- Disallowed `IntervalIndex` with mixed time zones
- Fixed `IntervalIndex.mid` for tz aware
- Fixed `_get_next_label` and `_get_previous_label` for tz aware
- cascades to other methods
- Cleaned up error message for mixed `left`/`right` types
| https://api.github.com/repos/pandas-dev/pandas/pulls/18558 | 2017-11-29T04:48:12Z | 2017-12-08T11:26:07Z | 2017-12-08T11:26:06Z | 2017-12-08T15:47:55Z |
Update imports, use nogil version of sqrt | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index 61d543cd7303a..df8f7bab51dbe 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -27,7 +27,7 @@ from numpy cimport (ndarray,
cdef double NaN = <double> np.NaN
cdef double nan = NaN
-from libc.math cimport sqrt, fabs
+from libc.math cimport fabs, sqrt
# this is our util.pxd
from util cimport numeric, get_nat
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 02b3839ebf181..a39f83d5261c0 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -1,13 +1,18 @@
# cython: profile=False
-cimport numpy as np
-cimport cython
-import numpy as np
-import sys
+import operator
-cdef bint PY3 = (sys.version_info[0] >= 3)
-
-from numpy cimport *
+cimport cython
+from cython cimport Py_ssize_t
+import numpy as np
+cimport numpy as np
+from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, PyArray_SETITEM,
+ PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew,
+ flatiter, NPY_OBJECT,
+ int64_t,
+ float32_t, float64_t,
+ uint8_t, uint64_t,
+ complex128_t)
# initialize numpy
np.import_array()
np.import_ufunc()
@@ -57,12 +62,12 @@ from tslib import NaT, Timestamp, Timedelta, array_to_datetime
from interval import Interval
from missing cimport checknull
-cdef int64_t NPY_NAT = util.get_nat()
cimport util
+cdef int64_t NPY_NAT = util.get_nat()
from util cimport is_array, _checknull
-from libc.math cimport sqrt, fabs
+from libc.math cimport fabs, sqrt
def values_from_object(object o):
@@ -494,7 +499,6 @@ def maybe_booleans_to_slice(ndarray[uint8_t] mask):
@cython.wraparound(False)
@cython.boundscheck(False)
def scalar_compare(ndarray[object] values, object val, object op):
- import operator
cdef:
Py_ssize_t i, n = len(values)
ndarray[uint8_t, cast=True] result
@@ -529,7 +533,7 @@ def scalar_compare(ndarray[object] values, object val, object op):
result[i] = True
else:
try:
- result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
+ result[i] = PyObject_RichCompareBool(x, val, flag)
except (TypeError):
result[i] = True
elif flag == cpython.Py_EQ:
@@ -541,7 +545,7 @@ def scalar_compare(ndarray[object] values, object val, object op):
result[i] = False
else:
try:
- result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
+ result[i] = PyObject_RichCompareBool(x, val, flag)
except (TypeError):
result[i] = False
@@ -553,7 +557,7 @@ def scalar_compare(ndarray[object] values, object val, object op):
elif isnull_val:
result[i] = False
else:
- result[i] = cpython.PyObject_RichCompareBool(x, val, flag)
+ result[i] = PyObject_RichCompareBool(x, val, flag)
return result.view(bool)
@@ -582,7 +586,6 @@ cpdef bint array_equivalent_object(object[:] left, object[:] right):
@cython.wraparound(False)
@cython.boundscheck(False)
def vec_compare(ndarray[object] left, ndarray[object] right, object op):
- import operator
cdef:
Py_ssize_t i, n = len(left)
ndarray[uint8_t, cast=True] result
@@ -617,7 +620,7 @@ def vec_compare(ndarray[object] left, ndarray[object] right, object op):
if checknull(x) or checknull(y):
result[i] = True
else:
- result[i] = cpython.PyObject_RichCompareBool(x, y, flag)
+ result[i] = PyObject_RichCompareBool(x, y, flag)
else:
for i in range(n):
x = left[i]
@@ -626,7 +629,7 @@ def vec_compare(ndarray[object] left, ndarray[object] right, object op):
if checknull(x) or checknull(y):
result[i] = False
else:
- result[i] = cpython.PyObject_RichCompareBool(x, y, flag)
+ result[i] = PyObject_RichCompareBool(x, y, flag)
return result.view(bool)
diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd
index 61783ab47cb86..e5fe90aa81f7d 100644
--- a/pandas/_libs/src/util.pxd
+++ b/pandas/_libs/src/util.pxd
@@ -2,6 +2,7 @@ from numpy cimport ndarray
cimport numpy as cnp
cimport cpython
+
cdef extern from "numpy_helper.h":
void set_array_not_contiguous(ndarray ao)
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 1578ae924c9bb..cd84228ec0f99 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -43,8 +43,7 @@
DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin)
from pandas.tseries.offsets import (
DateOffset, generate_range, Tick, CDay, prefix_mapping)
-from pandas.core.tools.datetimes import (
- parse_time_string, normalize_date, to_time)
+
from pandas.core.tools.timedeltas import to_timedelta
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
@@ -55,7 +54,7 @@
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp)
-from pandas._libs.tslibs import (timezones, conversion, fields,
+from pandas._libs.tslibs import (timezones, conversion, fields, parsing,
period as libperiod)
# -------- some conversion wrapper functions
@@ -524,14 +523,14 @@ def _generate(cls, start, end, periods, name, offset,
if start is not None:
if normalize:
- start = normalize_date(start)
+ start = libts.normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
- end = normalize_date(end)
+ end = libts.normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
@@ -1529,7 +1528,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
if isinstance(label, compat.string_types):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
- _, parsed, reso = parse_time_string(label, freq)
+ _, parsed, reso = parsing.parse_time_string(label, freq)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
# lower, upper form the half-open interval:
# [parsed, parsed + 1 freq)
@@ -1546,7 +1545,7 @@ def _maybe_cast_slice_bound(self, label, side, kind):
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
- _, parsed, reso = parse_time_string(key, freq)
+ _, parsed, reso = parsing.parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
@@ -1965,8 +1964,8 @@ def indexer_between_time(self, start_time, end_time, include_start=True,
-------
values_between_time : TimeSeries
"""
- start_time = to_time(start_time)
- end_time = to_time(end_time)
+ start_time = tools.to_time(start_time)
+ end_time = tools.to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py
index 219fb3f67db97..4245b9eb641ba 100644
--- a/pandas/core/tools/datetimes.py
+++ b/pandas/core/tools/datetimes.py
@@ -629,8 +629,6 @@ def calc_with_mask(carg, mask):
return None
-normalize_date = tslib.normalize_date
-
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index a1287c3102b77..adcd40e7317ce 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -16,7 +16,7 @@
from pandas._libs import tslib
from pandas._libs.tslibs import parsing
from pandas.core.tools import datetimes as tools
-from pandas.core.tools.datetimes import normalize_date
+
from pandas.compat import lmap
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import is_datetime64_ns_dtype
@@ -1576,12 +1576,12 @@ def test_coerce_of_invalid_datetimes(self):
def test_normalize_date():
value = date(2012, 9, 7)
- result = normalize_date(value)
+ result = tslib.normalize_date(value)
assert (result == datetime(2012, 9, 7))
value = datetime(2012, 9, 7, 12)
- result = normalize_date(value)
+ result = tslib.normalize_date(value)
assert (result == datetime(2012, 9, 7))
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index a3cddaa19dc17..857ec9e9881d9 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -8,7 +8,7 @@
import numpy as np
from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod
-from pandas.core.tools.datetimes import to_datetime, normalize_date
+from pandas.core.tools.datetimes import to_datetime
from pandas.core.common import AbstractMethodError
# import after tools, dateutil check
@@ -103,7 +103,7 @@ def wrapper(self, other):
if self.normalize:
# normalize_date returns normal datetime
- result = normalize_date(result)
+ result = tslib.normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
| https://api.github.com/repos/pandas-dev/pandas/pulls/18557 | 2017-11-29T02:04:26Z | 2017-12-04T11:38:40Z | 2017-12-04T11:38:40Z | 2017-12-08T19:38:18Z | |
Cleanup cimports | diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx
index 6e964077dd56e..cb192fcced318 100644
--- a/pandas/_libs/src/inference.pyx
+++ b/pandas/_libs/src/inference.pyx
@@ -3,7 +3,7 @@ from decimal import Decimal
cimport util
cimport cython
from tslibs.nattype import NaT
-from tslib cimport convert_to_tsobject
+from tslibs.conversion cimport convert_to_tsobject
from tslibs.timedeltas cimport convert_to_timedelta64
from tslibs.timezones cimport get_timezone
from datetime import datetime, timedelta
diff --git a/pandas/_libs/tslib.pxd b/pandas/_libs/tslib.pxd
deleted file mode 100644
index b74cf5b79c4cb..0000000000000
--- a/pandas/_libs/tslib.pxd
+++ /dev/null
@@ -1,3 +0,0 @@
-from numpy cimport ndarray, int64_t
-
-from tslibs.conversion cimport convert_to_tsobject
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 6d8cf39114f6f..020ac812e1c20 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -5,8 +5,9 @@
# distutils: define_macros=CYTHON_TRACE_NOGIL=0
cimport numpy as np
-from numpy cimport int64_t, import_array, ndarray, float64_t
+from numpy cimport int64_t, ndarray, float64_t
import numpy as np
+np.import_array()
from cpython cimport PyTypeObject, PyFloat_Check
@@ -35,18 +36,15 @@ from tslibs.np_datetime cimport (check_dts_bounds,
dayofweek, is_leapyear)
from tslibs.np_datetime import OutOfBoundsDatetime
-from .tslibs.parsing import parse_datetime_string
+from tslibs.parsing import parse_datetime_string
cimport cython
+from cython cimport Py_ssize_t
-import warnings
import pytz
UTC = pytz.utc
-# initialize numpy
-import_array()
-
from tslibs.timedeltas cimport cast_from_unit
from tslibs.timedeltas import Timedelta
diff --git a/setup.py b/setup.py
index 37be0b696503d..da897b0b539b6 100755
--- a/setup.py
+++ b/setup.py
@@ -517,7 +517,9 @@ def pxd(name):
'depends': _pxi_dep['join']},
'_libs.lib': {
'pyxfile': '_libs/lib',
- 'pxdfiles': ['_libs/src/util', '_libs/missing'],
+ 'pxdfiles': ['_libs/src/util',
+ '_libs/missing',
+ '_libs/tslibs/conversion'],
'depends': lib_depends + tseries_depends},
'_libs.missing': {
'pyxfile': '_libs/missing',
| https://api.github.com/repos/pandas-dev/pandas/pulls/18556 | 2017-11-29T02:02:48Z | 2017-12-03T22:57:19Z | 2017-12-03T22:57:19Z | 2017-12-03T23:03:03Z | |
CLN: Move period.pyx to tslibs/period.pyx | diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index 03596d7d091e0..fa2e1271f4649 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -17,7 +17,8 @@ from tslibs.conversion cimport maybe_datetimelike_to_i8
from hashtable cimport HashTable
-from pandas._libs import algos, period as periodlib, hashtable as _hash
+from pandas._libs import algos, hashtable as _hash
+from pandas._libs.tslibs import period as periodlib
from pandas._libs.tslib import Timestamp, Timedelta
from datetime import datetime, timedelta, date
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/tslibs/period.pyx
similarity index 98%
rename from pandas/_libs/period.pyx
rename to pandas/_libs/tslibs/period.pyx
index b95632b5b0eff..cf73257caf227 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -21,26 +21,23 @@ from cpython.datetime cimport PyDateTime_Check, PyDateTime_IMPORT
# import datetime C API
PyDateTime_IMPORT
-from tslibs.np_datetime cimport (pandas_datetimestruct,
- dtstruct_to_dt64, dt64_to_dtstruct,
- is_leapyear)
-
+from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64,
+ dt64_to_dtstruct, is_leapyear)
cimport util
from util cimport is_period_object, is_string_object, INT32_MIN
-from missing cimport is_null_datetimelike
-from pandas._libs.tslib import Timestamp
-from tslibs.timezones cimport (
- is_utc, is_tzlocal, get_utcoffset, get_dst_info)
-from tslibs.timedeltas cimport delta_to_nanoseconds
-
-from tslibs.parsing import (parse_time_string, NAT_SENTINEL,
- _get_rule_month, _MONTH_NUMBERS)
-from tslibs.frequencies cimport get_freq_code
-from tslibs.resolution import resolution, Resolution
-from tslibs.nattype import nat_strings, NaT, iNaT
-from tslibs.nattype cimport _nat_scalar_rules, NPY_NAT
+from pandas._libs.missing cimport is_null_datetimelike
+from timestamps import Timestamp
+from timezones cimport is_utc, is_tzlocal, get_utcoffset, get_dst_info
+from timedeltas cimport delta_to_nanoseconds
+
+from parsing import (parse_time_string, NAT_SENTINEL,
+ _get_rule_month, _MONTH_NUMBERS)
+from frequencies cimport get_freq_code
+from resolution import resolution, Resolution
+from nattype import nat_strings, NaT, iNaT
+from nattype cimport _nat_scalar_rules, NPY_NAT
from pandas.tseries import offsets
from pandas.tseries import frequencies
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 8015642919611..07b34961ce25d 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -74,7 +74,11 @@ def load_reduce(self):
('pandas._libs.sparse', 'BlockIndex'),
('pandas.tslib', 'Timestamp'):
('pandas._libs.tslib', 'Timestamp'),
- ('pandas._period', 'Period'): ('pandas._libs.period', 'Period'),
+
+ # 18543 moving period
+ ('pandas._period', 'Period'): ('pandas._libs.tslibs.period', 'Period'),
+ ('pandas._libs.period', 'Period'):
+ ('pandas._libs.tslibs.period', 'Period'),
# 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
('pandas.tslib', '__nat_unpickle'):
diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py
index 2176338574304..27e1006c23174 100644
--- a/pandas/core/indexes/accessors.py
+++ b/pandas/core/indexes/accessors.py
@@ -14,7 +14,7 @@
from pandas.core.accessor import PandasDelegate
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimes import DatetimeIndex
-from pandas._libs.period import IncompatibleFrequency # noqa
+from pandas._libs.tslibs.period import IncompatibleFrequency # noqa
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.algorithms import take_1d
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 5f543ab6e510d..c2fc983c983a6 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -25,7 +25,7 @@
import pandas.io.formats.printing as printing
from pandas._libs import lib, iNaT, NaT
-from pandas._libs.period import Period
+from pandas._libs.tslibs.period import Period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.indexes.base import Index, _index_shared_docs
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index ee6263a9f0aad..1578ae924c9bb 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -54,8 +54,9 @@
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
- Timestamp, period as libperiod)
-from pandas._libs.tslibs import timezones, conversion, fields
+ Timestamp)
+from pandas._libs.tslibs import (timezones, conversion, fields,
+ period as libperiod)
# -------- some conversion wrapper functions
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 6535eee386e8b..ac9b511606066 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -31,12 +31,12 @@
import pandas.tseries.offsets as offsets
from pandas._libs.lib import infer_dtype
-from pandas._libs import tslib, period, index as libindex
-from pandas._libs.period import (Period, IncompatibleFrequency,
- get_period_field_arr, _validate_end_alias,
- _quarter_to_myear)
+from pandas._libs import tslib, index as libindex
+from pandas._libs.tslibs.period import (Period, IncompatibleFrequency,
+ get_period_field_arr,
+ _validate_end_alias, _quarter_to_myear)
from pandas._libs.tslibs.fields import isleapyear_arr
-from pandas._libs.tslibs import resolution
+from pandas._libs.tslibs import resolution, period
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds
from pandas.core.base import _shared_docs
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index bd441a8248841..9f5439b68558b 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -24,7 +24,7 @@
from pandas._libs import lib, tslib
from pandas._libs.lib import Timestamp
-from pandas._libs.period import IncompatibleFrequency
+from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.util._decorators import Appender
from pandas.core.generic import _shared_docs
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index d20ed66c06ce9..6cb4226dffc5a 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -9,7 +9,7 @@
from pandas._libs import tslib, tslibs
from pandas import (PeriodIndex, Series, DatetimeIndex,
period_range, Period)
-from pandas._libs import period as libperiod
+from pandas._libs.tslibs import period as libperiod
class TestGetItem(object):
diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py
index 074678164e6f9..3774111f44fb2 100644
--- a/pandas/tests/indexes/period/test_tools.py
+++ b/pandas/tests/indexes/period/test_tools.py
@@ -6,7 +6,7 @@
import pandas.core.indexes.period as period
from pandas.compat import lrange
from pandas.tseries.frequencies import get_freq, MONTHS
-from pandas._libs.period import period_ordinal, period_asfreq
+from pandas._libs.tslibs.period import period_ordinal, period_asfreq
from pandas import (PeriodIndex, Period, DatetimeIndex, Timestamp, Series,
date_range, to_datetime, period_range)
diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py
index 3bd4a28b7767d..eb6363689cca0 100644
--- a/pandas/tests/scalar/test_period.py
+++ b/pandas/tests/scalar/test_period.py
@@ -10,7 +10,8 @@
from pandas.compat import text_type, iteritems
from pandas.compat.numpy import np_datetime64_compat
-from pandas._libs import tslib, period as libperiod
+from pandas._libs import tslib
+from pandas._libs.tslibs import period as libperiod
from pandas._libs.tslibs.parsing import DateParseError
from pandas import Period, Timestamp, offsets
from pandas._libs.tslibs.resolution import DAYS, _MONTHS as MONTHS
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 992f211229441..9d97057569580 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -16,9 +16,8 @@
import pandas.util.testing as tm
from pandas.tseries import offsets, frequencies
-from pandas._libs import period
from pandas._libs.tslibs.timezones import get_timezone
-from pandas._libs.tslibs import conversion
+from pandas._libs.tslibs import conversion, period
from pandas.compat import long, PY3
from pandas.util.testing import assert_series_equal
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index b0154f6db7022..1ebe59047b998 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -33,7 +33,7 @@
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
-from pandas._libs.period import IncompatibleFrequency
+from pandas._libs.tslibs.period import IncompatibleFrequency
bday = BDay()
diff --git a/setup.py b/setup.py
index 68e1319458a33..d43f8ec12b18a 100755
--- a/setup.py
+++ b/setup.py
@@ -331,7 +331,6 @@ class CheckSDist(sdist_class):
_pyxfiles = ['pandas/_libs/lib.pyx',
'pandas/_libs/hashtable.pyx',
'pandas/_libs/tslib.pyx',
- 'pandas/_libs/period.pyx',
'pandas/_libs/index.pyx',
'pandas/_libs/algos.pyx',
'pandas/_libs/join.pyx',
@@ -344,6 +343,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'pandas/_libs/tslibs/period.pyx',
'pandas/_libs/tslibs/strptime.pyx',
'pandas/_libs/tslibs/np_datetime.pyx',
'pandas/_libs/tslibs/timedeltas.pyx',
@@ -530,8 +530,8 @@ def pxd(name):
'pandas/_libs/src/numpy_helper.h'],
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
- '_libs.period': {
- 'pyxfile': '_libs/period',
+ '_libs.tslibs.period': {
+ 'pyxfile': '_libs/tslibs/period',
'pxdfiles': ['_libs/src/util',
'_libs/lib',
'_libs/tslibs/timedeltas',
| - [x] Closes #18543,
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Is a whatsnew entry needed here? | https://api.github.com/repos/pandas-dev/pandas/pulls/18555 | 2017-11-28T23:41:47Z | 2017-12-03T18:00:23Z | 2017-12-03T18:00:22Z | 2017-12-03T22:59:43Z |
bugfix for FY5253 case with bunched yearends | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index ae272282040b8..fd23f0b4335b6 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -264,3 +264,5 @@ Other
- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`)
- Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
- Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`)
+- Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`)
+-
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
index 45f12c6931fd9..2dd061dcc6f9e 100644
--- a/pandas/tests/tseries/offsets/test_fiscal.py
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -9,6 +9,7 @@
import pandas.util.testing as tm
+from pandas import Timestamp
from pandas.tseries.frequencies import get_offset, _INVALID_FREQ_ERROR
from pandas.tseries.offsets import FY5253Quarter, FY5253
from pandas._libs.tslibs.offsets import WeekDay
@@ -604,3 +605,23 @@ def test_offset(self):
assert_offset_equal(offset2,
datetime(2013, 1, 15),
datetime(2013, 3, 30))
+
+
+def test_bunched_yearends():
+ # GH#14774 cases with two fiscal year-ends in the same calendar-year
+ fy = FY5253(n=1, weekday=5, startingMonth=12, variation='nearest')
+ dt = Timestamp('2004-01-01')
+ assert fy.rollback(dt) == Timestamp('2002-12-28')
+ assert (-fy).apply(dt) == Timestamp('2002-12-28')
+ assert dt - fy == Timestamp('2002-12-28')
+
+ assert fy.rollforward(dt) == Timestamp('2004-01-03')
+ assert fy.apply(dt) == Timestamp('2004-01-03')
+ assert fy + dt == Timestamp('2004-01-03')
+ assert dt + fy == Timestamp('2004-01-03')
+
+ # Same thing, but starting from a Timestamp in the previous year.
+ dt = Timestamp('2003-12-31')
+ assert fy.rollback(dt) == Timestamp('2002-12-28')
+ assert (-fy).apply(dt) == Timestamp('2002-12-28')
+ assert dt - fy == Timestamp('2002-12-28')
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 4dae59d11f66f..dd5f01a36a43e 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -1831,21 +1831,26 @@ def apply(self, other):
elif n > 0:
if other < prev_year:
n -= 2
- # TODO: Not hit in tests
- elif other < cur_year:
+ elif prev_year < other < cur_year:
n -= 1
- elif other < next_year:
+ elif cur_year < other < next_year:
pass
else:
assert False
else:
- if other > next_year:
+ if next_year < other:
n += 2
- # TODO: Not hit in tests
- elif other > cur_year:
+ # TODO: Not hit in tests; UPDATE: looks impossible
+ elif cur_year < other < next_year:
n += 1
- elif other > prev_year:
+ elif prev_year < other < cur_year:
pass
+ elif (other.year == prev_year.year and other < prev_year and
+ prev_year - other <= timedelta(6)):
+ # GH#14774, error when next_year.year == cur_year.year
+ # e.g. prev_year == datetime(2004, 1, 3),
+ # other == datetime(2004, 1, 1)
+ n -= 1
else:
assert False
| Also achieves coverage for one of the uncovered branches in `FY5253.apply`
- [x] closes #14774
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18550 | 2017-11-28T17:14:39Z | 2017-12-06T11:15:31Z | 2017-12-06T11:15:31Z | 2017-12-06T16:02:48Z |
Update pandas.read_gbq docs to point to pandas-gbq | diff --git a/doc/source/install.rst b/doc/source/install.rst
index b8968e18aecb0..7c1fde119ceaa 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -259,7 +259,8 @@ Optional Dependencies
`xsel <http://www.vergenet.net/~conrad/software/xsel/>`__, or
`xclip <https://github.com/astrand/xclip/>`__: necessary to use
:func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation.
-* For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__
+* `pandas-gbq <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__: for Google BigQuery I/O.
+
* `Backports.lzma <https://pypi.python.org/pypi/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library.
* One of the following combinations of libraries is needed to use the
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index 12e52123064e2..b452b0cf5ddd4 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -29,9 +29,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
- Google BigQuery API Client Library v2 for Python is used.
- Documentation is available `here
- <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
+ This function requires the `pandas-gbq package
+ <https://pandas-gbq.readthedocs.io>`__.
Authentication to the Google BigQuery service is via OAuth 2.0.
@@ -70,7 +69,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None,
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
- 'standard' : Use BigQuery's standard SQL (beta), which is
+ 'standard' : Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/sql-reference/>`__
| The `pandas-gbq` package must be installed to use `pandas.read_gbq`.
Also, with soon-to-be-release version 0.3.0 of `pandas-gbq` the Google
Cloud client library is used instead of the Google API library.
Also, standard SQL is no longer beta. In fact it is highly recommended
over using legacy SQL.
- `N/A` - closes #xxxx
- `N/A` (docs change only) - tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- `N/A` (docs change only) - whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18548 | 2017-11-28T16:48:12Z | 2017-12-01T01:05:16Z | 2017-12-01T01:05:16Z | 2017-12-11T20:21:22Z |
Remove arg that is only ever used as NPY_UNSAFE_CASTING | diff --git a/pandas/_libs/src/datetime.pxd b/pandas/_libs/src/datetime.pxd
index 0624779e50497..6e5d8b82c118f 100644
--- a/pandas/_libs/src/datetime.pxd
+++ b/pandas/_libs/src/datetime.pxd
@@ -7,13 +7,6 @@ from cpython cimport PyUnicode_Check, PyUnicode_AsASCIIString
cdef extern from "numpy/ndarrayobject.h":
ctypedef int64_t npy_datetime
- ctypedef enum NPY_CASTING:
- NPY_NO_CASTING
- NPY_EQUIV_CASTING
- NPY_SAFE_CASTING
- NPY_SAME_KIND_CASTING
- NPY_UNSAFE_CASTING
-
cdef extern from "numpy/npy_common.h":
ctypedef unsigned char npy_bool
@@ -45,7 +38,6 @@ cdef extern from "datetime/np_datetime.h":
cdef extern from "datetime/np_datetime_strings.h":
int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
- NPY_CASTING casting,
pandas_datetimestruct *out,
int *out_local, int *out_tzoffset,
PANDAS_DATETIMEUNIT *out_bestunit,
@@ -75,7 +67,6 @@ cdef inline int _cstring_to_dts(char *val, int length,
int result
result = parse_iso_8601_datetime(val, length, PANDAS_FR_ns,
- NPY_UNSAFE_CASTING,
dts, out_local, out_tzoffset,
&out_bestunit, &special)
return result
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c
index b1206bd3f2d7a..cb4f9d3efdcd0 100644
--- a/pandas/_libs/src/datetime/np_datetime.c
+++ b/pandas/_libs/src/datetime/np_datetime.c
@@ -679,44 +679,6 @@ int convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
return 0;
}
-/*
- * This provides the casting rules for the DATETIME data type units.
- *
- * Notably, there is a barrier between 'date units' and 'time units'
- * for all but 'unsafe' casting.
- */
-npy_bool can_cast_datetime64_units(PANDAS_DATETIMEUNIT src_unit,
- PANDAS_DATETIMEUNIT dst_unit,
- NPY_CASTING casting) {
- switch (casting) {
- /* Allow anything with unsafe casting */
- case NPY_UNSAFE_CASTING:
- return 1;
-
- /*
- * Only enforce the 'date units' vs 'time units' barrier with
- * 'same_kind' casting.
- */
- case NPY_SAME_KIND_CASTING:
- return (src_unit <= PANDAS_FR_D && dst_unit <= PANDAS_FR_D) ||
- (src_unit > PANDAS_FR_D && dst_unit > PANDAS_FR_D);
-
- /*
- * Enforce the 'date units' vs 'time units' barrier and that
- * casting is only allowed towards more precise units with
- * 'safe' casting.
- */
- case NPY_SAFE_CASTING:
- return (src_unit <= dst_unit) &&
- ((src_unit <= PANDAS_FR_D && dst_unit <= PANDAS_FR_D) ||
- (src_unit > PANDAS_FR_D && dst_unit > PANDAS_FR_D));
-
- /* Enforce equality with 'no' or 'equiv' casting */
- default:
- return src_unit == dst_unit;
- }
-}
-
/*
* Converts a datetime based on the given metadata into a datetimestruct
*/
diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h
index 7ee7e1e99a704..980c66218f7e6 100644
--- a/pandas/_libs/src/datetime/np_datetime.h
+++ b/pandas/_libs/src/datetime/np_datetime.h
@@ -125,17 +125,6 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a,
void
add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes);
-/*
- * This provides the casting rules for the TIMEDELTA data type units.
- *
- * Notably, there is a barrier between the nonlinear years and
- * months units, and all the other units.
- */
-npy_bool
-can_cast_datetime64_units(PANDAS_DATETIMEUNIT src_unit,
- PANDAS_DATETIMEUNIT dst_unit,
- NPY_CASTING casting);
-
int
convert_datetime_to_datetimestruct(pandas_datetime_metadata *meta,
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c
index 5307d394423ff..1ff4f08cf3c9d 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.c
+++ b/pandas/_libs/src/datetime/np_datetime_strings.c
@@ -32,22 +32,6 @@ This file implements string parsing and creation for NumPy datetime.
#include "np_datetime.h"
#include "np_datetime_strings.h"
-NPY_NO_EXPORT const char *npy_casting_to_string(NPY_CASTING casting) {
- switch (casting) {
- case NPY_NO_CASTING:
- return "'no'";
- case NPY_EQUIV_CASTING:
- return "'equiv'";
- case NPY_SAFE_CASTING:
- return "'safe'";
- case NPY_SAME_KIND_CASTING:
- return "'same_kind'";
- case NPY_UNSAFE_CASTING:
- return "'unsafe'";
- default:
- return "<unknown>";
- }
-}
/* Platform-specific time_t typedef */
typedef time_t NPY_TIME_T;
@@ -115,51 +99,6 @@ static int get_localtime(NPY_TIME_T *ts, struct tm *tms) {
return -1;
}
-#if 0
-/*
- * Wraps `gmtime` functionality for multiple platforms. This
- * converts a time value to a time structure in UTC.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int
-get_gmtime(NPY_TIME_T *ts, struct tm *tms) {
- char *func_name = "<unknown>";
-#if defined(_WIN32)
-#if defined(_MSC_VER) && (_MSC_VER >= 1400)
- if (gmtime_s(tms, ts) != 0) {
- func_name = "gmtime_s";
- goto fail;
- }
-#elif defined(__GNUC__) && defined(NPY_MINGW_USE_CUSTOM_MSVCR)
- if (_gmtime64_s(tms, ts) != 0) {
- func_name = "_gmtime64_s";
- goto fail;
- }
-#else
- struct tm *tms_tmp;
- gmtime_r(ts, tms_tmp);
- if (tms_tmp == NULL) {
- func_name = "gmtime";
- goto fail;
- }
- memcpy(tms, tms_tmp, sizeof(struct tm));
-#endif
-#else
- if (gmtime_r(ts, tms) == NULL) {
- func_name = "gmtime_r";
- goto fail;
- }
-#endif
-
- return 0;
-
-fail:
- PyErr_Format(PyExc_OSError, "Failed to use '%s' to convert "
- "to a UTC time", func_name);
- return -1;
-}
-#endif
/*
* Converts a datetimestruct in UTC to a datetimestruct in local time,
@@ -226,115 +165,6 @@ static int convert_datetimestruct_utc_to_local(
return 0;
}
-#if 0
-/*
- * Converts a datetimestruct in local time to a datetimestruct in UTC.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int
-convert_datetimestruct_local_to_utc(pandas_datetimestruct *out_dts_utc,
- const pandas_datetimestruct *dts_local) {
- npy_int64 year_correction = 0;
-
- /* Make a copy of the input 'dts' to modify */
- *out_dts_utc = *dts_local;
-
- /* HACK: Use a year < 2038 for later years for small time_t */
- if (sizeof(NPY_TIME_T) == 4 && out_dts_utc->year >= 2038) {
- if (is_leapyear(out_dts_utc->year)) {
- /* 2036 is a leap year */
- year_correction = out_dts_utc->year - 2036;
- out_dts_utc->year -= year_correction;
- } else {
- /* 2037 is not a leap year */
- year_correction = out_dts_utc->year - 2037;
- out_dts_utc->year -= year_correction;
- }
- }
-
- /*
- * ISO 8601 states to treat date-times without a timezone offset
- * or 'Z' for UTC as local time. The C standard libary functions
- * mktime and gmtime allow us to do this conversion.
- *
- * Only do this timezone adjustment for recent and future years.
- * In this case, "recent" is defined to be 1970 and later, because
- * on MS Windows, mktime raises an error when given an earlier date.
- */
- if (out_dts_utc->year >= 1970) {
- NPY_TIME_T rawtime = 0;
- struct tm tm_;
-
- tm_.tm_sec = out_dts_utc->sec;
- tm_.tm_min = out_dts_utc->min;
- tm_.tm_hour = out_dts_utc->hour;
- tm_.tm_mday = out_dts_utc->day;
- tm_.tm_mon = out_dts_utc->month - 1;
- tm_.tm_year = out_dts_utc->year - 1900;
- tm_.tm_isdst = -1;
-
- /* mktime converts a local 'struct tm' into a time_t */
- rawtime = mktime(&tm_);
- if (rawtime == -1) {
- PyErr_SetString(PyExc_OSError, "Failed to use mktime to "
- "convert local time to UTC");
- return -1;
- }
-
- /* gmtime converts a 'time_t' into a UTC 'struct tm' */
- if (get_gmtime(&rawtime, &tm_) < 0) {
- return -1;
- }
- out_dts_utc->sec = tm_.tm_sec;
- out_dts_utc->min = tm_.tm_min;
- out_dts_utc->hour = tm_.tm_hour;
- out_dts_utc->day = tm_.tm_mday;
- out_dts_utc->month = tm_.tm_mon + 1;
- out_dts_utc->year = tm_.tm_year + 1900;
- }
-
- /* Reapply the year 2038 year correction HACK */
- out_dts_utc->year += year_correction;
-
- return 0;
-}
-#endif
-
-/* int */
-/* parse_python_string(PyObject* obj, pandas_datetimestruct *dts) { */
-/* PyObject *bytes = NULL; */
-/* char *str = NULL; */
-/* Py_ssize_t len = 0; */
-/* PANDAS_DATETIMEUNIT bestunit = -1; */
-
-/* /\* Convert to an ASCII string for the date parser *\/ */
-/* if (PyUnicode_Check(obj)) { */
-/* bytes = PyUnicode_AsASCIIString(obj); */
-/* if (bytes == NULL) { */
-/* return -1; */
-/* } */
-/* } */
-/* else { */
-/* bytes = obj; */
-/* Py_INCREF(bytes); */
-/* } */
-/* if (PyBytes_AsStringAndSize(bytes, &str, &len) == -1) { */
-/* Py_DECREF(bytes); */
-/* return -1; */
-/* } */
-
-/* /\* Parse the ISO date *\/ */
-/* if (parse_iso_8601_datetime(str, len, PANDAS_FR_us, NPY_UNSAFE_CASTING,
- */
-/* dts, NULL, &bestunit, NULL) < 0) { */
-/* Py_DECREF(bytes); */
-/* return -1; */
-/* } */
-/* Py_DECREF(bytes); */
-
-/* return 0; */
-/* } */
/*
* Parses (almost) standard ISO 8601 date strings. The differences are:
@@ -354,8 +184,6 @@ convert_datetimestruct_local_to_utc(pandas_datetimestruct *out_dts_utc,
* 'str' must be a NULL-terminated string, and 'len' must be its length.
* 'unit' should contain -1 if the unit is unknown, or the unit
* which will be used if it is.
- * 'casting' controls how the detected unit from the string is allowed
- * to be cast to the 'unit' parameter.
*
* 'out' gets filled with the parsed date-time.
* 'out_local' gets set to 1 if the parsed time contains timezone,
@@ -375,7 +203,7 @@ convert_datetimestruct_local_to_utc(pandas_datetimestruct *out_dts_utc,
* Returns 0 on success, -1 on failure.
*/
int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
- NPY_CASTING casting, pandas_datetimestruct *out,
+ pandas_datetimestruct *out,
int *out_local, int *out_tzoffset,
PANDAS_DATETIMEUNIT *out_bestunit,
npy_bool *out_special) {
@@ -444,16 +272,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
*out_special = 1;
}
- /* Check the casting rule */
- if (!can_cast_datetime64_units(bestunit, unit, casting)) {
- PyErr_Format(PyExc_TypeError,
- "Cannot parse \"%s\" as unit "
- "'%s' using casting rule %s",
- str, _datetime_strings[unit],
- npy_casting_to_string(casting));
- return -1;
- }
-
return 0;
}
@@ -486,16 +304,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
*out_special = 1;
}
- /* Check the casting rule */
- if (!can_cast_datetime64_units(bestunit, unit, casting)) {
- PyErr_Format(PyExc_TypeError,
- "Cannot parse \"%s\" as unit "
- "'%s' using casting rule %s",
- str, _datetime_strings[unit],
- npy_casting_to_string(casting));
- return -1;
- }
-
return convert_datetime_to_datetimestruct(&meta, rawtime, out);
}
@@ -941,16 +749,6 @@ int parse_iso_8601_datetime(char *str, int len, PANDAS_DATETIMEUNIT unit,
*out_bestunit = bestunit;
}
- /* Check the casting rule */
- if (!can_cast_datetime64_units(bestunit, unit, casting)) {
- PyErr_Format(PyExc_TypeError,
- "Cannot parse \"%s\" as unit "
- "'%s' using casting rule %s",
- str, _datetime_strings[unit],
- npy_casting_to_string(casting));
- return -1;
- }
-
return 0;
parse_error:
@@ -1018,38 +816,6 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) {
return len;
}
-/*
- * Finds the largest unit whose value is nonzero, and for which
- * the remainder for the rest of the units is zero.
- */
-static PANDAS_DATETIMEUNIT lossless_unit_from_datetimestruct(
- pandas_datetimestruct *dts) {
- if (dts->as % 1000 != 0) {
- return PANDAS_FR_as;
- } else if (dts->as != 0) {
- return PANDAS_FR_fs;
- } else if (dts->ps % 1000 != 0) {
- return PANDAS_FR_ps;
- } else if (dts->ps != 0) {
- return PANDAS_FR_ns;
- } else if (dts->us % 1000 != 0) {
- return PANDAS_FR_us;
- } else if (dts->us != 0) {
- return PANDAS_FR_ms;
- } else if (dts->sec != 0) {
- return PANDAS_FR_s;
- } else if (dts->min != 0) {
- return PANDAS_FR_m;
- } else if (dts->hour != 0) {
- return PANDAS_FR_h;
- } else if (dts->day != 1) {
- return PANDAS_FR_D;
- } else if (dts->month != 1) {
- return PANDAS_FR_M;
- } else {
- return PANDAS_FR_Y;
- }
-}
/*
* Converts an pandas_datetimestruct to an (almost) ISO 8601
@@ -1069,17 +835,11 @@ static PANDAS_DATETIMEUNIT lossless_unit_from_datetimestruct(
* set to a value other than -1. This is a manual override for
* the local time zone to use, as an offset in minutes.
*
- * 'casting' controls whether data loss is allowed by truncating
- * the data to a coarser unit. This interacts with 'local', slightly,
- * in order to form a date unit string as a local time, the casting
- * must be unsafe.
- *
* Returns 0 on success, -1 on failure (for example if the output
* string was too short).
*/
int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
- int local, PANDAS_DATETIMEUNIT base, int tzoffset,
- NPY_CASTING casting) {
+ int local, PANDAS_DATETIMEUNIT base, int tzoffset) {
pandas_datetimestruct dts_local;
int timezone_offset = 0;
@@ -1121,38 +881,6 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
add_minutes_to_datetimestruct(dts, timezone_offset);
}
- /*
- * Now the datetimestruct data is in the final form for
- * the string representation, so ensure that the data
- * is being cast according to the casting rule.
- */
- if (casting != NPY_UNSAFE_CASTING) {
- /* Producing a date as a local time is always 'unsafe' */
- if (base <= PANDAS_FR_D && local) {
- PyErr_SetString(PyExc_TypeError,
- "Cannot create a local "
- "timezone-based date string from a NumPy "
- "datetime without forcing 'unsafe' casting");
- return -1;
- } else {
- /* Only 'unsafe' and 'same_kind' allow data loss */
- PANDAS_DATETIMEUNIT unitprec;
-
- unitprec = lossless_unit_from_datetimestruct(dts);
- if (casting != NPY_SAME_KIND_CASTING && unitprec > base) {
- PyErr_Format(PyExc_TypeError,
- "Cannot create a "
- "string with unit precision '%s' "
- "from the NumPy datetime, which has data at "
- "unit precision '%s', "
- "requires 'unsafe' or 'same_kind' casting",
- _datetime_strings[base],
- _datetime_strings[unitprec]);
- return -1;
- }
- }
- }
-
/* YEAR */
/*
* Can't use PyOS_snprintf, because it always produces a '\0'
diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h
index 833c1869c1664..4c248129b68c3 100644
--- a/pandas/_libs/src/datetime/np_datetime_strings.h
+++ b/pandas/_libs/src/datetime/np_datetime_strings.h
@@ -40,8 +40,6 @@ This file implements string parsing and creation for NumPy datetime.
* 'str' must be a NULL-terminated string, and 'len' must be its length.
* 'unit' should contain -1 if the unit is unknown, or the unit
* which will be used if it is.
- * 'casting' controls how the detected unit from the string is allowed
- * to be cast to the 'unit' parameter.
*
* 'out' gets filled with the parsed date-time.
* 'out_local' gets whether returned value contains timezone. 0 for UTC, 1 for local time.
@@ -62,7 +60,6 @@ This file implements string parsing and creation for NumPy datetime.
int
parse_iso_8601_datetime(char *str, int len,
PANDAS_DATETIMEUNIT unit,
- NPY_CASTING casting,
pandas_datetimestruct *out,
int *out_local,
int *out_tzoffset,
@@ -90,17 +87,11 @@ get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base);
* set to a value other than -1. This is a manual override for
* the local time zone to use, as an offset in minutes.
*
- * 'casting' controls whether data loss is allowed by truncating
- * the data to a coarser unit. This interacts with 'local', slightly,
- * in order to form a date unit string as a local time, the casting
- * must be unsafe.
- *
* Returns 0 on success, -1 on failure (for example if the output
* string was too short).
*/
int
make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen,
- int local, PANDAS_DATETIMEUNIT base, int tzoffset,
- NPY_CASTING casting);
+ int local, PANDAS_DATETIMEUNIT base, int tzoffset);
#endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_
diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c
index f799b7f6b4785..c8a29cd949c3c 100644
--- a/pandas/_libs/src/ujson/python/objToJSON.c
+++ b/pandas/_libs/src/ujson/python/objToJSON.c
@@ -456,7 +456,7 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts,
}
if (!make_iso_8601_datetime(dts, GET_TC(tc)->cStr, *_outLen, 0, base,
- -1, NPY_UNSAFE_CASTING)) {
+ -1)) {
PRINTMARK();
*_outLen = strlen(GET_TC(tc)->cStr);
return GET_TC(tc)->cStr;
| Several functions from src/datetime are only ever called with the casting rule NPY_UNSAFE_CASTING. By getting rid of that dummy argument, the remaining code gets simplified quite a bit.
This PR removes that argument, then removes code that this renders unreachable or unused. It also removes several commented-out functions.
There are a couple of other never-used args; taking these one at a time.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18546 | 2017-11-28T16:02:42Z | 2017-11-29T12:02:06Z | 2017-11-29T12:02:06Z | 2017-12-08T19:38:20Z |
standalone implementation of ccalendar | diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd
new file mode 100644
index 0000000000000..a1bbeea1cb69a
--- /dev/null
+++ b/pandas/_libs/tslibs/ccalendar.pxd
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+
+from cython cimport Py_ssize_t
+
+from numpy cimport int64_t, int32_t
+
+
+cdef int dayofweek(int y, int m, int m) nogil
+cdef bint is_leapyear(int64_t year) nogil
+cpdef int32_t get_days_in_month(int year, Py_ssize_t month) nogil
+cpdef int32_t get_week_of_year(int year, int month, int day) nogil
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
new file mode 100644
index 0000000000000..a68ecbd2e8629
--- /dev/null
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
+# cython: boundscheck=False
+"""
+Cython implementations of functions resembling the stdlib calendar module
+"""
+
+cimport cython
+from cython cimport Py_ssize_t
+
+import numpy as np
+cimport numpy as np
+from numpy cimport int64_t, int32_t
+np.import_array()
+
+
+# ----------------------------------------------------------------------
+# Constants
+
+# Slightly more performant cython lookups than a 2D table
+# The first 12 entries correspond to month lengths for non-leap years.
+# The remaining 12 entries give month lengths for leap years
+cdef int32_t* days_per_month_array = [
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
+ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+
+cdef int* sakamoto_arr = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4]
+
+# The first 13 entries give the month days elapsed as of the first of month N
+# (or the total number of days in the year for N=13) in non-leap years.
+# The remaining 13 entries give the days elapsed in leap years.
+cdef int32_t* _month_offset = [
+ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365,
+ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]
+
+# ----------------------------------------------------------------------
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+cpdef inline int32_t get_days_in_month(int year, Py_ssize_t month) nogil:
+ """Return the number of days in the given month of the given year.
+
+ Parameters
+ ----------
+ year : int
+ month : int
+
+ Returns
+ -------
+ days_in_month : int
+
+ Notes
+ -----
+ Assumes that the arguments are valid. Passing a month not between 1 and 12
+ risks a segfault.
+ """
+ return days_per_month_array[12 * is_leapyear(year) + month - 1]
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+@cython.cdivision
+cdef int dayofweek(int y, int m, int d) nogil:
+ """Find the day of week for the date described by the Y/M/D triple y, m, d
+ using Sakamoto's method, from wikipedia.
+
+ 0 represents Monday. See [1]_.
+
+ Parameters
+ ----------
+ y : int
+ m : int
+ d : int
+
+ Returns
+ -------
+ weekday : int
+
+ Notes
+ -----
+ Assumes that y, m, d, represents a valid date.
+
+ See Also
+ --------
+ [1] https://docs.python.org/3.6/library/calendar.html#calendar.weekday
+
+ [2] https://en.wikipedia.org/wiki/\
+ Determination_of_the_day_of_the_week#Sakamoto.27s_methods
+ """
+ cdef:
+ int day
+
+ y -= m < 3
+ day = (y + y / 4 - y / 100 + y / 400 + sakamoto_arr[m - 1] + d) % 7
+ # convert to python day
+ return (day + 6) % 7
+
+
+cdef bint is_leapyear(int64_t year) nogil:
+ """Returns 1 if the given year is a leap year, 0 otherwise.
+
+ Parameters
+ ----------
+ year : int
+
+ Returns
+ -------
+ is_leap : bool
+ """
+ return ((year & 0x3) == 0 and # year % 4 == 0
+ ((year % 100) != 0 or (year % 400) == 0))
+
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+cpdef int32_t get_week_of_year(int year, int month, int day) nogil:
+ """Return the ordinal week-of-year for the given day.
+
+ Parameters
+ ----------
+ year : int
+ month : int
+ day : int
+
+ Returns
+ -------
+ week_of_year : int32_t
+
+ Notes
+ -----
+ Assumes the inputs describe a valid date.
+ """
+ cdef:
+ bint isleap, isleap_prev
+ int32_t mo_off
+ int32_t doy, dow
+ int woy
+
+ isleap = is_leapyear(year)
+ isleap_prev = is_leapyear(year - 1)
+
+ mo_off = _month_offset[isleap * 13 + month - 1]
+
+ doy = mo_off + day
+ dow = dayofweek(year, month, day)
+
+ # estimate
+ woy = (doy - 1) - dow + 3
+ if woy >= 0:
+ woy = woy / 7 + 1
+
+ # verify
+ if woy < 0:
+ if (woy > -2) or (woy == -2 and isleap_prev):
+ woy = 53
+ else:
+ woy = 52
+ elif woy == 53:
+ if 31 - day + dow < 3:
+ woy = 1
+
+ return woy
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 3de361c511fbf..b321ca1659682 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -17,9 +17,10 @@ from numpy cimport ndarray, int64_t, int32_t, int8_t
np.import_array()
+from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek,
+ get_week_of_year)
from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct,
- dt64_to_dtstruct, td64_to_tdstruct,
- days_per_month_table, is_leapyear, dayofweek)
+ dt64_to_dtstruct, td64_to_tdstruct)
from nattype cimport NPY_NAT
@@ -379,7 +380,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
ndarray[int32_t, ndim=2] _month_offset
int isleap, isleap_prev
pandas_datetimestruct dts
- int mo_off, doy, dow, woy
+ int mo_off, doy, dow
_month_offset = np.array(
[[ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 ],
@@ -507,28 +508,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
continue
dt64_to_dtstruct(dtindex[i], &dts)
- isleap = is_leapyear(dts.year)
- isleap_prev = is_leapyear(dts.year - 1)
- mo_off = _month_offset[isleap, dts.month - 1]
- doy = mo_off + dts.day
- dow = dayofweek(dts.year, dts.month, dts.day)
-
- # estimate
- woy = (doy - 1) - dow + 3
- if woy >= 0:
- woy = woy / 7 + 1
-
- # verify
- if woy < 0:
- if (woy > -2) or (woy == -2 and isleap_prev):
- woy = 53
- else:
- woy = 52
- elif woy == 53:
- if 31 - dts.day + dow < 3:
- woy = 1
-
- out[i] = woy
+ out[i] = get_week_of_year(dts.year, dts.month, dts.day)
return out
elif field == 'q':
@@ -551,7 +531,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
continue
dt64_to_dtstruct(dtindex[i], &dts)
- out[i] = days_in_month(dts)
+ out[i] = get_days_in_month(dts.year, dts.month)
return out
elif field == 'is_leap_year':
return isleapyear_arr(get_date_field(dtindex, 'Y'))
@@ -676,10 +656,6 @@ def get_timedelta_field(ndarray[int64_t] tdindex, object field):
raise ValueError("Field %s not supported" % field)
-cdef inline int days_in_month(pandas_datetimestruct dts) nogil:
- return days_per_month_table[is_leapyear(dts.year)][dts.month - 1]
-
-
cpdef isleapyear_arr(ndarray years):
"""vectorized version of isleapyear; NaT evaluates as False"""
cdef:
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 59044fe314e08..478611fe9cab9 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -20,6 +20,7 @@ from util cimport (is_datetime64_object, is_timedelta64_object,
is_integer_object, is_string_object,
INT64_MAX)
+cimport ccalendar
from conversion import tz_localize_to_utc, date_normalize
from conversion cimport (tz_convert_single, _TSObject,
convert_to_tsobject, convert_datetime_to_tsobject)
@@ -699,6 +700,9 @@ class Timestamp(_Timestamp):
@property
def week(self):
+ if self.freq is None:
+ # fastpath for non-business
+ return ccalendar.get_week_of_year(self.year, self.month, self.day)
return self._get_field('woy')
weekofyear = week
@@ -709,7 +713,7 @@ class Timestamp(_Timestamp):
@property
def days_in_month(self):
- return self._get_field('dim')
+ return ccalendar.get_days_in_month(self.year, self.month)
daysinmonth = days_in_month
diff --git a/setup.py b/setup.py
index c58cc8ef99faf..e6480cfedaee0 100755
--- a/setup.py
+++ b/setup.py
@@ -317,6 +317,7 @@ class CheckSDist(sdist_class):
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/parsers.pyx',
+ 'pandas/_libs/tslibs/ccalendar.pyx',
'pandas/_libs/tslibs/period.pyx',
'pandas/_libs/tslibs/strptime.pyx',
'pandas/_libs/tslibs/np_datetime.pyx',
@@ -537,6 +538,8 @@ def pxd(name):
'_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
+ '_libs.tslibs.ccalendar': {
+ 'pyxfile': '_libs/tslibs/ccalendar'},
'_libs.tslibs.conversion': {
'pyxfile': '_libs/tslibs/conversion',
'pxdfiles': ['_libs/src/util',
@@ -547,7 +550,8 @@ def pxd(name):
'sources': np_datetime_sources},
'_libs.tslibs.fields': {
'pyxfile': '_libs/tslibs/fields',
- 'pxdfiles': ['_libs/tslibs/nattype'],
+ 'pxdfiles': ['_libs/tslibs/ccalendar',
+ '_libs/tslibs/nattype'],
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.frequencies': {
@@ -594,6 +598,7 @@ def pxd(name):
'_libs.tslibs.timestamps': {
'pyxfile': '_libs/tslibs/timestamps',
'pxdfiles': ['_libs/src/util',
+ '_libs/tslibs/ccalendar',
'_libs/tslibs/conversion',
'_libs/tslibs/nattype',
'_libs/tslibs/timedeltas',
| For the moment only updated cimports in `fields`.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18540 | 2017-11-28T03:51:32Z | 2017-12-07T11:08:45Z | 2017-12-07T11:08:45Z | 2017-12-07T15:57:37Z |
Fastpaths for Timestamp properties | diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py
index fc5e6dc8c06d6..9d7d6d2998a8b 100644
--- a/asv_bench/benchmarks/timestamp.py
+++ b/asv_bench/benchmarks/timestamp.py
@@ -1,4 +1,3 @@
-from .pandas_vb_common import *
from pandas import to_timedelta, Timestamp
import pytz
import datetime
@@ -7,61 +6,64 @@
class TimestampProperties(object):
goal_time = 0.2
- params = [None, pytz.timezone('Europe/Amsterdam')]
- param_names = ['tz']
+ params = [(None, None),
+ (pytz.timezone('Europe/Amsterdam'), None),
+ (None, 'B'),
+ (pytz.timezone('Europe/Amsterdam'), 'B')]
+ param_names = ['tz', 'freq']
- def setup(self, tz):
- self.ts = Timestamp('2017-08-25 08:16:14', tzinfo=tz)
+ def setup(self, tz, freq):
+ self.ts = Timestamp('2017-08-25 08:16:14', tzinfo=tz, freq=freq)
- def time_tz(self, tz):
+ def time_tz(self, tz, freq):
self.ts.tz
- def time_offset(self, tz):
+ def time_offset(self, tz, freq):
self.ts.offset
- def time_dayofweek(self, tz):
+ def time_dayofweek(self, tz, freq):
self.ts.dayofweek
- def time_weekday_name(self, tz):
+ def time_weekday_name(self, tz, freq):
self.ts.weekday_name
- def time_dayofyear(self, tz):
+ def time_dayofyear(self, tz, freq):
self.ts.dayofyear
- def time_week(self, tz):
+ def time_week(self, tz, freq):
self.ts.week
- def time_quarter(self, tz):
+ def time_quarter(self, tz, freq):
self.ts.quarter
- def time_days_in_month(self, tz):
+ def time_days_in_month(self, tz, freq):
self.ts.days_in_month
- def time_freqstr(self, tz):
+ def time_freqstr(self, tz, freq):
self.ts.freqstr
- def time_is_month_start(self, tz):
+ def time_is_month_start(self, tz, freq):
self.ts.is_month_start
- def time_is_month_end(self, tz):
+ def time_is_month_end(self, tz, freq):
self.ts.is_month_end
- def time_is_quarter_start(self, tz):
+ def time_is_quarter_start(self, tz, freq):
self.ts.is_quarter_start
- def time_is_quarter_end(self, tz):
+ def time_is_quarter_end(self, tz, freq):
self.ts.is_quarter_end
- def time_is_year_start(self, tz):
+ def time_is_year_start(self, tz, freq):
self.ts.is_quarter_end
- def time_is_year_end(self, tz):
+ def time_is_year_end(self, tz, freq):
self.ts.is_quarter_end
- def time_is_leap_year(self, tz):
+ def time_is_leap_year(self, tz, freq):
self.ts.is_quarter_end
- def time_microsecond(self, tz):
+ def time_microsecond(self, tz, freq):
self.ts.microsecond
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index 8fdded0bcb07a..cf0c0e2c01d60 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -304,10 +304,12 @@ cdef class _Timestamp(datetime):
out = get_date_field(np.array([val], dtype=np.int64), field)
return int(out[0])
- cpdef _get_start_end_field(self, field):
+ cpdef bint _get_start_end_field(self, str field):
cdef:
int64_t val
dict kwds
+ ndarray out
+ int month_kw
freq = self.freq
if freq:
@@ -713,7 +715,7 @@ class Timestamp(_Timestamp):
@property
def quarter(self):
- return self._get_field('q')
+ return ((self.month - 1) // 3) + 1
@property
def days_in_month(self):
@@ -727,26 +729,44 @@ class Timestamp(_Timestamp):
@property
def is_month_start(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return self.day == 1
return self._get_start_end_field('is_month_start')
@property
def is_month_end(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return self.day == self.days_in_month
return self._get_start_end_field('is_month_end')
@property
def is_quarter_start(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return self.day == 1 and self.month % 3 == 1
return self._get_start_end_field('is_quarter_start')
@property
def is_quarter_end(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return (self.month % 3) == 0 and self.day == self.days_in_month
return self._get_start_end_field('is_quarter_end')
@property
def is_year_start(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return self.day == self.month == 1
return self._get_start_end_field('is_year_start')
@property
def is_year_end(self):
+ if self.freq is None:
+ # fast-path for non-business frequencies
+ return self.month == 12 and self.day == 31
return self._get_start_end_field('is_year_end')
@property
diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py
index 545ed7f1ebbf3..992f211229441 100644
--- a/pandas/tests/scalar/test_timestamp.py
+++ b/pandas/tests/scalar/test_timestamp.py
@@ -47,6 +47,28 @@ def test_overflow_offset(self):
stamp - offset
+class TestTimestampProperties(object):
+
+ def test_properties_business(self):
+ ts = Timestamp('2017-10-01', freq='B')
+ control = Timestamp('2017-10-01')
+ assert ts.dayofweek == 6
+ assert not ts.is_month_start # not a weekday
+ assert not ts.is_quarter_start # not a weekday
+ # Control case: non-business is month/qtr start
+ assert control.is_month_start
+ assert control.is_quarter_start
+
+ ts = Timestamp('2017-09-30', freq='B')
+ control = Timestamp('2017-09-30')
+ assert ts.dayofweek == 5
+ assert not ts.is_month_end # not a weekday
+ assert not ts.is_quarter_end # not a weekday
+ # Control case: non-business is month/qtr start
+ assert control.is_month_end
+ assert control.is_quarter_end
+
+
class TestTimestamp(object):
def test_constructor(self):
| Addresses a bunch of the TimestampProperties regressions in #18532 . ASV vs 0.21.0
```
asv continuous -f 1.1 -E virtualenv 81372093 HEAD -b TimestampProperties
[...]
before after ratio
[81372093] [5fc79fb0]
+ 5.73±0.02μs 10.5±0.03μs 1.84 timestamp.TimestampProperties.time_dayofyear(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
+ 5.96±0.04μs 10.9±0.03μs 1.82 timestamp.TimestampProperties.time_is_month_end(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
+ 5.80±0.01μs 10.5±0.1μs 1.82 timestamp.TimestampProperties.time_days_in_month(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
+ 6.18±0.01μs 10.6±0.6μs 1.72 timestamp.TimestampProperties.time_week(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 5.82±0.01μs 337±4ns 0.06 timestamp.TimestampProperties.time_is_leap_year(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 5.99±0.09μs 326±10ns 0.05 timestamp.TimestampProperties.time_quarter(None)
- 5.99±0.01μs 324±0.6ns 0.05 timestamp.TimestampProperties.time_is_year_end(None)
- 5.90±0.01μs 319±0.5ns 0.05 timestamp.TimestampProperties.time_is_month_start(None)
- 6.02±0.02μs 321±1ns 0.05 timestamp.TimestampProperties.time_is_leap_year(None)
- 6.08±0.05μs 321±2ns 0.05 timestamp.TimestampProperties.time_is_year_end(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 6.06±0.01μs 316±0.8ns 0.05 timestamp.TimestampProperties.time_is_month_start(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 6.19±0.09μs 319±0.7ns 0.05 timestamp.TimestampProperties.time_is_quarter_start(None)
- 6.09±0.2μs 310±0.6ns 0.05 timestamp.TimestampProperties.time_quarter(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 6.38±0.2μs 323±0.7ns 0.05 timestamp.TimestampProperties.time_is_year_start(None)
- 6.90±0.4μs 337±4ns 0.05 timestamp.TimestampProperties.time_is_quarter_end(None)
- 6.73±0.1μs 325±2ns 0.05 timestamp.TimestampProperties.time_is_year_start(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 6.75±0.2μs 321±1ns 0.05 timestamp.TimestampProperties.time_is_quarter_start(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
- 7.14±0.1μs 325±0.4ns 0.05 timestamp.TimestampProperties.time_is_quarter_end(<DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>)
```
Timestamps with freqs are still SOL, but that's a relatively rare case. | https://api.github.com/repos/pandas-dev/pandas/pulls/18539 | 2017-11-28T03:12:42Z | 2017-11-29T00:22:40Z | 2017-11-29T00:22:40Z | 2017-12-08T19:38:24Z |
CLN: ASV frame_methods benchmark | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 5fad7b682c2ed..d577ebc20a31c 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -124,3 +124,18 @@ def setup(self, offset, n_steps):
def time_frame_ctor(self, offset, n_steps):
DataFrame(self.d)
+
+
+class FromRecords(object):
+
+ goal_time = 0.2
+ params = [None, 1000]
+ param_names = ['nrows']
+
+ def setup(self, nrows):
+ N = 100000
+ self.gen = ((x, (x * 20), (x * 100)) for x in range(N))
+
+ def time_frame_from_records_generator(self, nrows):
+ # issue-6700
+ self.df = DataFrame.from_records(self.gen, nrows=nrows)
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 53ee4d8019938..7ed341425e561 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -1,10 +1,13 @@
-from .pandas_vb_common import *
import string
+import numpy as np
+import pandas.util.testing as tm
+from pandas import (DataFrame, Series, MultiIndex, date_range, period_range,
+ isnull, NaT)
+from .pandas_vb_common import setup
-#----------------------------------------------------------------------
-# get_numeric_data
-class frame_get_numeric_data(object):
+class GetNumericData(object):
+
goal_time = 0.2
def setup(self):
@@ -16,19 +19,21 @@ def setup(self):
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
-#----------------------------------------------------------------------
-# lookup
-class frame_fancy_lookup(object):
+class Lookup(object):
+
goal_time = 0.2
def setup(self):
- self.df = DataFrame(np.random.randn(10000, 8), columns=list('abcdefgh'))
+ self.df = DataFrame(np.random.randn(10000, 8),
+ columns=list('abcdefgh'))
self.df['foo'] = 'bar'
self.row_labels = list(self.df.index[::10])[:900]
- self.col_labels = (list(self.df.columns) * 100)
- self.row_labels_all = np.array((list(self.df.index) * len(self.df.columns)), dtype='object')
- self.col_labels_all = np.array((list(self.df.columns) * len(self.df.index)), dtype='object')
+ self.col_labels = list(self.df.columns) * 100
+ self.row_labels_all = np.array(
+ list(self.df.index) * len(self.df.columns), dtype='object')
+ self.col_labels_all = np.array(
+ list(self.df.columns) * len(self.df.index), dtype='object')
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
@@ -37,25 +42,20 @@ def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
-#----------------------------------------------------------------------
-# reindex
-
class Reindex(object):
+
goal_time = 0.2
def setup(self):
- self.df = DataFrame(randn(10000, 1000))
- self.idx = np.arange(4000, 7000)
-
+ N = 10**3
+ self.df = DataFrame(np.random.randn(N * 10, N))
+ self.idx = np.arange(4 * N, 7 * N)
self.df2 = DataFrame(
- dict([(c, {0: randint(0, 2, 1000).astype(np.bool_),
- 1: randint(0, 1000, 1000).astype(
- np.int16),
- 2: randint(0, 1000, 1000).astype(
- np.int32),
- 3: randint(0, 1000, 1000).astype(
- np.int64),}[randint(0, 4)]) for c in
- range(1000)]))
+ {c: {0: np.random.randint(0, 2, N).astype(np.bool_),
+ 1: np.random.randint(0, N, N).astype(np.int16),
+ 2: np.random.randint(0, N, N).astype(np.int32),
+ 3: np.random.randint(0, N, N).astype(np.int64)}
+ [np.random.randint(0, 4)] for c in range(N)})
def time_reindex_axis0(self):
self.df.reindex(self.idx)
@@ -67,81 +67,86 @@ def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx)
def time_reindex_both_axes_ix(self):
- self.df.ix[(self.idx, self.idx)]
+ self.df.ix[self.idx, self.idx]
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
-#----------------------------------------------------------------------
-# iteritems (monitor no-copying behaviour)
-
class Iteration(object):
+
goal_time = 0.2
def setup(self):
- self.df = DataFrame(randn(10000, 1000))
- self.df2 = DataFrame(np.random.randn(50000, 10))
- self.df3 = pd.DataFrame(np.random.randn(1000,5000),
- columns=['C'+str(c) for c in range(5000)])
+ N = 1000
+ self.df = DataFrame(np.random.randn(N * 10, N))
+ self.df2 = DataFrame(np.random.randn(N * 50, 10))
+ self.df3 = DataFrame(np.random.randn(N, 5 * N),
+ columns=['C' + str(c) for c in range(N * 5)])
- def f(self):
+ def time_iteritems(self):
+ # (monitor no-copying behaviour)
if hasattr(self.df, '_item_cache'):
self.df._item_cache.clear()
- for (name, col) in self.df.iteritems():
- pass
-
- def g(self):
- for (name, col) in self.df.iteritems():
+ for name, col in self.df.iteritems():
pass
- def time_iteritems(self):
- self.f()
-
def time_iteritems_cached(self):
- self.g()
+ for name, col in self.df.iteritems():
+ pass
def time_iteritems_indexing(self):
- df = self.df3
- for col in df:
- df[col]
+ for col in self.df3:
+ self.df3[col]
def time_itertuples(self):
for row in self.df2.itertuples():
pass
+ def time_iterrows(self):
+ for row in self.df.iterrows():
+ pass
-#----------------------------------------------------------------------
-# to_string, to_html, repr
-class Formatting(object):
+class ToString(object):
+
goal_time = 0.2
def setup(self):
- self.df = DataFrame(randn(100, 10))
+ self.df = DataFrame(np.random.randn(100, 10))
- self.nrows = 500
- self.df2 = DataFrame(randn(self.nrows, 10))
- self.df2[0] = period_range('2000', '2010', self.nrows)
- self.df2[1] = range(self.nrows)
+ def time_to_string_floats(self):
+ self.df.to_string()
- self.nrows = 10000
- self.data = randn(self.nrows, 10)
- self.idx = MultiIndex.from_arrays(np.tile(randn(3, int(self.nrows / 100)), 100))
- self.df3 = DataFrame(self.data, index=self.idx)
- self.idx = randn(self.nrows)
- self.df4 = DataFrame(self.data, index=self.idx)
- self.df_tall = pandas.DataFrame(np.random.randn(10000, 10))
+class ToHTML(object):
- self.df_wide = pandas.DataFrame(np.random.randn(10, 10000))
+ goal_time = 0.2
- def time_to_string_floats(self):
- self.df.to_string()
+ def setup(self):
+ nrows = 500
+ self.df2 = DataFrame(np.random.randn(nrows, 10))
+ self.df2[0] = period_range('2000', '2010', nrows)
+ self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
+
+class Repr(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ nrows = 10000
+ data = np.random.randn(nrows, 10)
+ idx = MultiIndex.from_arrays(np.tile(np.random.randn(3, nrows / 100),
+ 100))
+ self.df3 = DataFrame(data, index=idx)
+ self.df4 = DataFrame(data, index=np.random.randn(nrows))
+ self.df_tall = DataFrame(np.random.randn(nrows, 10))
+ self.df_wide = DataFrame(np.random.randn(10, nrows))
+
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
@@ -155,21 +160,16 @@ def time_frame_repr_wide(self):
repr(self.df_wide)
-#----------------------------------------------------------------------
-# nulls/masking
+class MaskBool(object):
-
-## masking
-
-class frame_mask_bools(object):
goal_time = 0.2
def setup(self):
- self.data = np.random.randn(1000, 500)
- self.df = DataFrame(self.data)
- self.df = self.df.where((self.df > 0))
- self.bools = (self.df > 0)
- self.mask = isnull(self.df)
+ data = np.random.randn(1000, 500)
+ df = DataFrame(data)
+ df = df.where(df > 0)
+ self.bools = df > 0
+ self.mask = isnull(df)
def time_frame_mask_bools(self):
self.bools.mask(self.mask)
@@ -178,31 +178,26 @@ def time_frame_mask_floats(self):
self.bools.astype(float).mask(self.mask)
-## isnull
+class Isnull(object):
-class FrameIsnull(object):
goal_time = 0.2
def setup(self):
- self.df_no_null = DataFrame(np.random.randn(1000, 1000))
-
- np.random.seed(1234)
- self.sample = np.array([np.nan, 1.0])
- self.data = np.random.choice(self.sample, (1000, 1000))
- self.df = DataFrame(self.data)
-
- np.random.seed(1234)
- self.sample = np.array(list(string.ascii_lowercase) +
- list(string.ascii_uppercase) +
- list(string.whitespace))
- self.data = np.random.choice(self.sample, (1000, 1000))
- self.df_strings= DataFrame(self.data)
-
- np.random.seed(1234)
- self.sample = np.array([NaT, np.nan, None, np.datetime64('NaT'),
- np.timedelta64('NaT'), 0, 1, 2.0, '', 'abcd'])
- self.data = np.random.choice(self.sample, (1000, 1000))
- self.df_obj = DataFrame(self.data)
+ N = 10**3
+ self.df_no_null = DataFrame(np.random.randn(N, N))
+
+ sample = np.array([np.nan, 1.0])
+ data = np.random.choice(sample, (N, N))
+ self.df = DataFrame(data)
+
+ sample = np.array(list(string.ascii_letters + string.whitespace))
+ data = np.random.choice(sample, (N, N))
+ self.df_strings = DataFrame(data)
+
+ sample = np.array([NaT, np.nan, None, np.datetime64('NaT'),
+ np.timedelta64('NaT'), 0, 1, 2.0, '', 'abcd'])
+ data = np.random.choice(sample, (N, N))
+ self.df_obj = DataFrame(data)
def time_isnull_floats_no_null(self):
isnull(self.df_no_null)
@@ -217,92 +212,74 @@ def time_isnull_obj(self):
isnull(self.df_obj)
-# ----------------------------------------------------------------------
-# fillna in place
-
-class frame_fillna_inplace(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(randn(10000, 100))
- self.df.values[::2] = np.nan
-
- def time_frame_fillna_inplace(self):
- self.df.fillna(0, inplace=True)
-
+class Fillna(object):
-
-class frame_fillna_many_columns_pad(object):
goal_time = 0.2
+ params = ([True, False], ['pad', 'bfill'])
+ param_names = ['inplace', 'method']
- def setup(self):
- self.values = np.random.randn(1000, 1000)
- self.values[::2] = np.nan
- self.df = DataFrame(self.values)
-
- def time_frame_fillna_many_columns_pad(self):
- self.df.fillna(method='pad')
+ def setup(self, inplace, method):
+ values = np.random.randn(10000, 100)
+ values[::2] = np.nan
+ self.df = DataFrame(values)
+ def time_frame_fillna(self, inplace, method):
+ self.df.fillna(inplace=inplace, method=method)
class Dropna(object):
+
goal_time = 0.2
+ params = (['all', 'any'], [0, 1])
+ param_names = ['how', 'axis']
- def setup(self):
- self.data = np.random.randn(10000, 1000)
- self.df = DataFrame(self.data)
+ def setup(self, how, axis):
+ self.df = DataFrame(np.random.randn(10000, 1000))
self.df.ix[50:1000, 20:50] = np.nan
self.df.ix[2000:3000] = np.nan
self.df.ix[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed['foo'] = 'bar'
- self.df_mi = self.df.copy()
- self.df_mi.index = MultiIndex.from_tuples(self.df_mi.index.map((lambda x: (x, x))))
- self.df_mi.columns = MultiIndex.from_tuples(self.df_mi.columns.map((lambda x: (x, x))))
-
- self.df_mixed_mi = self.df_mixed.copy()
- self.df_mixed_mi.index = MultiIndex.from_tuples(self.df_mixed_mi.index.map((lambda x: (x, x))))
- self.df_mixed_mi.columns = MultiIndex.from_tuples(self.df_mixed_mi.columns.map((lambda x: (x, x))))
+ def time_dropna(self, how, axis):
+ self.df.dropna(how=how, axis=axis)
- def time_dropna_axis0_all(self):
- self.df.dropna(how='all', axis=0)
+ def time_dropna_axis_mixed_dtypes(self, how, axis):
+ self.df_mixed.dropna(how=how, axis=axis)
- def time_dropna_axis0_any(self):
- self.df.dropna(how='any', axis=0)
- def time_dropna_axis1_all(self):
- self.df.dropna(how='all', axis=1)
+class Count(object):
- def time_dropna_axis1_any(self):
- self.df.dropna(how='any', axis=1)
-
- def time_dropna_axis0_all_mixed_dtypes(self):
- self.df_mixed.dropna(how='all', axis=0)
-
- def time_dropna_axis0_any_mixed_dtypes(self):
- self.df_mixed.dropna(how='any', axis=0)
-
- def time_dropna_axis1_all_mixed_dtypes(self):
- self.df_mixed.dropna(how='all', axis=1)
+ goal_time = 0.2
- def time_dropna_axis1_any_mixed_dtypes(self):
- self.df_mixed.dropna(how='any', axis=1)
+ params = [0, 1]
+ param_names = ['axis']
- def time_count_level_axis0_multi(self):
- self.df_mi.count(axis=0, level=1)
+ def setup(self, axis):
+ self.df = DataFrame(np.random.randn(10000, 1000))
+ self.df.ix[50:1000, 20:50] = np.nan
+ self.df.ix[2000:3000] = np.nan
+ self.df.ix[:, 60:70] = np.nan
+ self.df_mixed = self.df.copy()
+ self.df_mixed['foo'] = 'bar'
- def time_count_level_axis1_multi(self):
- self.df_mi.count(axis=1, level=1)
+ self.df.index = MultiIndex.from_arrays([self.df.index, self.df.index])
+ self.df.columns = MultiIndex.from_arrays([self.df.columns,
+ self.df.columns])
+ self.df_mixed.index = MultiIndex.from_arrays([self.df_mixed.index,
+ self.df_mixed.index])
+ self.df_mixed.columns = MultiIndex.from_arrays([self.df_mixed.columns,
+ self.df_mixed.columns])
- def time_count_level_axis0_mixed_dtypes_multi(self):
- self.df_mixed_mi.count(axis=0, level=1)
+ def time_count_level_multi(self, axis):
+ self.df.count(axis=axis, level=1)
- def time_count_level_axis1_mixed_dtypes_multi(self):
- self.df_mixed_mi.count(axis=1, level=1)
+ def time_count_level_mixed_dtypes_multi(self, axis):
+ self.df_mixed.count(axis=axis, level=1)
class Apply(object):
+
goal_time = 0.2
def setup(self):
@@ -310,32 +287,29 @@ def setup(self):
self.s = Series(np.arange(1028.0))
self.df2 = DataFrame({i: self.s for i in range(1028)})
-
self.df3 = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
def time_apply_user_func(self):
- self.df2.apply((lambda x: np.corrcoef(x, self.s)[(0, 1)]))
+ self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
def time_apply_axis_1(self):
- self.df.apply((lambda x: (x + 1)), axis=1)
+ self.df.apply(lambda x: x + 1, axis=1)
def time_apply_lambda_mean(self):
- self.df.apply((lambda x: x.mean()))
+ self.df.apply(lambda x: x.mean())
def time_apply_np_mean(self):
self.df.apply(np.mean)
def time_apply_pass_thru(self):
- self.df.apply((lambda x: x))
+ self.df.apply(lambda x: x)
def time_apply_ref_by_name(self):
- self.df3.apply((lambda x: (x['A'] + x['B'])), axis=1)
+ self.df3.apply(lambda x: x['A'] + x['B'], axis=1)
-#----------------------------------------------------------------------
-# dtypes
+class Dtypes(object):
-class frame_dtypes(object):
goal_time = 0.2
def setup(self):
@@ -344,316 +318,170 @@ def setup(self):
def time_frame_dtypes(self):
self.df.dtypes
-#----------------------------------------------------------------------
-# equals
class Equals(object):
+
goal_time = 0.2
def setup(self):
- self.float_df = DataFrame(np.random.randn(1000, 1000))
- self.object_df = DataFrame(([(['foo'] * 1000)] * 1000))
- self.nonunique_cols = self.object_df.copy()
- self.nonunique_cols.columns = (['A'] * len(self.nonunique_cols.columns))
- self.pairs = dict([(name, self.make_pair(frame)) for (name, frame) in (
- ('float_df', self.float_df), ('object_df', self.object_df),
- ('nonunique_cols', self.nonunique_cols))])
-
- def make_pair(self, frame):
- self.df = frame
- self.df2 = self.df.copy()
- self.df2.ix[((-1), (-1))] = np.nan
- return (self.df, self.df2)
+ N = 10**3
+ self.float_df = DataFrame(np.random.randn(N, N))
+ self.float_df_nan = self.float_df.copy()
+ self.float_df_nan.iloc[-1, -1] = np.nan
- def test_equal(self, name):
- (self.df, self.df2) = self.pairs[name]
- return self.df.equals(self.df)
+ self.object_df = DataFrame('foo', index=range(N), columns=range(N))
+ self.object_df_nan = self.object_df.copy()
+ self.object_df_nan.iloc[-1, -1] = np.nan
- def test_unequal(self, name):
- (self.df, self.df2) = self.pairs[name]
- return self.df.equals(self.df2)
+ self.nonunique_cols = self.object_df.copy()
+ self.nonunique_cols.columns = ['A'] * len(self.nonunique_cols.columns)
+ self.nonunique_cols_nan = self.nonunique_cols.copy()
+ self.nonunique_cols_nan.iloc[-1, -1] = np.nan
def time_frame_float_equal(self):
- self.test_equal('float_df')
+ self.float_df.equals(self.float_df)
def time_frame_float_unequal(self):
- self.test_unequal('float_df')
+ self.float_df.equals(self.float_df_nan)
def time_frame_nonunique_equal(self):
- self.test_equal('nonunique_cols')
+ self.nonunique_cols.equals(self.nonunique_cols)
def time_frame_nonunique_unequal(self):
- self.test_unequal('nonunique_cols')
+ self.nonunique_cols.equals(self.nonunique_cols_nan)
def time_frame_object_equal(self):
- self.test_equal('object_df')
+ self.object_df.equals(self.object_df)
def time_frame_object_unequal(self):
- self.test_unequal('object_df')
+ self.object_df.equals(self.object_df_nan)
class Interpolate(object):
+
goal_time = 0.2
+ params = [None, 'infer']
+ param_names = ['downcast']
- def setup(self):
+ def setup(self, downcast):
+ N = 10000
# this is the worst case, where every column has NaNs.
- self.df = DataFrame(randn(10000, 100))
+ self.df = DataFrame(np.random.randn(N, 100))
self.df.values[::2] = np.nan
- self.df2 = DataFrame(
- {'A': np.arange(0, 10000), 'B': np.random.randint(0, 100, 10000),
- 'C': randn(10000), 'D': randn(10000),})
+ self.df2 = DataFrame({'A': np.arange(0, N),
+ 'B': np.random.randint(0, 100, N),
+ 'C': np.random.randn(N),
+ 'D': np.random.randn(N)})
self.df2.loc[1::5, 'A'] = np.nan
self.df2.loc[1::5, 'C'] = np.nan
- def time_interpolate(self):
- self.df.interpolate()
+ def time_interpolate(self, downcast):
+ self.df.interpolate(downcast=downcast)
- def time_interpolate_some_good(self):
- self.df2.interpolate()
-
- def time_interpolate_some_good_infer(self):
- self.df2.interpolate(downcast='infer')
+ def time_interpolate_some_good(self, downcast):
+ self.df2.interpolate(downcast=downcast)
class Shift(object):
# frame shift speedup issue-5609
goal_time = 0.2
+ params = [0, 1]
+ param_names = ['axis']
- def setup(self):
+ def setup(self, axis):
self.df = DataFrame(np.random.rand(10000, 500))
- def time_shift_axis0(self):
- self.df.shift(1, axis=0)
-
- def time_shift_axis_1(self):
- self.df.shift(1, axis=1)
-
-
-#-----------------------------------------------------------------------------
-# from_records issue-6700
-
-class frame_from_records_generator(object):
- goal_time = 0.2
-
- def get_data(self, n=100000):
- return ((x, (x * 20), (x * 100)) for x in range(n))
-
- def time_frame_from_records_generator(self):
- self.df = DataFrame.from_records(self.get_data())
+ def time_shift(self, axis):
+ self.df.shift(1, axis=axis)
- def time_frame_from_records_generator_nrows(self):
- self.df = DataFrame.from_records(self.get_data(), nrows=1000)
-
-
-#-----------------------------------------------------------------------------
-# nunique
-
-class frame_nunique(object):
+class Nunique(object):
def setup(self):
- self.data = np.random.randn(10000, 1000)
- self.df = DataFrame(self.data)
+ self.df = DataFrame(np.random.randn(10000, 1000))
def time_frame_nunique(self):
self.df.nunique()
+class Duplicated(object):
-#-----------------------------------------------------------------------------
-# duplicated
-
-class frame_duplicated(object):
goal_time = 0.2
def setup(self):
- self.n = (1 << 20)
- self.t = date_range('2015-01-01', freq='S', periods=(self.n // 64))
- self.xs = np.random.randn((self.n // 64)).round(2)
- self.df = DataFrame({'a': np.random.randint(((-1) << 8), (1 << 8), self.n), 'b': np.random.choice(self.t, self.n), 'c': np.random.choice(self.xs, self.n), })
-
- self.df2 = DataFrame(np.random.randn(1000, 100).astype(str))
+ n = (1 << 20)
+ t = date_range('2015-01-01', freq='S', periods=(n // 64))
+ xs = np.random.randn(n // 64).round(2)
+ self.df = DataFrame({'a': np.random.randint(-1 << 8, 1 << 8, n),
+ 'b': np.random.choice(t, n),
+ 'c': np.random.choice(xs, n)})
+ self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
def time_frame_duplicated(self):
self.df.duplicated()
def time_frame_duplicated_wide(self):
- self.df2.T.duplicated()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class frame_xs_col(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(randn(1, 100000))
-
- def time_frame_xs_col(self):
- self.df.xs(50000, axis=1)
-
-
-class frame_xs_row(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(randn(100000, 1))
-
- def time_frame_xs_row(self):
- self.df.xs(50000)
-
-
-class frame_sort_index(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(randn(1000000, 2), columns=list('AB'))
-
- def time_frame_sort_index(self):
- self.df.sort_index()
-
-
-class frame_sort_index_by_columns(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 10000
- self.K = 10
- self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
- self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
- self.df = DataFrame({'key1': self.key1, 'key2': self.key2, 'value': np.random.randn((self.N * self.K)), })
- self.col_array_list = list(self.df.values.T)
-
- def time_frame_sort_index_by_columns(self):
- self.df.sort_index(by=['key1', 'key2'])
-
-
-class frame_quantile_axis1(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(np.random.randn(1000, 3),
- columns=list('ABC'))
-
- def time_frame_quantile_axis1(self):
- self.df.quantile([0.1, 0.5], axis=1)
-
-
-#----------------------------------------------------------------------
-# boolean indexing
-
-class frame_boolean_row_select(object):
- goal_time = 0.2
+ self.df2.duplicated()
- def setup(self):
- self.df = DataFrame(randn(10000, 100))
- self.bool_arr = np.zeros(10000, dtype=bool)
- self.bool_arr[:1000] = True
- def time_frame_boolean_row_select(self):
- self.df[self.bool_arr]
+class XS(object):
-class frame_getitem_single_column(object):
goal_time = 0.2
+ params = [0, 1]
+ param_names = ['axis']
- def setup(self):
- self.df = DataFrame(randn(10000, 1000))
- self.df2 = DataFrame(randn(3000, 1), columns=['A'])
- self.df3 = DataFrame(randn(3000, 1))
-
- def h(self):
- for i in range(10000):
- self.df2['A']
-
- def j(self):
- for i in range(10000):
- self.df3[0]
-
- def time_frame_getitem_single_column(self):
- self.h()
+ def setup(self, axis):
+ self.N = 10**4
+ self.df = DataFrame(np.random.randn(self.N, self.N))
- def time_frame_getitem_single_column2(self):
- self.j()
+ def time_frame_xs(self, axis):
+ self.df.xs(self.N / 2, axis=axis)
-#----------------------------------------------------------------------
-# assignment
+class SortValues(object):
-class frame_assign_timeseries_index(object):
goal_time = 0.2
+ params = [True, False]
+ param_names = ['ascending']
- def setup(self):
- self.idx = date_range('1/1/2000', periods=100000, freq='H')
- self.df = DataFrame(randn(100000, 1), columns=['A'], index=self.idx)
-
- def time_frame_assign_timeseries_index(self):
- self.f(self.df)
+ def setup(self, ascending):
+ self.df = DataFrame(np.random.randn(1000000, 2), columns=list('AB'))
- def f(self, df):
- self.x = self.df.copy()
- self.x['date'] = self.x.index
+ def time_frame_sort_values(self, ascending):
+ self.df.sort_values(by='A', ascending=ascending)
+class SortIndexByColumns(object):
-# insert many columns
-
-class frame_insert_100_columns_begin(object):
goal_time = 0.2
def setup(self):
- self.N = 1000
-
- def f(self, K=100):
- self.df = DataFrame(index=range(self.N))
- self.new_col = np.random.randn(self.N)
- for i in range(K):
- self.df.insert(0, i, self.new_col)
+ N = 10000
+ K = 10
+ self.df = DataFrame({'key1': tm.makeStringIndex(N).values.repeat(K),
+ 'key2': tm.makeStringIndex(N).values.repeat(K),
+ 'value': np.random.randn(N * K)})
- def g(self, K=500):
- self.df = DataFrame(index=range(self.N))
- self.new_col = np.random.randn(self.N)
- for i in range(K):
- self.df[i] = self.new_col
+ def time_frame_sort_values_by_columns(self):
+ self.df.sort_values(by=['key1', 'key2'])
- def time_frame_insert_100_columns_begin(self):
- self.f()
- def time_frame_insert_500_columns_end(self):
- self.g()
+class Quantile(object):
-
-
-#----------------------------------------------------------------------
-# strings methods, #2602
-
-class series_string_vector_slice(object):
goal_time = 0.2
+ params = [0, 1]
+ param_names = ['axis']
- def setup(self):
- self.s = Series((['abcdefg', np.nan] * 500000))
-
- def time_series_string_vector_slice(self):
- self.s.str[:5]
+ def setup(self, axis):
+ self.df = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
+ def time_frame_quantile(self, axis):
+ self.df.quantile([0.1, 0.5], axis=axis)
-#----------------------------------------------------------------------
-# df.info() and get_dtype_counts() # 2807
-class frame_get_dtype_counts(object):
+class GetDtypeCounts(object):
+ # 2807
goal_time = 0.2
def setup(self):
@@ -662,13 +490,21 @@ def setup(self):
def time_frame_get_dtype_counts(self):
self.df.get_dtype_counts()
+ def time_info(self):
+ self.df.info()
+
+
+class NSort(object):
-class frame_nlargest(object):
goal_time = 0.2
+ params = ['first', 'last']
+ param_names = ['keep']
- def setup(self):
- self.df = DataFrame(np.random.randn(1000, 3),
- columns=list('ABC'))
+ def setup(self, keep):
+ self.df = DataFrame(np.random.randn(1000, 3), columns=list('ABC'))
+
+ def time_nlargest(self, keep):
+ self.df.nlargest(100, 'A', keep=keep)
- def time_frame_nlargest(self):
- self.df.nlargest(100, 'A')
+ def time_nsmallest(self, keep):
+ self.df.nsmallest(100, 'A', keep=keep)
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py
index f3e7ebbbd33e8..f271b82c758ee 100644
--- a/asv_bench/benchmarks/indexing.py
+++ b/asv_bench/benchmarks/indexing.py
@@ -303,3 +303,69 @@ def time_lookup_ix(self):
def time_lookup_loc(self):
self.s.loc
+
+
+class BooleanRowSelect(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 10000
+ np.random.seed(1234)
+ self.df = DataFrame(np.random.randn(N, 100))
+ self.bool_arr = np.zeros(N, dtype=bool)
+ self.bool_arr[:1000] = True
+
+ def time_frame_boolean_row_select(self):
+ self.df[self.bool_arr]
+
+
+class GetItemSingleColumn(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ np.random.seed(1234)
+ self.df2 = DataFrame(np.random.randn(3000, 1), columns=['A'])
+ self.df3 = DataFrame(np.random.randn(3000, 1))
+
+ def time_frame_getitem_single_column_label(self):
+ self.df2['A']
+
+ def time_frame_getitem_single_column_int(self):
+ self.df3[0]
+
+
+class AssignTimeseriesIndex(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ N = 100000
+ np.random.seed(1234)
+ dx = date_range('1/1/2000', periods=N, freq='H')
+ self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
+
+ def time_frame_assign_timeseries_index(self):
+ self.df['date'] = self.df.index
+
+
+class InsertColumns(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 10**3
+ self.df = DataFrame(index=range(N))
+
+ def time_insert(self):
+ np.random.seed(1234)
+ for i in range(100):
+ self.df.insert(0, i, np.random.randn(self.N))
+
+ def time_assign_with_setitem(self):
+ np.random.seed(1234)
+ for i in range(100):
+ self.df[i] = np.random.randn(self.N)
+
+
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py
index b1a58e49fe86c..62eb826418030 100644
--- a/asv_bench/benchmarks/pandas_vb_common.py
+++ b/asv_bench/benchmarks/pandas_vb_common.py
@@ -13,7 +13,11 @@
except ImportError:
pass
-np.random.seed(1234)
+# This function just needs to be imported into each benchmark file in order to
+# sets up the random seed before each function.
+# http://asv.readthedocs.io/en/latest/writing_benchmarks.html
+def setup(*args, **kwargs):
+ np.random.seed(1234)
# try em until it works!
for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']:
diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index c1600d4e07f58..948d4b92a5a57 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -105,3 +105,15 @@ def setup(self):
def time_encode_decode(self):
self.ser.str.encode('utf-8').str.decode('utf-8')
+
+
+class StringSlice(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(['abcdefg', np.nan] * 500000)
+
+ def time_series_string_vector_slice(self):
+ # GH 2602
+ self.s.str[:5]
| - Added `np.random.seed(1234)` in setup classes where random data is created xref #8144
- Ran flake8 and replaced star imports
- Moved `GetItemSingleColumn`, `AssignTimeseriesIndex`, and `InsertColumns` to `indexing.py`, and `StringSlice` to `strings.py`
- Refactored to use `params` where relevant
```
$asv dev -b ^frame_methods
[ 1.79%] ··· Running frame_methods.Apply.time_apply_axis_1 299ms
[ 3.57%] ··· Running frame_methods.Apply.time_apply_lambda_mean 12.0ms
[ 5.36%] ··· Running frame_methods.Apply.time_apply_np_mean 13.4ms
[ 7.14%] ··· Running frame_methods.Apply.time_apply_pass_thru 13.5ms
[ 8.93%] ··· Running frame_methods.Apply.time_apply_ref_by_name 69.1ms
[ 10.71%] ··· Running frame_methods.Apply.time_apply_user_func 256ms
[ 12.50%] ··· Running frame_methods.Count.time_count_level_mixed_dtypes_multi ok
[ 12.50%] ····
====== =======
axis
------ -------
0 157ms
1 128ms
====== =======
[ 14.29%] ··· Running frame_methods.Count.time_count_level_multi ok
[ 14.29%] ····
====== =======
axis
------ -------
0 113ms
1 147ms
====== =======
[ 16.07%] ··· Running frame_methods.Dropna.time_dropna ok
[ 16.07%] ····
===== ======== ========
-- axis
----- -----------------
how 0 1
===== ======== ========
all 135ms 150ms
any 60.8ms 63.0ms
===== ======== ========
[ 17.86%] ··· Running frame_methods.Dropna.time_dropna_axis_mixed_dtypes ok
[ 17.86%] ····
===== ======= =======
-- axis
----- ---------------
how 0 1
===== ======= =======
all 437ms 436ms
any 346ms 327ms
===== ======= =======
[ 19.64%] ··· Running frame_methods.Dtypes.time_frame_dtypes 336μs
[ 21.43%] ··· Running frame_methods.Duplicated.time_frame_duplicated 340ms
[ 23.21%] ··· Running frame_methods.Duplicated.time_frame_duplicated_wide 353ms
[ 25.00%] ··· Running frame_methods.Equals.time_frame_float_equal 8.72ms
[ 26.79%] ··· Running frame_methods.Equals.time_frame_float_unequal 24.4ms
[ 28.57%] ··· Running frame_methods.Equals.time_frame_nonunique_equal 11.8ms
[ 30.36%] ··· Running frame_methods.Equals.time_frame_nonunique_unequal 12.1ms
[ 32.14%] ··· Running frame_methods.Equals.time_frame_object_equal 41.4ms
[ 33.93%] ··· Running frame_methods.Equals.time_frame_object_unequal 27.3ms
[ 35.71%] ··· Running frame_methods.Fillna.time_frame_fillna ok
[ 35.71%] ····
========= ======== ========
-- method
--------- -----------------
inplace pad bfill
========= ======== ========
True 14.7ms 18.8ms
False 12.7ms 13.3ms
========= ======== ========
[ 37.50%] ··· Running frame_methods.FromRecords.time_frame_from_records_generator ok
[ 37.50%] ····
======= ========
nrows
------- --------
None 136ms
1000 2.23ms
======= ========
[ 39.29%] ··· Running frame_methods.GetDtypeCounts.time_frame_get_dtype_counts 493μs
[ 41.07%] ··· Running frame_methods.GetDtypeCounts.time_info 999ms
[ 41.07%] ····· <class 'pandas.core.frame.DataFrame'>
RangeIndex: 10 entries, 0 to 9
Columns: 10000 entries, 0 to 9999
dtypes: float64(10000)
memory usage: 781.3 KB
[ 42.86%] ··· Running frame_methods.GetNumericData.time_frame_get_numeric_data 411μs
[ 42.86%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/frame_methods.py:17: FutureWarning: consolidate is deprecated and will be removed in a future release.
self.df = self.df.consolidate()
[ 44.64%] ··· Running frame_methods.Interpolate.time_interpolate ok
[ 44.64%] ····
========== ========
downcast
---------- --------
None 98.8ms
infer 159ms
========== ========
[ 46.43%] ··· Running frame_methods.Interpolate.time_interpolate_some_good ok
[ 46.43%] ····
========== ========
downcast
---------- --------
None 3.02ms
infer 6.04ms
========== ========
[ 48.21%] ··· Running frame_methods.Isnull.time_isnull 2.42ms
[ 50.00%] ··· Running frame_methods.Isnull.time_isnull_floats_no_null 2.45ms
[ 51.79%] ··· Running frame_methods.Isnull.time_isnull_obj 89.7ms
[ 53.57%] ··· Running frame_methods.Isnull.time_isnull_strngs 79.3ms
[ 55.36%] ··· Running frame_methods.Iteration.time_iteritems 87.8ms
[ 57.14%] ··· Running frame_methods.Iteration.time_iteritems_cached 87.6ms
[ 58.93%] ··· Running frame_methods.Iteration.time_iteritems_indexing 450ms
[ 60.71%] ··· Running frame_methods.Iteration.time_itertuples 81.1ms
[ 62.50%] ··· Running frame_methods.Lookup.time_frame_fancy_lookup 8.36ms
[ 64.29%] ··· Running frame_methods.Lookup.time_frame_fancy_lookup_all 55.2ms
[ 66.07%] ··· Running frame_methods.MaskBool.time_frame_mask_bools 26.0ms
[ 67.86%] ··· Running frame_methods.MaskBool.time_frame_mask_floats 19.2ms
[ 69.64%] ··· Running frame_methods.Nlargest.time_frame_nlargest 3.73ms
[ 71.43%] ··· Running frame_methods.Nunique.time_frame_nunique 679ms
[ 73.21%] ··· Running frame_methods.Quantile.time_frame_quantile ok
[ 73.21%] ····
====== ========
axis
------ --------
0 995μs
1 1.59ms
====== ========
[ 75.00%] ··· Running frame_methods.Reindex.time_reindex_axis0 17.7ms
[ 76.79%] ··· Running frame_methods.Reindex.time_reindex_axis1 140ms
[ 78.57%] ··· Running frame_methods.Reindex.time_reindex_both_axes 53.3ms
[ 80.36%] ··· Running frame_methods.Reindex.time_reindex_both_axes_ix 53.9ms
[ 82.14%] ··· Running frame_methods.Reindex.time_reindex_upcast 15.4ms
[ 83.93%] ··· Running frame_methods.Repr.time_frame_repr_wide 32.0ms
[ 85.71%] ··· Running frame_methods.Repr.time_html_repr_trunc_mi 429ms
[ 87.50%] ··· Running frame_methods.Repr.time_html_repr_trunc_si 405ms
[ 89.29%] ··· Running frame_methods.Repr.time_repr_tall 52.2ms
[ 91.07%] ··· Running frame_methods.Shift.time_shift ok
[ 91.07%] ····
====== ========
axis
------ --------
0 41.7ms
1 45.1ms
====== ========
[ 92.86%] ··· Running frame_methods.SortIndex.time_frame_sort_index ok
[ 92.86%] ····
=========== ========
ascending
----------- --------
True 21.1ms
False 139ms
=========== ========
[ 94.64%] ··· Running frame_methods.SortIndexByColumns.time_frame_sort_index_by_columns 66.1ms
[ 94.64%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/frame_methods.py:498: FutureWarning: by argument to sort_index is deprecated, pls use .sort_values(by=...)
self.df.sort_index(by=['key1', 'key2'])
[ 96.43%] ··· Running frame_methods.ToHTML.time_to_html_mixed 527ms
[ 98.21%] ··· Running frame_methods.ToString.time_to_string_floats 72.4ms
[100.00%] ··· Running frame_methods.XS.time_frame_xs ok
[100.00%] ····
====== =======
axis
------ -------
0 791μs
1 674μs
====== =======
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18536 | 2017-11-28T01:09:23Z | 2017-11-29T10:50:47Z | 2017-11-29T10:50:47Z | 2017-11-30T00:21:47Z |
DOC: improve DataFrame/SeriesGroupBy.apply doc string | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index ba180cc98cb08..2ca7af771cb24 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -77,6 +77,119 @@
pandas.Panel.%(name)s
"""
+_apply_docs = dict(
+ template="""
+ Apply function ``func`` group-wise and combine the results together.
+
+ The function passed to ``apply`` must take a {input} as its first
+ argument and return a dataframe, a series or a scalar. ``apply`` will
+ then take care of combining the results back together into a single
+ dataframe or series. ``apply`` is therefore a highly flexible
+ grouping method.
+
+ While ``apply`` is a very flexible method, its downside is that
+ using it can be quite a bit slower than using more specific methods.
+ Pandas offers a wide range of method that will be much faster
+ than using ``apply`` for their specific purposes, so try to use them
+ before reaching for ``apply``.
+
+ Parameters
+ ----------
+ func : function
+ A callable that takes a {input} as its first argument, and
+ returns a dataframe, a series or a scalar. In addition the
+ callable may take positional and keyword arguments
+ args, kwargs : tuple and dict
+ Optional positional and keyword arguments to pass to ``func``
+
+ Returns
+ -------
+ applied : Series or DataFrame
+
+ Notes
+ -----
+ In the current implementation ``apply`` calls func twice on the
+ first group to decide whether it can take a fast or slow code
+ path. This can lead to unexpected behavior if func has
+ side-effects, as they will take effect twice for the first
+ group.
+
+ Examples
+ --------
+ {examples}
+
+ See also
+ --------
+ pipe : Apply function to the full GroupBy object instead of to each
+ group.
+ aggregate, transform
+ """,
+ dataframe_examples="""
+ >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]})
+ >>> g = df.groupby('A')
+
+ From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``.
+ Calling ``apply`` in various ways, we can get different grouping results:
+
+ Example 1: below the function passed to ``apply`` takes a dataframe as
+ its argument and returns a dataframe. ``apply`` combines the result for
+ each group together into a new dataframe:
+
+ >>> g.apply(lambda x: x / x.sum())
+ B C
+ 0 0.333333 0.4
+ 1 0.666667 0.6
+ 2 1.000000 1.0
+
+ Example 2: The function passed to ``apply`` takes a dataframe as
+ its argument and returns a series. ``apply`` combines the result for
+ each group together into a new dataframe:
+
+ >>> g.apply(lambda x: x.max() - x.min())
+ B C
+ A
+ a 1 2
+ b 0 0
+
+ Example 3: The function passed to ``apply`` takes a dataframe as
+ its argument and returns a scalar. ``apply`` combines the result for
+ each group together into a series, including setting the index as
+ appropriate:
+
+ >>> g.apply(lambda x: x.C.max() - x.B.min())
+ A
+ a 5
+ b 2
+ dtype: int64
+ """,
+ series_examples="""
+ >>> ser = pd.Series([0, 1, 2], index='a a b'.split())
+ >>> g = ser.groupby(ser.index)
+
+ From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``.
+ Calling ``apply`` in various ways, we can get different grouping results:
+
+ Example 1: The function passed to ``apply`` takes a series as
+ its argument and returns a series. ``apply`` combines the result for
+ each group together into a new series:
+
+ >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
+ 0 0.0
+ 1 0.5
+ 2 4.0
+ dtype: float64
+
+ Example 2: The function passed to ``apply`` takes a series as
+ its argument and returns a scalar. ``apply`` combines the result for
+ each group together into a series, including setting the index as
+ appropriate:
+
+ >>> g.apply(lambda x: x.max() - x.min())
+ a 1
+ b 0
+ dtype: int64
+ """)
+
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
@@ -144,6 +257,7 @@
"""
+
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
@@ -653,50 +767,10 @@ def __iter__(self):
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
- @Substitution(name='groupby')
+ @Appender(_apply_docs['template']
+ .format(input="dataframe",
+ examples=_apply_docs['dataframe_examples']))
def apply(self, func, *args, **kwargs):
- """
- Apply function and combine results together in an intelligent way.
-
- The split-apply-combine combination rules attempt to be as common
- sense based as possible. For example:
-
- case 1:
- group DataFrame
- apply aggregation function (f(chunk) -> Series)
- yield DataFrame, with group axis having group labels
-
- case 2:
- group DataFrame
- apply transform function ((f(chunk) -> DataFrame with same indexes)
- yield DataFrame with resulting chunks glued together
-
- case 3:
- group Series
- apply function with f(chunk) -> DataFrame
- yield DataFrame with result of chunks glued together
-
- Parameters
- ----------
- func : function
-
- Notes
- -----
- See online documentation for full exposition on how to use apply.
-
- In the current implementation apply calls func twice on the
- first group to decide whether it can take a fast or slow code
- path. This can lead to unexpected behavior if func has
- side-effects, as they will take effect twice for the first
- group.
-
-
- See also
- --------
- pipe : Apply function to the full GroupBy object instead of to each
- group.
- aggregate, transform
- """
func = self._is_builtin_func(func)
@@ -3013,6 +3087,12 @@ def _selection_name(self):
""")
+ @Appender(_apply_docs['template']
+ .format(input='series',
+ examples=_apply_docs['series_examples']))
+ def apply(self, func, *args, **kwargs):
+ return super(SeriesGroupBy, self).apply(func, *args, **kwargs)
+
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
| - [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
The doc strings for the ``apply`` method on GroupBy objects are currently IMO difficult to "translate" into working code. This PR proposes making the doc string more practical, including:
* split the doc string up for dataframe and series GroupBy object
* have some working examples
* advice people to use other methods if possible because of speed penalty.
| https://api.github.com/repos/pandas-dev/pandas/pulls/18534 | 2017-11-27T23:55:36Z | 2017-11-28T11:29:40Z | 2017-11-28T11:29:40Z | 2017-12-11T20:09:58Z |
Removed unused funcs from _libs | diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx
index a5aae6d6af656..61d543cd7303a 100644
--- a/pandas/_libs/algos.pyx
+++ b/pandas/_libs/algos.pyx
@@ -212,51 +212,6 @@ cpdef numeric median(numeric[:] arr):
kth_smallest(arr, n // 2 - 1)) / 2
-# -------------- Min, Max subsequence
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def max_subseq(ndarray[double_t] arr):
- cdef:
- Py_ssize_t i=0, s=0, e=0, T, n
- double m, S
-
- n = len(arr)
-
- if len(arr) == 0:
- return (-1, -1, None)
-
- m = arr[0]
- S = m
- T = 0
-
- with nogil:
- for i in range(1, n):
- # S = max { S + A[i], A[i] )
- if (S > 0):
- S = S + arr[i]
- else:
- S = arr[i]
- T = i
- if S > m:
- s = T
- e = i
- m = S
-
- return (s, e, m)
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def min_subseq(ndarray[double_t] arr):
- cdef:
- Py_ssize_t s, e
- double m
-
- (s, e, m) = max_subseq(-arr)
-
- return (s, e, -m)
-
# ----------------------------------------------------------------------
# Pairwise correlation/covariance
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx
index dc0fdcf123c32..9d9ac2ef2f5b1 100644
--- a/pandas/_libs/groupby.pyx
+++ b/pandas/_libs/groupby.pyx
@@ -75,57 +75,6 @@ def group_nth_object(ndarray[object, ndim=2] out,
out[i, j] = resx[i, j]
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_nth_bin_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] bins, int64_t rank):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- object val
- float64_t count
- ndarray[object, ndim=2] resx
- ndarray[float64_t, ndim=2] nobs
-
- nobs = np.zeros((<object> out).shape, dtype=np.float64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- if nobs[b, j] == rank:
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = nan
- else:
- out[i, j] = resx[i, j]
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
def group_last_object(ndarray[object, ndim=2] out,
@@ -169,56 +118,6 @@ def group_last_object(ndarray[object, ndim=2] out,
out[i, j] = resx[i, j]
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def group_last_bin_object(ndarray[object, ndim=2] out,
- ndarray[int64_t] counts,
- ndarray[object, ndim=2] values,
- ndarray[int64_t] bins):
- """
- Only aggregates on axis=0
- """
- cdef:
- Py_ssize_t i, j, N, K, ngroups, b
- object val
- float64_t count
- ndarray[object, ndim=2] resx
- ndarray[float64_t, ndim=2] nobs
-
- nobs = np.zeros((<object> out).shape, dtype=np.float64)
- resx = np.empty((<object> out).shape, dtype=object)
-
- if len(bins) == 0:
- return
- if bins[len(bins) - 1] == len(values):
- ngroups = len(bins)
- else:
- ngroups = len(bins) + 1
-
- N, K = (<object> values).shape
-
- b = 0
- for i in range(N):
- while b < ngroups - 1 and i >= bins[b]:
- b += 1
-
- counts[b] += 1
- for j in range(K):
- val = values[i, j]
-
- # not nan
- if val == val:
- nobs[b, j] += 1
- resx[b, j] = val
-
- for i in range(ngroups):
- for j in range(K):
- if nobs[i, j] == 0:
- out[i, j] = nan
- else:
- out[i, j] = resx[i, j]
-
-
cdef inline float64_t _median_linear(float64_t* a, int n) nogil:
cdef int i, j, na_count = 0
cdef float64_t result
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx
index 53203dd30daee..4c4449fb3e291 100644
--- a/pandas/_libs/hashing.pyx
+++ b/pandas/_libs/hashing.pyx
@@ -105,11 +105,6 @@ cdef inline void u32to8_le(uint8_t* p, uint32_t v) nogil:
p[3] = <uint8_t>(v >> 24)
-cdef inline void u64to8_le(uint8_t* p, uint64_t v) nogil:
- u32to8_le(p, <uint32_t>v)
- u32to8_le(p + 4, <uint32_t>(v >> 32))
-
-
cdef inline uint64_t u8to64_le(uint8_t* p) nogil:
return (<uint64_t>p[0] |
<uint64_t>p[1] << 8 |
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 6befc5e60f5f6..344c5d25d0c3d 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -240,28 +240,4 @@ def ffill_indexer(ndarray[int64_t] indexer):
return result
-def ffill_by_group(ndarray[int64_t] indexer, ndarray[int64_t] group_ids,
- int64_t max_group):
- cdef:
- Py_ssize_t i, n = len(indexer)
- ndarray[int64_t] result, last_obs
- int64_t gid, val
-
- result = np.empty(n, dtype=np.int64)
-
- last_obs = np.empty(max_group, dtype=np.int64)
- last_obs.fill(-1)
-
- for i in range(n):
- gid = group_ids[i]
- val = indexer[i]
- if val == -1:
- result[i] = last_obs[gid]
- else:
- result[i] = val
- last_obs[gid] = val
-
- return result
-
-
include "join_helper.pxi"
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 2ec4b5cf19b72..02b3839ebf181 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -76,27 +76,6 @@ def values_from_object(object o):
return o
-cpdef map_indices_list(list index):
- """
- Produce a dict mapping the values of the input array to their respective
- locations.
-
- Example:
- array(['hi', 'there']) --> {'hi' : 0 , 'there' : 1}
-
- Better to do this with Cython because of the enormous speed boost.
- """
- cdef Py_ssize_t i, length
- cdef dict result = {}
-
- length = len(index)
-
- for i from 0 <= i < length:
- result[index[i]] = i
-
- return result
-
-
@cython.wraparound(False)
@cython.boundscheck(False)
def memory_usage_of_objects(ndarray[object, ndim=1] arr):
@@ -1094,27 +1073,6 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label,
return out
-def group_count(ndarray[int64_t] values, Py_ssize_t size):
- cdef:
- Py_ssize_t i, n = len(values)
- ndarray[int64_t] counts
-
- counts = np.zeros(size, dtype=np.int64)
- for i in range(n):
- counts[values[i]] += 1
- return counts
-
-
-def lookup_values(ndarray[object] values, dict mapping):
- cdef:
- Py_ssize_t i, n = len(values)
-
- result = np.empty(n, dtype='O')
- for i in range(n):
- result[i] = mapping[values[i]]
- return maybe_convert_objects(result)
-
-
@cython.boundscheck(False)
@cython.wraparound(False)
def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
@@ -1145,70 +1103,6 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask,
return counts
-cdef class _PandasNull:
-
- def __richcmp__(_PandasNull self, object other, int op):
- if op == 2: # ==
- return isinstance(other, _PandasNull)
- elif op == 3: # !=
- return not isinstance(other, _PandasNull)
- else:
- return False
-
- def __hash__(self):
- return 0
-
-pandas_null = _PandasNull()
-
-
-def fast_zip_fillna(list ndarrays, fill_value=pandas_null):
- """
- For zipping multiple ndarrays into an ndarray of tuples
- """
- cdef:
- Py_ssize_t i, j, k, n
- ndarray[object] result
- flatiter it
- object val, tup
-
- k = len(ndarrays)
- n = len(ndarrays[0])
-
- result = np.empty(n, dtype=object)
-
- # initialize tuples on first pass
- arr = ndarrays[0]
- it = <flatiter> PyArray_IterNew(arr)
- for i in range(n):
- val = PyArray_GETITEM(arr, PyArray_ITER_DATA(it))
- tup = PyTuple_New(k)
-
- if val != val:
- val = fill_value
-
- PyTuple_SET_ITEM(tup, 0, val)
- Py_INCREF(val)
- result[i] = tup
- PyArray_ITER_NEXT(it)
-
- for j in range(1, k):
- arr = ndarrays[j]
- it = <flatiter> PyArray_IterNew(arr)
- if len(arr) != n:
- raise ValueError('all arrays must be same length')
-
- for i in range(n):
- val = PyArray_GETITEM(arr, PyArray_ITER_DATA(it))
- if val != val:
- val = fill_value
-
- PyTuple_SET_ITEM(result[i], j, val)
- Py_INCREF(val)
- PyArray_ITER_NEXT(it)
-
- return result
-
-
def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups):
cdef:
Py_ssize_t i, group_size, n, start
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c
index 3c63f42f14b83..b1206bd3f2d7a 100644
--- a/pandas/_libs/src/datetime/np_datetime.c
+++ b/pandas/_libs/src/datetime/np_datetime.c
@@ -24,20 +24,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt
#include "np_datetime.h"
#if PY_MAJOR_VERSION >= 3
-#define PyIntObject PyLongObject
-#define PyInt_Type PyLong_Type
-#define PyInt_Check(op) PyLong_Check(op)
-#define PyInt_CheckExact(op) PyLong_CheckExact(op)
-#define PyInt_FromString PyLong_FromString
-#define PyInt_FromUnicode PyLong_FromUnicode
-#define PyInt_FromLong PyLong_FromLong
-#define PyInt_FromSize_t PyLong_FromSize_t
-#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
-#define PyInt_AS_LONG PyLong_AS_LONG
-#define PyInt_AsSsize_t PyLong_AsSsize_t
-#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
-#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#endif
const pandas_datetimestruct _NS_MIN_DTS = {
@@ -692,44 +679,6 @@ int convert_datetimestruct_to_datetime(pandas_datetime_metadata *meta,
return 0;
}
-/*
- * This provides the casting rules for the TIMEDELTA data type units.
- *
- * Notably, there is a barrier between the nonlinear years and
- * months units, and all the other units.
- */
-npy_bool can_cast_timedelta64_units(PANDAS_DATETIMEUNIT src_unit,
- PANDAS_DATETIMEUNIT dst_unit,
- NPY_CASTING casting) {
- switch (casting) {
- /* Allow anything with unsafe casting */
- case NPY_UNSAFE_CASTING:
- return 1;
-
- /*
- * Only enforce the 'date units' vs 'time units' barrier with
- * 'same_kind' casting.
- */
- case NPY_SAME_KIND_CASTING:
- return (src_unit <= PANDAS_FR_M && dst_unit <= PANDAS_FR_M) ||
- (src_unit > PANDAS_FR_M && dst_unit > PANDAS_FR_M);
-
- /*
- * Enforce the 'date units' vs 'time units' barrier and that
- * casting is only allowed towards more precise units with
- * 'safe' casting.
- */
- case NPY_SAFE_CASTING:
- return (src_unit <= dst_unit) &&
- ((src_unit <= PANDAS_FR_M && dst_unit <= PANDAS_FR_M) ||
- (src_unit > PANDAS_FR_M && dst_unit > PANDAS_FR_M));
-
- /* Enforce equality with 'no' or 'equiv' casting */
- default:
- return src_unit == dst_unit;
- }
-}
-
/*
* This provides the casting rules for the DATETIME data type units.
*
| See coverage report: https://github.com/pandas-dev/pandas/pull/18512#issuecomment-347281660
This just removes a handful of unused functions. LMK if some should be kept around just-in-case.
Not included in the never-used group but worth mentioning: lib.fast_unique, lib.convert_timestamps, and lib.string_array_replace_from_nan_rep are each called exactly once in io.pytables, not covered in tests (io.pytables looks poorly covered, might have had a bunch of skipTests locally)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18533 | 2017-11-27T23:15:48Z | 2017-11-28T11:24:28Z | 2017-11-28T11:24:28Z | 2017-12-08T19:38:25Z |
DEPR: Deprecate from_items | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 1890636bc8e1a..4e59f2d0f844a 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -329,7 +329,7 @@ Deprecations
- :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`)
- The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`).
- ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`)
-
+- :func:``DataFrame.from_items`` is deprecated. Use :func:``DataFrame.from_dict()`` instead, or :func:``DataFrame.from_dict(OrderedDict())`` if you wish to preserve the key order (:issue:`17320`)
.. _whatsnew_0230.prior_deprecations:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 788b236b0ec59..96d28581cfdd9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -313,7 +313,7 @@ def _constructor(self):
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
- ['sortlevel', 'get_value', 'set_value', 'from_csv'])
+ ['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
@property
def _constructor_expanddim(self):
@@ -1246,6 +1246,12 @@ def to_records(self, index=True, convert_datetime64=True):
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
+ .. deprecated:: 0.23.0
+ from_items is deprecated and will be removed in a
+ future version. Use :meth:`DataFrame.from_dict(dict())`
+ instead. :meth:`DataFrame.from_dict(OrderedDict(...))` may be used
+ to preserve the key order.
+
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
@@ -1266,6 +1272,13 @@ def from_items(cls, items, columns=None, orient='columns'):
-------
frame : DataFrame
"""
+
+ warnings.warn("from_items is deprecated. Please use "
+ "DataFrame.from_dict(dict()) instead. "
+ "DataFrame.from_dict(OrderedDict()) may be used to "
+ "preserve the key order.",
+ FutureWarning, stacklevel=2)
+
keys, values = lzip(*items)
if orient == 'columns':
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index b409cf20e9a09..0922a4a9c3e9b 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -13,6 +13,7 @@
import datetime
import struct
import sys
+from collections import OrderedDict
import numpy as np
from dateutil.relativedelta import relativedelta
@@ -1571,7 +1572,7 @@ def read(self, nrows=None, convert_dates=None,
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
- data = DataFrame.from_items(data_formatted)
+ data = DataFrame.from_dict(OrderedDict(data_formatted))
del data_formatted
self._do_convert_missing(data, convert_missing)
@@ -1609,7 +1610,7 @@ def read(self, nrows=None, convert_dates=None,
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
- data = DataFrame.from_items(retyped_data)
+ data = DataFrame.from_dict(OrderedDict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
@@ -1722,7 +1723,7 @@ def _do_convert_categoricals(self, data, value_label_dict, lbllist,
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
- data = DataFrame.from_items(cat_converted_data)
+ data = DataFrame.from_dict(OrderedDict(cat_converted_data))
return data
def data_label(self):
@@ -1997,7 +1998,7 @@ def _prepare_categoricals(self, data):
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
- return DataFrame.from_items(data_formatted)
+ return DataFrame.from_dict(OrderedDict(data_formatted))
def _replace_nans(self, data):
# return data
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index b24ae22162a34..8abd88d8a379c 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -871,7 +871,7 @@ def __len__(self, n):
# GH 4297
# support Array
import array
- result = DataFrame.from_items([('A', array.array('i', range(10)))])
+ result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
@@ -1175,28 +1175,35 @@ def test_constructor_manager_resize(self):
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
- recons = DataFrame.from_items(items)
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
- recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
-
- recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
- orient='index')
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ recons = DataFrame.from_items(row_items,
+ columns=self.mixed_frame.columns,
+ orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
with tm.assert_raises_regex(TypeError,
"Must pass columns with "
"orient='index'"):
- DataFrame.from_items(row_items, orient='index')
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
@@ -1204,15 +1211,19 @@ def test_constructor_from_items(self):
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
- recons = DataFrame.from_items(row_items,
- columns=self.mixed_frame.columns,
- orient='index')
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ recons = DataFrame.from_items(row_items,
+ columns=self.mixed_frame.columns,
+ orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
- rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
- orient='index',
- columns=['one', 'two', 'three'])
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
+ orient='index',
+ columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
@@ -1222,12 +1233,28 @@ def test_constructor_from_items_scalars(self):
with tm.assert_raises_regex(ValueError,
r'The value in each \(key, value\) '
'pair must be an array, Series, or dict'):
- DataFrame.from_items([('A', 1), ('B', 4)])
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ DataFrame.from_items([('A', 1), ('B', 4)])
with tm.assert_raises_regex(ValueError,
r'The value in each \(key, value\) '
'pair must be an array, Series, or dict'):
- DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
+ orient='index')
+
+ def test_from_items_deprecation(self):
+ # GH 17320
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
+
+ with tm.assert_produces_warning(FutureWarning,
+ check_stacklevel=False):
+ DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
+ columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
@@ -1256,13 +1283,13 @@ def test_constructor_column_duplicates(self):
tm.assert_frame_equal(df, edf)
- idf = DataFrame.from_items(
- [('a', [8]), ('a', [5])], columns=['a', 'a'])
+ idf = DataFrame.from_records([(8, 5)],
+ columns=['a', 'a'])
+
tm.assert_frame_equal(idf, edf)
- pytest.raises(ValueError, DataFrame.from_items,
- [('a', [8]), ('a', [5]), ('b', [6])],
- columns=['b', 'a', 'a'])
+ pytest.raises(ValueError, DataFrame.from_dict,
+ OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py
index f0a21cde4fbd9..36465db78361f 100644
--- a/pandas/tests/frame/test_nonunique_indexes.py
+++ b/pandas/tests/frame/test_nonunique_indexes.py
@@ -214,9 +214,10 @@ def check(result, expected=None):
for index in [df.index, pd.Index(list('edcba'))]:
this_df = df.copy()
expected_ser = pd.Series(index.values, index=this_df.index)
- expected_df = DataFrame.from_items([('A', expected_ser),
- ('B', this_df['B']),
- ('A', expected_ser)])
+ expected_df = DataFrame({'A': expected_ser,
+ 'B': this_df['B'],
+ 'A': expected_ser},
+ columns=['A', 'B', 'A'])
this_df['A'] = index
check(this_df, expected_df)
diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py
index 8525cb42c2455..f677b356a77a5 100644
--- a/pandas/tests/io/parser/common.py
+++ b/pandas/tests/io/parser/common.py
@@ -8,6 +8,7 @@
import re
import sys
from datetime import datetime
+from collections import OrderedDict
import pytest
import numpy as np
@@ -924,8 +925,9 @@ def test_float_parser(self):
def test_scientific_no_exponent(self):
# see gh-12215
- df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
- ('y', ['42e']), ('z', ['632E'])])
+ df = DataFrame.from_dict(OrderedDict([('w', ['2e']), ('x', ['3E']),
+ ('y', ['42e']),
+ ('z', ['632E'])]))
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index efbabcfd8fc4c..ebb8424b78ed4 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -6,6 +6,7 @@
from distutils.version import LooseVersion
from functools import partial
from warnings import catch_warnings
+from collections import OrderedDict
import numpy as np
import pytest
@@ -315,7 +316,7 @@ def test_excel_table(self):
def test_reader_special_dtypes(self):
- expected = DataFrame.from_items([
+ expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
@@ -325,8 +326,7 @@ def test_reader_special_dtypes(self):
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
- ])
-
+ ]))
basename = 'test_types'
# should read in correctly and infer types
@@ -363,12 +363,12 @@ def test_reader_converters(self):
basename = 'test_converters'
- expected = DataFrame.from_items([
+ expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
- ])
+ ]))
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
@@ -718,32 +718,30 @@ def test_reader_seconds(self):
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
- expected = DataFrame.from_items([("Time",
- [time(1, 2, 3),
- time(2, 45, 56, 100000),
- time(4, 29, 49, 200000),
- time(6, 13, 42, 300000),
- time(7, 57, 35, 400000),
- time(9, 41, 28, 500000),
- time(11, 25, 21, 600000),
- time(13, 9, 14, 700000),
- time(14, 53, 7, 800000),
- time(16, 37, 0, 900000),
- time(18, 20, 54)])])
+ expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
+ time(2, 45, 56, 100000),
+ time(4, 29, 49, 200000),
+ time(6, 13, 42, 300000),
+ time(7, 57, 35, 400000),
+ time(9, 41, 28, 500000),
+ time(11, 25, 21, 600000),
+ time(13, 9, 14, 700000),
+ time(14, 53, 7, 800000),
+ time(16, 37, 0, 900000),
+ time(18, 20, 54)]})
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
- expected = DataFrame.from_items([("Time",
- [time(1, 2, 3),
- time(2, 45, 56),
- time(4, 29, 49),
- time(6, 13, 42),
- time(7, 57, 35),
- time(9, 41, 29),
- time(11, 25, 22),
- time(13, 9, 15),
- time(14, 53, 8),
- time(16, 37, 1),
- time(18, 20, 54)])])
+ expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
+ time(2, 45, 56),
+ time(4, 29, 49),
+ time(6, 13, 42),
+ time(7, 57, 35),
+ time(9, 41, 29),
+ time(11, 25, 22),
+ time(13, 9, 15),
+ time(14, 53, 8),
+ time(16, 37, 1),
+ time(18, 20, 54)]})
actual = self.get_exceldf('times_1900', 'Sheet1')
tm.assert_frame_equal(actual, expected)
@@ -1988,7 +1986,7 @@ def test_datetimes(self):
datetime(2013, 1, 13, 18, 20, 52)]
with ensure_clean(self.ext) as path:
- write_frame = DataFrame.from_items([('A', datetimes)])
+ write_frame = DataFrame({'A': datetimes})
write_frame.to_excel(path, 'Sheet1')
read_frame = read_excel(path, 'Sheet1', header=0)
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index d0d7f881b37d0..89d76061329a3 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -8,6 +8,7 @@
import warnings
from datetime import datetime
from distutils.version import LooseVersion
+from collections import OrderedDict
import numpy as np
import pandas as pd
@@ -945,7 +946,7 @@ def test_categorical_order(self, file):
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
- expected = DataFrame.from_items(cols)
+ expected = DataFrame.from_dict(OrderedDict(cols))
# Read with and with out categoricals, ensure order is identical
file = getattr(self, file)
| - [ ] closes #17320
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18529 | 2017-11-27T20:56:17Z | 2018-01-31T12:15:15Z | 2018-01-31T12:15:15Z | 2018-09-14T01:40:59Z |
CLN: ASV FromDictwithTimestamp | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 5f465a91d38d3..6761d48d25919 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -2,8 +2,9 @@
import pandas.util.testing as tm
from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
- from pandas.tseries import offsets
-except:
+ from pandas.tseries.offsets import Nano, Hour
+except ImportError:
+ # For compatability with older versions
from pandas.core.datetools import * # noqa
from .pandas_vb_common import setup # noqa
@@ -24,16 +25,16 @@ def setup(self):
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
- def time_frame_ctor_list_of_dict(self):
+ def time_list_of_dict(self):
DataFrame(self.dict_list)
- def time_frame_ctor_nested_dict(self):
+ def time_nested_dict(self):
DataFrame(self.data)
- def time_series_ctor_from_dict(self):
+ def time_dict(self):
Series(self.some_dict)
- def time_frame_ctor_nested_dict_int64(self):
+ def time_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
DataFrame(self.data2)
@@ -46,78 +47,24 @@ def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
- def time_frame_from_mi_series(self):
+ def time_mi_series(self):
DataFrame(self.s)
-# ----------------------------------------------------------------------
-# From dict with DatetimeIndex with all offsets
-# dynamically generate benchmarks for every offset
-#
-# get_period_count & get_index_for_offset are there because blindly taking each
-# offset times 1000 can easily go out of Timestamp bounds and raise errors.
+class FromDictwithTimestamp(object):
+ goal_time = 0.2
+ params = [Nano(1), Hour(1)]
+ param_names = ['offset']
-def get_period_count(start_date, off):
- ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
- if (ten_offsets_in_days == 0):
- return 1000
- else:
- periods = 9 * (Timestamp.max - start_date).days // ten_offsets_in_days
- return min(periods, 1000)
-
-
-def get_index_for_offset(off):
- start_date = Timestamp('1/1/1900')
- return date_range(start_date,
- periods=get_period_count(start_date, off),
- freq=off)
-
-
-all_offsets = offsets.__all__
-# extra cases
-for off in ['FY5253', 'FY5253Quarter']:
- all_offsets.pop(all_offsets.index(off))
- all_offsets.extend([off + '_1', off + '_2'])
-
-
-class FromDictwithTimestampOffsets(object):
-
- params = [all_offsets, [1, 2]]
- param_names = ['offset', 'n_steps']
-
- offset_kwargs = {'WeekOfMonth': {'weekday': 1, 'week': 1},
- 'LastWeekOfMonth': {'weekday': 1, 'week': 1},
- 'FY5253': {'startingMonth': 1, 'weekday': 1},
- 'FY5253Quarter': {'qtr_with_extra_week': 1,
- 'startingMonth': 1,
- 'weekday': 1}}
-
- offset_extra_cases = {'FY5253': {'variation': ['nearest', 'last']},
- 'FY5253Quarter': {'variation': ['nearest', 'last']}}
-
- def setup(self, offset, n_steps):
+ def setup(self, offset):
+ N = 10**3
np.random.seed(1234)
- extra = False
- if offset.endswith("_", None, -1):
- extra = int(offset[-1])
- offset = offset[:-2]
-
- kwargs = {}
- if offset in self.offset_kwargs:
- kwargs = self.offset_kwargs[offset]
-
- if extra:
- extras = self.offset_extra_cases[offset]
- for extra_arg in extras:
- kwargs[extra_arg] = extras[extra_arg][extra - 1]
-
- offset = getattr(offsets, offset)
- self.idx = get_index_for_offset(offset(n_steps, **kwargs))
- self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
- self.d = self.df.to_dict()
-
- def time_frame_ctor(self, offset, n_steps):
+ idx = date_range(Timestamp('1/1/1900'), freq=offset, periods=N)
+ df = DataFrame(np.random.randn(N, 10), index=idx)
+ self.d = df.to_dict()
+
+ def time_dict_with_timestamp_offsets(self, offset):
DataFrame(self.d)
| - [x] closes #18511
Also removed unnecessary `self`s and simplified method names
```
$ asv run -q -b ^frame_ctor
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 16.67%] ··· Running frame_ctor.FromDicts.time_dict 6.66ms
[ 33.33%] ··· Running frame_ctor.FromDicts.time_list_of_dict 114ms
[ 50.00%] ··· Running frame_ctor.FromDicts.time_nested_dict 94.1ms
[ 66.67%] ··· Running frame_ctor.FromDicts.time_nested_dict_int64 240ms
[ 83.33%] ··· Running frame_ctor.FromDictwithTimestamp.time_dict_with_timestamp_offsets 76.5ms;...
[100.00%] ··· Running frame_ctor.FromSeries.time_mi_series 328μs
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18527 | 2017-11-27T18:23:05Z | 2017-12-10T16:27:12Z | 2017-12-10T16:27:12Z | 2017-12-11T02:38:19Z |
BUG: Fix marker for high memory | diff --git a/ci/run_circle.sh b/ci/run_circle.sh
index 0e46d28ab6fc4..435985bd42148 100755
--- a/ci/run_circle.sh
+++ b/ci/run_circle.sh
@@ -5,5 +5,5 @@ export PATH="$MINICONDA_DIR/bin:$PATH"
source activate pandas
-echo "pytest --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas"
-pytest --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas
+echo "pytest --strict --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas"
+pytest --strict --junitxml=$CIRCLE_TEST_REPORTS/reports/junit.xml $@ pandas
diff --git a/ci/script_multi.sh b/ci/script_multi.sh
index e03d60360c800..58742552628c8 100755
--- a/ci/script_multi.sh
+++ b/ci/script_multi.sh
@@ -38,17 +38,17 @@ elif [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
elif [ "$COVERAGE" ]; then
- echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
- pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+ echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
elif [ "$SLOW" ]; then
TEST_ARGS="--only-slow --skip-network"
- echo pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
- pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
+ echo pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
else
- echo pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas
- pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml $TEST_ARGS pandas # TODO: doctest
+ echo pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
+ pytest -n 2 -r xX -m "not single" --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas # TODO: doctest
fi
diff --git a/ci/script_single.sh b/ci/script_single.sh
index 375e9879e950f..963ce00b4a094 100755
--- a/ci/script_single.sh
+++ b/ci/script_single.sh
@@ -23,12 +23,12 @@ elif [ "$DOC" ]; then
echo "We are not running pytest as this is a doc-build"
elif [ "$COVERAGE" ]; then
- echo pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
- pytest -s -m "single" --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
+ pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
else
- echo pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas
- pytest -m "single" -r xX --junitxml=/tmp/single.xml $TEST_ARGS pandas # TODO: doctest
+ echo pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas
+ pytest -m "single" -r xX --junitxml=/tmp/single.xml --strict $TEST_ARGS pandas # TODO: doctest
fi
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 5e5fc6e7eac62..3fcbf90d12494 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -4269,9 +4269,10 @@ def test_select_as_multiple(self):
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
- @pytest.mark.skipf(
+ @pytest.mark.skipif(
LooseVersion(tables.__version__) < '3.1.0',
- "tables version does not support fix for nan selection bug: GH 4858")
+ reason=("tables version does not support fix for nan selection "
+ "bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
diff --git a/setup.cfg b/setup.cfg
index 7a88ee8557dc7..828ef80971f7b 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,4 +27,4 @@ markers =
single: mark a test as single cpu only
slow: mark a test as slow
network: mark a test as network
- highmemory: mark a test as a high-memory only
+ high_memory: mark a test as a high-memory only
diff --git a/test.bat b/test.bat
index 6c69f83866ffd..2424f62b8dbfe 100644
--- a/test.bat
+++ b/test.bat
@@ -1,3 +1,3 @@
:: test on windows
-pytest --skip-slow --skip-network pandas -n 2 %*
+pytest --strict --skip-slow --skip-network pandas -n 2 %*
| Added strict flag to catch this in the future
xref https://github.com/pandas-dev/pandas/pull/18427#discussion_r153277727 | https://api.github.com/repos/pandas-dev/pandas/pulls/18526 | 2017-11-27T18:22:37Z | 2017-11-28T11:16:24Z | 2017-11-28T11:16:24Z | 2017-12-20T16:10:56Z |
BUG: Fix groupby over a CategoricalIndex in axis=1 | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index e307e605687bf..bebfd0ab50e90 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -137,6 +137,7 @@ Categorical
- Error messages in the testing module have been improved when items have different ``CategoricalDtype`` (:issue:`18069`)
- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`)
- Bug in ``Categorical.unique()`` returning read-only ``codes`` array when all categories were ``NaN`` (:issue:`18051`)
+- Bug in ``DataFrame.groupby(axis=1)`` with a ``CategoricalIndex`` (:issue:`18432`)
String
^^^^^^
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 0e8368e5a4533..662a863c72325 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -2859,9 +2859,11 @@ def is_in_obj(gpr):
else:
in_axis, name = False, None
- if is_categorical_dtype(gpr) and len(gpr) != len(obj):
- raise ValueError("Categorical dtype grouper must "
- "have len(grouper) == len(data)")
+ if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
+ raise ValueError(
+ ("Length of grouper ({len_gpr}) and axis ({len_axis})"
+ " must be same length"
+ .format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
# create the Grouping
# allow us to passing the actual Grouping as the gpr
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index cc422f2d1cdeb..8702062e9cd0a 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -6,7 +6,7 @@
from warnings import catch_warnings
from pandas import (date_range, Timestamp,
- Index, MultiIndex, DataFrame, Series)
+ Index, MultiIndex, DataFrame, Series, CategoricalIndex)
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.compat import lrange, long
@@ -251,6 +251,29 @@ def test_groupby_levels_and_columns(self):
by_columns.columns = pd.Index(by_columns.columns, dtype=np.int64)
tm.assert_frame_equal(by_levels, by_columns)
+ def test_groupby_categorical_index_and_columns(self):
+ # GH18432
+ columns = ['A', 'B', 'A', 'B']
+ categories = ['B', 'A']
+ data = np.ones((5, 4), int)
+ cat_columns = CategoricalIndex(columns,
+ categories=categories,
+ ordered=True)
+ df = DataFrame(data=data, columns=cat_columns)
+ result = df.groupby(axis=1, level=0).sum()
+ expected_data = 2 * np.ones((5, 2), int)
+ expected_columns = CategoricalIndex(categories,
+ categories=categories,
+ ordered=True)
+ expected = DataFrame(data=expected_data, columns=expected_columns)
+ assert_frame_equal(result, expected)
+
+ # test transposed version
+ df = DataFrame(data.T, index=cat_columns)
+ result = df.groupby(axis=0, level=0).sum()
+ expected = DataFrame(data=expected_data.T, index=expected_columns)
+ assert_frame_equal(result, expected)
+
def test_grouper_getting_correct_binner(self):
# GH 10063
| - [x] closes #18432
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] add whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/18525 | 2017-11-27T18:02:01Z | 2017-11-30T15:11:43Z | 2017-11-30T15:11:43Z | 2017-12-11T20:19:10Z |
check for datetime+period addition | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 52ca05d9a76a9..beac39cd7c9a0 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -207,4 +207,5 @@ Other
- Improved error message when attempting to use a Python keyword as an identifier in a numexpr query (:issue:`18221`)
- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`)
+- Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`)
-
diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx
index 2b09e9376bd3d..b95632b5b0eff 100644
--- a/pandas/_libs/period.pyx
+++ b/pandas/_libs/period.pyx
@@ -17,6 +17,10 @@ from pandas.compat import PY2
cimport cython
+from cpython.datetime cimport PyDateTime_Check, PyDateTime_IMPORT
+# import datetime C API
+PyDateTime_IMPORT
+
from tslibs.np_datetime cimport (pandas_datetimestruct,
dtstruct_to_dt64, dt64_to_dtstruct,
is_leapyear)
@@ -647,9 +651,19 @@ cdef class _Period(object):
elif util.is_integer_object(other):
ordinal = self.ordinal + other * self.freq.n
return Period(ordinal=ordinal, freq=self.freq)
+ elif (PyDateTime_Check(other) or
+ is_period_object(other) or util.is_datetime64_object(other)):
+ # can't add datetime-like
+ # GH#17983
+ sname = type(self).__name__
+ oname = type(other).__name__
+ raise TypeError("unsupported operand type(s) for +: '{self}' "
+ "and '{other}'".format(self=sname,
+ other=oname))
else: # pragma: no cover
return NotImplemented
elif is_period_object(other):
+ # this can be reached via __radd__ because of cython rules
return other + self
else:
return NotImplemented
diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py
index 8cfdf7a461879..3bd4a28b7767d 100644
--- a/pandas/tests/scalar/test_period.py
+++ b/pandas/tests/scalar/test_period.py
@@ -1038,6 +1038,29 @@ def test_add_raises(self):
with tm.assert_raises_regex(TypeError, msg):
dt1 + dt2
+ boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
+
+ @pytest.mark.parametrize('lbox', boxes)
+ @pytest.mark.parametrize('rbox', boxes)
+ def test_add_timestamp_raises(self, rbox, lbox):
+ # GH # 17983
+ ts = pd.Timestamp('2017')
+ per = pd.Period('2017', freq='M')
+
+ # We may get a different message depending on which class raises
+ # the error.
+ msg = (r"cannot add|unsupported operand|"
+ r"can only operate on a|incompatible type|"
+ r"ufunc add cannot use operands")
+ with tm.assert_raises_regex(TypeError, msg):
+ lbox(ts) + rbox(per)
+
+ with tm.assert_raises_regex(TypeError, msg):
+ lbox(per) + rbox(ts)
+
+ with tm.assert_raises_regex(TypeError, msg):
+ lbox(per) + rbox(per)
+
def test_sub(self):
dt1 = Period('2011-01-01', freq='D')
dt2 = Period('2011-01-15', freq='D')
| - [x] closes #17983
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18524 | 2017-11-27T17:45:27Z | 2017-11-29T13:24:01Z | 2017-11-29T13:24:01Z | 2017-11-29T18:07:39Z |
implement shift_quarters --> apply_index for quarters and years | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 6f5ad2ae45f50..251af50ab12ce 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -17,8 +17,6 @@ np.import_array()
from util cimport is_string_object, is_integer_object
-from pandas._libs.tslib import monthrange
-
from conversion cimport tz_convert_single, pydt_to_i8
from frequencies cimport get_freq_code
from nattype cimport NPY_NAT
@@ -471,6 +469,160 @@ cdef inline int month_add_months(pandas_datetimestruct dts, int months) nogil:
return 12 if new_month == 0 else new_month
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def shift_quarters(int64_t[:] dtindex, int quarters,
+ int q1start_month, object day, int modby=3):
+ """
+ Given an int64 array representing nanosecond timestamps, shift all elements
+ by the specified number of quarters using DateOffset semantics.
+
+ Parameters
+ ----------
+ dtindex : int64_t[:] timestamps for input dates
+ quarters : int number of quarters to shift
+ q1start_month : int month in which Q1 begins by convention
+ day : {'start', 'end', 'business_start', 'business_end'}
+ modby : int (3 for quarters, 12 for years)
+
+ Returns
+ -------
+ out : ndarray[int64_t]
+ """
+ cdef:
+ Py_ssize_t i
+ pandas_datetimestruct dts
+ int count = len(dtindex)
+ int months_to_roll, months_since, n, compare_day
+ bint roll_check
+ int64_t[:] out = np.empty(count, dtype='int64')
+
+ if day == 'start':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ n = quarters
+
+ months_since = (dts.month - q1start_month) % modby
+
+ # offset semantics - if on the anchor point and going backwards
+ # shift to next
+ if n <= 0 and (months_since != 0 or
+ (months_since == 0 and dts.day > 1)):
+ n += 1
+
+ dts.year = year_add_months(dts, modby * n - months_since)
+ dts.month = month_add_months(dts, modby * n - months_since)
+ dts.day = 1
+
+ out[i] = dtstruct_to_dt64(&dts)
+
+ elif day == 'end':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ n = quarters
+
+ months_since = (dts.month - q1start_month) % modby
+
+ if n <= 0 and months_since != 0:
+ # The general case of this condition would be
+ # `months_since != 0 or (months_since == 0 and
+ # dts.day > get_days_in_month(dts.year, dts.month))`
+ # but the get_days_in_month inequality would never hold.
+ n += 1
+ elif n > 0 and (months_since == 0 and
+ dts.day < get_days_in_month(dts.year,
+ dts.month)):
+ n -= 1
+
+ dts.year = year_add_months(dts, modby * n - months_since)
+ dts.month = month_add_months(dts, modby * n - months_since)
+ dts.day = get_days_in_month(dts.year, dts.month)
+
+ out[i] = dtstruct_to_dt64(&dts)
+
+ elif day == 'business_start':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ n = quarters
+
+ months_since = (dts.month - q1start_month) % modby
+ compare_month = dts.month - months_since
+ compare_month = compare_month or 12
+ # compare_day is only relevant for comparison in the case
+ # where months_since == 0.
+ compare_day = get_firstbday(dts.year, compare_month)
+
+ if n <= 0 and (months_since != 0 or
+ (months_since == 0 and dts.day > compare_day)):
+ # make sure to roll forward, so negate
+ n += 1
+ elif n > 0 and (months_since == 0 and dts.day < compare_day):
+ # pretend to roll back if on same month but
+ # before compare_day
+ n -= 1
+
+ dts.year = year_add_months(dts, modby * n - months_since)
+ dts.month = month_add_months(dts, modby * n - months_since)
+
+ dts.day = get_firstbday(dts.year, dts.month)
+
+ out[i] = dtstruct_to_dt64(&dts)
+
+ elif day == 'business_end':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ n = quarters
+
+ months_since = (dts.month - q1start_month) % modby
+ compare_month = dts.month - months_since
+ compare_month = compare_month or 12
+ # compare_day is only relevant for comparison in the case
+ # where months_since == 0.
+ compare_day = get_lastbday(dts.year, compare_month)
+
+ if n <= 0 and (months_since != 0 or
+ (months_since == 0 and dts.day > compare_day)):
+ # make sure to roll forward, so negate
+ n += 1
+ elif n > 0 and (months_since == 0 and dts.day < compare_day):
+ # pretend to roll back if on same month but
+ # before compare_day
+ n -= 1
+
+ dts.year = year_add_months(dts, modby * n - months_since)
+ dts.month = month_add_months(dts, modby * n - months_since)
+
+ dts.day = get_lastbday(dts.year, dts.month)
+
+ out[i] = dtstruct_to_dt64(&dts)
+
+ else:
+ raise ValueError("day must be None, 'start', 'end', "
+ "'business_start', or 'business_end'")
+
+ return np.asarray(out)
+
+
@cython.wraparound(False)
@cython.boundscheck(False)
def shift_months(int64_t[:] dtindex, int months, object day=None):
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 292dd5eba938e..22b8cf6119d18 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -33,9 +33,15 @@ def test_quarterly_dont_normalize():
assert (result.time() == date.time())
-@pytest.mark.parametrize('offset', [MonthBegin(), MonthEnd(),
- BMonthBegin(), BMonthEnd()])
-def test_apply_index(offset):
+@pytest.mark.parametrize('n', [-2, 1])
+@pytest.mark.parametrize('cls', [MonthBegin, MonthEnd,
+ BMonthBegin, BMonthEnd,
+ QuarterBegin, QuarterEnd,
+ BQuarterBegin, BQuarterEnd,
+ YearBegin, YearEnd,
+ BYearBegin, BYearEnd])
+def test_apply_index(cls, n):
+ offset = cls(n=n)
rng = pd.date_range(start='1/1/2000', periods=100000, freq='T')
ser = pd.Series(rng)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 8e1ead5dfbe9e..a3cddaa19dc17 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -27,7 +27,7 @@
apply_index_wraps,
roll_yearday,
shift_month,
- BeginMixin, EndMixin,
+ EndMixin,
BaseOffset)
@@ -1028,10 +1028,7 @@ def cbday(self):
@cache_readonly
def m_offset(self):
- kwds = self.kwds
- kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
- return MonthEnd(n=1, normalize=self.normalize, **kwds)
+ return MonthEnd(n=1, normalize=self.normalize)
@apply_wraps
def apply(self, other):
@@ -1106,10 +1103,7 @@ def cbday(self):
@cache_readonly
def m_offset(self):
- kwds = self.kwds
- kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
- return MonthBegin(n=1, normalize=self.normalize, **kwds)
+ return MonthBegin(n=1, normalize=self.normalize)
@apply_wraps
def apply(self, other):
@@ -1254,12 +1248,9 @@ def onOffset(self, dt):
def _apply(self, n, other):
# if other.day is not day_of_month move to day_of_month and update n
- if other.day < self.day_of_month:
- other = other.replace(day=self.day_of_month)
- if n > 0:
- n -= 1
+ if n > 0 and other.day < self.day_of_month:
+ n -= 1
elif other.day > self.day_of_month:
- other = other.replace(day=self.day_of_month)
n += 1
months = n // 2
@@ -1309,12 +1300,9 @@ def onOffset(self, dt):
def _apply(self, n, other):
# if other.day is not day_of_month move to day_of_month and update n
if other.day < self.day_of_month:
- other = other.replace(day=self.day_of_month)
n -= 1
- elif other.day > self.day_of_month:
- other = other.replace(day=self.day_of_month)
- if n <= 0:
- n += 1
+ elif n <= 0 and other.day > self.day_of_month:
+ n += 1
months = n // 2 + n % 2
day = 1 if n % 2 else self.day_of_month
@@ -1471,6 +1459,7 @@ def apply(self, other):
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
+ # TODO: Is this DST-safe?
d = w.rollforward(d)
return d + timedelta(weeks=self.week)
@@ -1550,6 +1539,7 @@ def getOffsetOfMonth(self, dt):
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
+ # TODO: Is this DST-safe?
w = Week(weekday=self.weekday)
return w.rollback(eom)
@@ -1635,6 +1625,12 @@ def onOffset(self, dt):
modMonth = (dt.month - self.startingMonth) % 3
return modMonth == 0 and dt.day == self._get_offset_day(dt)
+ @apply_index_wraps
+ def apply_index(self, dtindex):
+ shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
+ self.startingMonth, self._day_opt)
+ return dtindex._shallow_copy(shifted)
+
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
@@ -1659,7 +1655,7 @@ class BQuarterBegin(QuarterOffset):
_day_opt = 'business_start'
-class QuarterEnd(EndMixin, QuarterOffset):
+class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
@@ -1670,25 +1666,14 @@ class QuarterEnd(EndMixin, QuarterOffset):
_prefix = 'Q'
_day_opt = 'end'
- @apply_index_wraps
- def apply_index(self, i):
- return self._end_apply_index(i, self.freqstr)
-
-class QuarterBegin(BeginMixin, QuarterOffset):
+class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
_day_opt = 'start'
- @apply_index_wraps
- def apply_index(self, i):
- freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
- month = liboffsets._int_to_month[freq_month]
- freqstr = 'Q-{month}'.format(month=month)
- return self._beg_apply_index(i, freqstr)
-
# ---------------------------------------------------------------------
# Year-Based Offset Classes
@@ -1709,6 +1694,13 @@ def apply(self, other):
months = years * 12 + (self.month - other.month)
return shift_month(other, months, self._day_opt)
+ @apply_index_wraps
+ def apply_index(self, dtindex):
+ shifted = liboffsets.shift_quarters(dtindex.asi8, self.n,
+ self.month, self._day_opt,
+ modby=12)
+ return dtindex._shallow_copy(shifted)
+
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
@@ -1752,31 +1744,19 @@ class BYearBegin(YearOffset):
_day_opt = 'business_start'
-class YearEnd(EndMixin, YearOffset):
+class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
_day_opt = 'end'
- @apply_index_wraps
- def apply_index(self, i):
- # convert month anchor to annual period tuple
- return self._end_apply_index(i, self.freqstr)
-
-class YearBegin(BeginMixin, YearOffset):
+class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
_day_opt = 'start'
- @apply_index_wraps
- def apply_index(self, i):
- freq_month = 12 if self.month == 1 else self.month - 1
- month = liboffsets._int_to_month[freq_month]
- freqstr = 'A-{month}'.format(month=month)
- return self._beg_apply_index(i, freqstr)
-
# ---------------------------------------------------------------------
# Special Offset Classes
@@ -2245,7 +2225,8 @@ def __eq__(self, other):
if isinstance(other, Tick):
return self.delta == other.delta
else:
- return DateOffset.__eq__(self, other)
+ # TODO: Are there cases where this should raise TypeError?
+ return False
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
@@ -2261,7 +2242,8 @@ def __ne__(self, other):
if isinstance(other, Tick):
return self.delta != other.delta
else:
- return DateOffset.__ne__(self, other)
+ # TODO: Are there cases where this should raise TypeError?
+ return True
@property
def delta(self):
| `tslibs.offsets.shift_quarters` should look like the the lovechild of `offsets.shift_months` and `QuarterOffset.apply`. It may be possible to de-duplicate some of that* at some point.
We get `apply_index` methods for all four `YearOffset` subclasses and all four `QuarterOffset` subclasses. (Previously there were implementations for the non-business versions, but they went through a code path that I found fragile). `BeginMixin` is no longer used, removed.
All 12 of the (non-custom) Month+Quarter+Year offset subclasses are now parametrized stubs with the actual implementations unified in parent classes.
Small cleanups and notes in offsets.
\* It would be convenient to be able to specify a function to accept either a `datetime` or a `pandas_datetimestruct` in cases where they quack alike. I tried to do a `ctypedef fused datetimelike` but cython objected. Any thoughts?
```
asv continuous -f 1.1 -E virtualenv master HEAD -b offset
[...]
before after ratio
[262e8ff3] [21504bfd]
- 18.4±0.9μs 16.7±0.06μs 0.91 offset.CBDay.time_custom_bday_apply_dt64
- 519±20ms 458±0.3ms 0.88 offset.SemiMonthOffset.time_end_apply_index
- 77.7±6ms 67.7±0.09ms 0.87 timeseries.ToDatetime.time_cache_false_with_dup_string_tzoffset_dates
- 542±20ms 459±2ms 0.85 offset.SemiMonthOffset.time_begin_incr_rng
- 209±3μs 132±0.2μs 0.63 offset.CBMonthBegin.time_custom_bmonthbegin_incr_n
- 21.5±0.1ms 6.82±1ms 0.32 offset.ApplyIndex.time_apply_series(<YearBegin: month=1>)
- 23.4±0.6ms 6.82±0.09ms 0.29 offset.ApplyIndex.time_apply_series(<QuarterBegin: startingMonth=3>)
- 22.0±0.5ms 5.17±0.09ms 0.23 offset.ApplyIndex.time_apply_index(<QuarterBegin: startingMonth=3>)
- 19.8±0.4ms 4.64±0.1ms 0.23 offset.ApplyIndex.time_apply_index(<YearBegin: month=1>)
- 487±1ms 8.13±0.06ms 0.02 offset.ApplyIndex.time_apply_series(<YearEnd: month=12>)
- 502±20ms 7.14±0.07ms 0.01 offset.ApplyIndex.time_apply_series(<QuarterEnd: startingMonth=3>)
- 494±0.4ms 6.02±0.2ms 0.01 offset.ApplyIndex.time_apply_index(<YearEnd: month=12>)
- 555±20ms 5.64±0.2ms 0.01 offset.ApplyIndex.time_apply_index(<QuarterEnd: startingMonth=3>)
- 1.52s 11.3±1ms 0.01 offset.ApplyIndex.time_apply_series(<BusinessYearEnd: month=12>)
- 1.41s 8.17±0.2ms 0.01 offset.ApplyIndex.time_apply_series(<BusinessYearBegin: month=1>)
- 1.82s 10.0±0.2ms 0.01 offset.ApplyIndex.time_apply_series(<BusinessQuarterEnd: startingMonth=3>)
- 1.47s 8.07±0.6ms 0.01 offset.ApplyIndex.time_apply_index(<BusinessQuarterEnd: startingMonth=3>)
- 1.57s 8.49±0.2ms 0.01 offset.ApplyIndex.time_apply_series(<BusinessQuarterBegin: startingMonth=3>)
- 1.66s 8.35±0.09ms 0.01 offset.ApplyIndex.time_apply_index(<BusinessYearEnd: month=12>)
- 1.53s 6.51±0.1ms 0.00 offset.ApplyIndex.time_apply_index(<BusinessQuarterBegin: startingMonth=3>)
- 1.63s 6.21±0.06ms 0.00 offset.ApplyIndex.time_apply_index(<BusinessYearBegin: month=1>)
```
Running a larger version now.
- [ ] closes #xxxx
- [x] tests added / passed --> apply_index for all relevant classes, make sure to hit negative n
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18522 | 2017-11-27T16:29:17Z | 2017-11-27T21:41:03Z | 2017-11-27T21:41:03Z | 2017-12-08T19:38:27Z |
API: empty map should not infer | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 1a08a1353a605..09b504cac5ed4 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -107,7 +107,7 @@ Other API Changes
- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`)
- `tseries.frequencies.get_freq_group()` and `tseries.frequencies.DAYS` are removed from the public API (:issue:`18034`)
- :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`)
-- :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`, :issue:`18482`).
+- :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`, :issue:`18482`, :issue:`18509`).
- :func:`Dataframe.unstack` will now default to filling with ``np.nan`` for ``object`` columns. (:issue:`12815`)
- :class:`IntervalIndex` constructor will raise if the ``closed`` parameter conflicts with how the input data is inferred to be closed (:issue:`18421`)
- Inserting missing values into indexes will work for all types of indexes and automatically insert the correct type of missing value (``NaN``, ``NaT``, etc.) regardless of the type passed in (:issue:`18295`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2bf3afe47d007..94e9947155c41 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2897,25 +2897,9 @@ def map(self, mapper, na_action=None):
names=names)
attributes['copy'] = False
-
- # we want to try to return our original dtype
- # ints infer to integer, but if we have
- # uints, would prefer to return these
- if is_unsigned_integer_dtype(self.dtype):
- inferred = lib.infer_dtype(new_values)
- if inferred == 'integer':
- attributes['dtype'] = self.dtype
-
- elif not new_values.size:
+ if not new_values.size:
# empty
attributes['dtype'] = self.dtype
- elif isna(new_values).all():
- # all nan
- inferred = lib.infer_dtype(self)
- if inferred in ['datetime', 'datetime64',
- 'timedelta', 'timedelta64',
- 'period']:
- new_values = [libts.NaT] * len(new_values)
return Index(new_values, **attributes)
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 99bdaf02e25ff..c1ee18526cc01 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1009,7 +1009,13 @@ def test_searchsorted_monotonic(self, indices):
def test_map(self):
# callable
index = self.create_index()
- expected = index
+
+ # we don't infer UInt64
+ if isinstance(index, pd.UInt64Index):
+ expected = index.astype('int64')
+ else:
+ expected = index
+
result = index.map(lambda x: x)
tm.assert_index_equal(result, expected)
@@ -1024,9 +1030,14 @@ def test_map_dictlike(self, mapper):
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip("skipping tests for {}".format(type(index)))
- expected = index
-
identity = mapper(index.values, index)
+
+ # we don't infer to UInt64 for a dict
+ if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
+ expected = index.astype('int64')
+ else:
+ expected = index
+
result = index.map(identity)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index a01c60a47c0f9..ad76d17c93c41 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -1,5 +1,6 @@
""" generic datetimelike tests """
import pytest
+import numpy as np
import pandas as pd
from .common import Base
import pandas.util.testing as tm
@@ -72,6 +73,6 @@ def test_map_dictlike(self, mapper):
# empty map; these map to np.nan because we cannot know
# to re-infer things
- expected = pd.Index([pd.NaT] * len(self.index))
+ expected = pd.Index([np.nan] * len(self.index))
result = self.index.map(mapper([], []))
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 0b782e600822a..9ef7a43b2193a 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -885,9 +885,7 @@ def test_map_dictlike(self, mapper):
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
- if name == 'uintIndex':
- expected = expected.astype('uint64')
- elif name == 'empty':
+ if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index cafe6a34720be..8899ab585d6cb 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -377,6 +377,14 @@ def test_map(self):
exp = Series([np.nan, 'B', 'C', 'D'])
tm.assert_series_equal(a.map(c), exp)
+ @pytest.mark.parametrize("index", tm.all_index_generator(10))
+ def test_map_empty(self, index):
+ s = Series(index)
+ result = s.map({})
+
+ expected = pd.Series(np.nan, index=s.index)
+ tm.assert_series_equal(result, expected)
+
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py
index b0154f6db7022..29dd99ac9c655 100644
--- a/pandas/tests/test_resample.py
+++ b/pandas/tests/test_resample.py
@@ -816,21 +816,23 @@ def test_resample_empty_dataframe(self):
# test size for GH13212 (currently stays as df)
- def test_resample_empty_dtypes(self):
+ @pytest.mark.parametrize("index", tm.all_timeseries_index_generator(0))
+ @pytest.mark.parametrize(
+ "dtype",
+ [np.float, np.int, np.object, 'datetime64[ns]'])
+ def test_resample_empty_dtypes(self, index, dtype):
# Empty series were sometimes causing a segfault (for the functions
# with Cython bounds-checking disabled) or an IndexError. We just run
# them to ensure they no longer do. (GH #10228)
- for index in tm.all_timeseries_index_generator(0):
- for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
- for how in downsample_methods + upsample_methods:
- empty_series = Series([], index, dtype)
- try:
- getattr(empty_series.resample('d'), how)()
- except DataError:
- # Ignore these since some combinations are invalid
- # (ex: doing mean with dtype of np.object)
- pass
+ for how in downsample_methods + upsample_methods:
+ empty_series = Series([], index, dtype)
+ try:
+ getattr(empty_series.resample('d'), how)()
+ except DataError:
+ # Ignore these since some combinations are invalid
+ # (ex: doing mean with dtype of np.object)
+ pass
def test_resample_loffset_arg_type(self):
# GH 13218, 15002
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index ff6fa8ae717d3..850c42a011958 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -1695,7 +1695,8 @@ def all_index_generator(k=10):
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
- makeTimedeltaIndex, makeBoolIndex,
+ makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
+ makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
| closes #18509
| https://api.github.com/repos/pandas-dev/pandas/pulls/18517 | 2017-11-27T12:07:54Z | 2017-12-02T17:34:37Z | 2017-12-02T17:34:37Z | 2017-12-04T10:23:47Z |
BUG: Index constructor support tupleization for mixed levels | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 8e6382c18343e..988da470eda35 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -147,6 +147,7 @@ Indexing
- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`)
- Bug in :func:`MultiIndex.remove_unused_levels`` which would fill nan values (:issue:`18417`)
- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`)
+- Bug in :class:`Index`` construction from list of mixed type tuples (:issue:`18505`)
- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`)
- Bug in ``IntervalIndex.symmetric_difference()`` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`)
diff --git a/pandas/core/base.py b/pandas/core/base.py
index cce0f384cb983..ae92b62ce1d11 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -874,8 +874,9 @@ def _map_values(self, mapper, na_action=None):
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
- from pandas import Series
- mapper = Series(mapper, index=mapper.keys())
+ from pandas import Series, Index
+ index = Index(mapper, tupleize_cols=False)
+ mapper = Series(mapper, index=index)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index f4332ac244af4..10f9022e2666b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -353,22 +353,15 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
- if (tupleize_cols and isinstance(data, list) and data and
- isinstance(data[0], tuple)):
-
+ if tupleize_cols and is_list_like(data) and data:
+ if is_iterator(data):
+ data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
- try:
- # must be orderable in py3
- if compat.PY3:
- sorted(data)
- from .multi import MultiIndex
- return MultiIndex.from_tuples(
- data, names=name or kwargs.get('names'))
- except (TypeError, KeyError):
- # python2 - MultiIndex fails on mixed types
- pass
+ from .multi import MultiIndex
+ return MultiIndex.from_tuples(
+ data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = _asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 372c11b296d9e..0b71f6bb3fb01 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -106,6 +106,15 @@ def test_construction_list_mixed_tuples(self):
assert isinstance(idx2, Index)
assert not isinstance(idx2, MultiIndex)
+ @pytest.mark.parametrize('na_value', [None, np.nan])
+ @pytest.mark.parametrize('vtype', [list, tuple, iter])
+ def test_construction_list_tuples_nan(self, na_value, vtype):
+ # GH 18505 : valid tuples containing NaN
+ values = [(1, 'two'), (3., na_value)]
+ result = Index(vtype(values))
+ expected = MultiIndex.from_tuples(values)
+ tm.assert_index_equal(result, expected)
+
def test_constructor_from_index_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index 7ffda3a58ac1c..ccc04da3299fe 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -658,6 +658,16 @@ def test_constructor_tuple_of_tuples(self):
s = Series(data)
assert tuple(s) == data
+ @pytest.mark.xfail(reason='GH 18480 (Series initialization from dict with '
+ 'NaN keys')
+ def test_constructor_dict_of_tuples(self):
+ data = {(1, 2): 3,
+ (None, 5): 6}
+ result = Series(data).sort_values()
+ expected = Series([3, 6],
+ index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
+ tm.assert_series_equal(result, expected)
+
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
pytest.raises(TypeError, Series, values)
| - [x] closes #18505
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18514 | 2017-11-27T08:45:15Z | 2017-11-28T09:35:38Z | 2017-11-28T09:35:38Z | 2017-11-28T11:13:34Z |
make get_firstbday, get_lastbday nogil | diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 654c51f0ca842..6f5ad2ae45f50 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -24,7 +24,7 @@ from frequencies cimport get_freq_code
from nattype cimport NPY_NAT
from np_datetime cimport (pandas_datetimestruct,
dtstruct_to_dt64, dt64_to_dtstruct,
- is_leapyear, days_per_month_table)
+ is_leapyear, days_per_month_table, dayofweek)
# ---------------------------------------------------------------------
# Constants
@@ -145,45 +145,44 @@ def apply_index_wraps(func):
# ---------------------------------------------------------------------
# Business Helpers
-cpdef int get_lastbday(int wkday, int days_in_month):
+cpdef int get_lastbday(int year, int month) nogil:
"""
Find the last day of the month that is a business day.
- (wkday, days_in_month) is the output from monthrange(year, month)
-
Parameters
----------
- wkday : int
- days_in_month : int
+ year : int
+ month : int
Returns
-------
last_bday : int
"""
+ cdef:
+ int wkday, days_in_month
+
+ wkday = dayofweek(year, month, 1)
+ days_in_month = get_days_in_month(year, month)
return days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0)
-cpdef int get_firstbday(int wkday, int days_in_month=0):
+cpdef int get_firstbday(int year, int month) nogil:
"""
Find the first day of the month that is a business day.
- (wkday, days_in_month) is the output from monthrange(year, month)
-
Parameters
----------
- wkday : int
- days_in_month : int, default 0
+ year : int
+ month : int
Returns
-------
first_bday : int
-
- Notes
- -----
- `days_in_month` arg is a dummy so that this has the same signature as
- `get_lastbday`.
"""
- cdef int first
+ cdef:
+ int first, wkday
+
+ wkday = dayofweek(year, month, 1)
first = 1
if wkday == 5: # on Saturday
first = 3
@@ -556,52 +555,50 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
out[i] = dtstruct_to_dt64(&dts)
elif day == 'business_start':
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = NPY_NAT
- continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
- dt64_to_dtstruct(dtindex[i], &dts)
- months_to_roll = months
- wkday, days_in_month = monthrange(dts.year, dts.month)
- compare_day = get_firstbday(wkday, days_in_month)
+ dt64_to_dtstruct(dtindex[i], &dts)
+ months_to_roll = months
+ compare_day = get_firstbday(dts.year, dts.month)
- if months_to_roll > 0 and dts.day < compare_day:
- months_to_roll -= 1
- elif months_to_roll <= 0 and dts.day > compare_day:
- # as if rolled forward already
- months_to_roll += 1
+ if months_to_roll > 0 and dts.day < compare_day:
+ months_to_roll -= 1
+ elif months_to_roll <= 0 and dts.day > compare_day:
+ # as if rolled forward already
+ months_to_roll += 1
- dts.year = year_add_months(dts, months_to_roll)
- dts.month = month_add_months(dts, months_to_roll)
+ dts.year = year_add_months(dts, months_to_roll)
+ dts.month = month_add_months(dts, months_to_roll)
- wkday, days_in_month = monthrange(dts.year, dts.month)
- dts.day = get_firstbday(wkday, days_in_month)
- out[i] = dtstruct_to_dt64(&dts)
+ dts.day = get_firstbday(dts.year, dts.month)
+ out[i] = dtstruct_to_dt64(&dts)
elif day == 'business_end':
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = NPY_NAT
- continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
- dt64_to_dtstruct(dtindex[i], &dts)
- months_to_roll = months
- wkday, days_in_month = monthrange(dts.year, dts.month)
- compare_day = get_lastbday(wkday, days_in_month)
+ dt64_to_dtstruct(dtindex[i], &dts)
+ months_to_roll = months
+ compare_day = get_lastbday(dts.year, dts.month)
- if months_to_roll > 0 and dts.day < compare_day:
- months_to_roll -= 1
- elif months_to_roll <= 0 and dts.day > compare_day:
- # as if rolled forward already
- months_to_roll += 1
+ if months_to_roll > 0 and dts.day < compare_day:
+ months_to_roll -= 1
+ elif months_to_roll <= 0 and dts.day > compare_day:
+ # as if rolled forward already
+ months_to_roll += 1
- dts.year = year_add_months(dts, months_to_roll)
- dts.month = month_add_months(dts, months_to_roll)
+ dts.year = year_add_months(dts, months_to_roll)
+ dts.month = month_add_months(dts, months_to_roll)
- wkday, days_in_month = monthrange(dts.year, dts.month)
- dts.day = get_lastbday(wkday, days_in_month)
- out[i] = dtstruct_to_dt64(&dts)
+ dts.day = get_lastbday(dts.year, dts.month)
+ out[i] = dtstruct_to_dt64(&dts)
else:
raise ValueError("day must be None, 'start', 'end', "
@@ -635,7 +632,7 @@ cpdef datetime shift_month(datetime stamp, int months, object day_opt=None):
"""
cdef:
int year, month, day
- int wkday, days_in_month, dy
+ int days_in_month, dy
dy = (stamp.month + months) // 12
month = (stamp.month + months) % 12
@@ -645,20 +642,21 @@ cpdef datetime shift_month(datetime stamp, int months, object day_opt=None):
dy -= 1
year = stamp.year + dy
- wkday, days_in_month = monthrange(year, month)
if day_opt is None:
+ days_in_month = get_days_in_month(year, month)
day = min(stamp.day, days_in_month)
elif day_opt == 'start':
day = 1
elif day_opt == 'end':
- day = days_in_month
+ day = get_days_in_month(year, month)
elif day_opt == 'business_start':
# first business day of month
- day = get_firstbday(wkday, days_in_month)
+ day = get_firstbday(year, month)
elif day_opt == 'business_end':
# last business day of month
- day = get_lastbday(wkday, days_in_month)
+ day = get_lastbday(year, month)
elif is_integer_object(day_opt):
+ days_in_month = get_days_in_month(year, month)
day = min(day_opt, days_in_month)
else:
raise ValueError(day_opt)
@@ -691,22 +689,22 @@ cpdef int get_day_of_month(datetime other, day_opt) except? -1:
"""
cdef:
- int wkday, days_in_month
+ int days_in_month
if day_opt == 'start':
return 1
-
- wkday, days_in_month = monthrange(other.year, other.month)
- if day_opt == 'end':
+ elif day_opt == 'end':
+ days_in_month = get_days_in_month(other.year, other.month)
return days_in_month
elif day_opt == 'business_start':
# first business day of month
- return get_firstbday(wkday, days_in_month)
+ return get_firstbday(other.year, other.month)
elif day_opt == 'business_end':
# last business day of month
- return get_lastbday(wkday, days_in_month)
+ return get_lastbday(other.year, other.month)
elif is_integer_object(day_opt):
- day = min(day_opt, days_in_month)
+ days_in_month = get_days_in_month(other.year, other.month)
+ return min(day_opt, days_in_month)
elif day_opt is None:
# Note: unlike `shift_month`, get_day_of_month does not
# allow day_opt = None
diff --git a/pandas/tests/tseries/offsets/test_liboffsets.py b/pandas/tests/tseries/offsets/test_liboffsets.py
index 321104222936b..8aa32bc600ee6 100644
--- a/pandas/tests/tseries/offsets/test_liboffsets.py
+++ b/pandas/tests/tseries/offsets/test_liboffsets.py
@@ -6,7 +6,6 @@
import pytest
-from pandas._libs import tslib
from pandas import Timestamp
import pandas._libs.tslibs.offsets as liboffsets
@@ -15,25 +14,21 @@
def test_get_lastbday():
dt = datetime(2017, 11, 30)
assert dt.weekday() == 3 # i.e. this is a business day
- wkday, days_in_month = tslib.monthrange(dt.year, dt.month)
- assert liboffsets.get_lastbday(wkday, days_in_month) == 30
+ assert liboffsets.get_lastbday(dt.year, dt.month) == 30
dt = datetime(1993, 10, 31)
assert dt.weekday() == 6 # i.e. this is not a business day
- wkday, days_in_month = tslib.monthrange(dt.year, dt.month)
- assert liboffsets.get_lastbday(wkday, days_in_month) == 29
+ assert liboffsets.get_lastbday(dt.year, dt.month) == 29
def test_get_firstbday():
dt = datetime(2017, 4, 1)
assert dt.weekday() == 5 # i.e. not a weekday
- wkday, days_in_month = tslib.monthrange(dt.year, dt.month)
- assert liboffsets.get_firstbday(wkday, days_in_month) == 3
+ assert liboffsets.get_firstbday(dt.year, dt.month) == 3
dt = datetime(1993, 10, 1)
assert dt.weekday() == 4 # i.e. a business day
- wkday, days_in_month = tslib.monthrange(dt.year, dt.month)
- assert liboffsets.get_firstbday(wkday, days_in_month) == 1
+ assert liboffsets.get_firstbday(dt.year, dt.month) == 1
def test_shift_month():
| These are only used in `tslibs.offsets` and in tests, _and_ every use is preceeded by a call to `monthrange`. But because `monthrange` returns a tuple, it cannot be declared `nogil`. This PR removes the call to `monthrange` in favor of the two separate calls that go into `monthrange` (actually only one is needed for `get_first_bday`). On the side we get rid of a few unnecessary calls and get to nogil two more cases worth of the apply_index loops.
There are more cases coming up for `shift_months` (i.e. more subclasses getting implementations of `apply_index`) and these will benefit from these changes too.
- [ ] closes #xxxx
- [ ] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18512 | 2017-11-27T06:19:54Z | 2017-11-27T11:26:48Z | 2017-11-27T11:26:48Z | 2017-11-27T21:37:20Z |
Split test_categorical into subpackage (#18497) | diff --git a/pandas/tests/categorical/__init__.py b/pandas/tests/categorical/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/categorical/common.py b/pandas/tests/categorical/common.py
new file mode 100644
index 0000000000000..9462482553ed8
--- /dev/null
+++ b/pandas/tests/categorical/common.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+
+from pandas import Categorical
+
+
+class TestCategorical(object):
+
+ def setup_method(self, method):
+ self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
+ ordered=True)
diff --git a/pandas/tests/categorical/test_analytics.py b/pandas/tests/categorical/test_analytics.py
new file mode 100644
index 0000000000000..53d0e596a1d99
--- /dev/null
+++ b/pandas/tests/categorical/test_analytics.py
@@ -0,0 +1,320 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+import sys
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import Categorical, Index, Series
+
+from pandas.compat import PYPY
+
+
+class TestCategoricalAnalytics(object):
+
+ def test_min_max(self):
+
+ # unordered cats have no min/max
+ cat = Categorical(["a", "b", "c", "d"], ordered=False)
+ pytest.raises(TypeError, lambda: cat.min())
+ pytest.raises(TypeError, lambda: cat.max())
+
+ cat = Categorical(["a", "b", "c", "d"], ordered=True)
+ _min = cat.min()
+ _max = cat.max()
+ assert _min == "a"
+ assert _max == "d"
+
+ cat = Categorical(["a", "b", "c", "d"],
+ categories=['d', 'c', 'b', 'a'], ordered=True)
+ _min = cat.min()
+ _max = cat.max()
+ assert _min == "d"
+ assert _max == "a"
+
+ cat = Categorical([np.nan, "b", "c", np.nan],
+ categories=['d', 'c', 'b', 'a'], ordered=True)
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == "b"
+
+ _min = cat.min(numeric_only=True)
+ assert _min == "c"
+ _max = cat.max(numeric_only=True)
+ assert _max == "b"
+
+ cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
+ ordered=True)
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == 1
+
+ _min = cat.min(numeric_only=True)
+ assert _min == 2
+ _max = cat.max(numeric_only=True)
+ assert _max == 1
+
+ @pytest.mark.parametrize("values,categories,exp_mode", [
+ ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
+ ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
+ ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
+ ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
+ ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
+ ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4])])
+ def test_mode(self, values, categories, exp_mode):
+ s = Categorical(values, categories=categories, ordered=True)
+ res = s.mode()
+ exp = Categorical(exp_mode, categories=categories, ordered=True)
+ tm.assert_categorical_equal(res, exp)
+
+ def test_searchsorted(self):
+ # https://github.com/pandas-dev/pandas/issues/8420
+ # https://github.com/pandas-dev/pandas/issues/14522
+
+ c1 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
+ categories=['cheese', 'milk', 'apple', 'bread'],
+ ordered=True)
+ s1 = Series(c1)
+ c2 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
+ categories=['cheese', 'milk', 'apple', 'bread'],
+ ordered=False)
+ s2 = Series(c2)
+
+ # Searching for single item argument, side='left' (default)
+ res_cat = c1.searchsorted('apple')
+ res_ser = s1.searchsorted('apple')
+ exp = np.array([2], dtype=np.intp)
+ tm.assert_numpy_array_equal(res_cat, exp)
+ tm.assert_numpy_array_equal(res_ser, exp)
+
+ # Searching for single item array, side='left' (default)
+ res_cat = c1.searchsorted(['bread'])
+ res_ser = s1.searchsorted(['bread'])
+ exp = np.array([3], dtype=np.intp)
+ tm.assert_numpy_array_equal(res_cat, exp)
+ tm.assert_numpy_array_equal(res_ser, exp)
+
+ # Searching for several items array, side='right'
+ res_cat = c1.searchsorted(['apple', 'bread'], side='right')
+ res_ser = s1.searchsorted(['apple', 'bread'], side='right')
+ exp = np.array([3, 5], dtype=np.intp)
+ tm.assert_numpy_array_equal(res_cat, exp)
+ tm.assert_numpy_array_equal(res_ser, exp)
+
+ # Searching for a single value that is not from the Categorical
+ pytest.raises(ValueError, lambda: c1.searchsorted('cucumber'))
+ pytest.raises(ValueError, lambda: s1.searchsorted('cucumber'))
+
+ # Searching for multiple values one of each is not from the Categorical
+ pytest.raises(ValueError,
+ lambda: c1.searchsorted(['bread', 'cucumber']))
+ pytest.raises(ValueError,
+ lambda: s1.searchsorted(['bread', 'cucumber']))
+
+ # searchsorted call for unordered Categorical
+ pytest.raises(ValueError, lambda: c2.searchsorted('apple'))
+ pytest.raises(ValueError, lambda: s2.searchsorted('apple'))
+
+ with tm.assert_produces_warning(FutureWarning):
+ res = c1.searchsorted(v=['bread'])
+ exp = np.array([3], dtype=np.intp)
+ tm.assert_numpy_array_equal(res, exp)
+
+ def test_unique(self):
+ # categories are reordered based on value when ordered=False
+ cat = Categorical(["a", "b"])
+ exp = Index(["a", "b"])
+ res = cat.unique()
+ tm.assert_index_equal(res.categories, exp)
+ tm.assert_categorical_equal(res, cat)
+
+ cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
+ res = cat.unique()
+ tm.assert_index_equal(res.categories, exp)
+ tm.assert_categorical_equal(res, Categorical(exp))
+
+ cat = Categorical(["c", "a", "b", "a", "a"],
+ categories=["a", "b", "c"])
+ exp = Index(["c", "a", "b"])
+ res = cat.unique()
+ tm.assert_index_equal(res.categories, exp)
+ exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
+ tm.assert_categorical_equal(res, exp_cat)
+
+ # nan must be removed
+ cat = Categorical(["b", np.nan, "b", np.nan, "a"],
+ categories=["a", "b", "c"])
+ res = cat.unique()
+ exp = Index(["b", "a"])
+ tm.assert_index_equal(res.categories, exp)
+ exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
+ tm.assert_categorical_equal(res, exp_cat)
+
+ def test_unique_ordered(self):
+ # keep categories order when ordered=True
+ cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
+ res = cat.unique()
+ exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
+ ordered=True)
+ res = cat.unique()
+ exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
+ ordered=True)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
+ ordered=True)
+ res = cat.unique()
+ exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
+ ordered=True)
+ res = cat.unique()
+ exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
+ ordered=True)
+ tm.assert_categorical_equal(res, exp_cat)
+
+ def test_unique_index_series(self):
+ c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
+ # Categorical.unique sorts categories by appearance order
+ # if ordered=False
+ exp = Categorical([3, 1, 2], categories=[3, 1, 2])
+ tm.assert_categorical_equal(c.unique(), exp)
+
+ tm.assert_index_equal(Index(c).unique(), Index(exp))
+ tm.assert_categorical_equal(Series(c).unique(), exp)
+
+ c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
+ exp = Categorical([1, 2], categories=[1, 2])
+ tm.assert_categorical_equal(c.unique(), exp)
+ tm.assert_index_equal(Index(c).unique(), Index(exp))
+ tm.assert_categorical_equal(Series(c).unique(), exp)
+
+ c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
+ # Categorical.unique keeps categories order if ordered=True
+ exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
+ tm.assert_categorical_equal(c.unique(), exp)
+
+ tm.assert_index_equal(Index(c).unique(), Index(exp))
+ tm.assert_categorical_equal(Series(c).unique(), exp)
+
+ def test_shift(self):
+ # GH 9416
+ cat = Categorical(['a', 'b', 'c', 'd', 'a'])
+
+ # shift forward
+ sp1 = cat.shift(1)
+ xp1 = Categorical([np.nan, 'a', 'b', 'c', 'd'])
+ tm.assert_categorical_equal(sp1, xp1)
+ tm.assert_categorical_equal(cat[:-1], sp1[1:])
+
+ # shift back
+ sn2 = cat.shift(-2)
+ xp2 = Categorical(['c', 'd', 'a', np.nan, np.nan],
+ categories=['a', 'b', 'c', 'd'])
+ tm.assert_categorical_equal(sn2, xp2)
+ tm.assert_categorical_equal(cat[2:], sn2[:-2])
+
+ # shift by zero
+ tm.assert_categorical_equal(cat, cat.shift(0))
+
+ def test_nbytes(self):
+ cat = Categorical([1, 2, 3])
+ exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
+ assert cat.nbytes == exp
+
+ def test_memory_usage(self):
+ cat = Categorical([1, 2, 3])
+
+ # .categories is an index, so we include the hashtable
+ assert 0 < cat.nbytes <= cat.memory_usage()
+ assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
+
+ cat = Categorical(['foo', 'foo', 'bar'])
+ assert cat.memory_usage(deep=True) > cat.nbytes
+
+ if not PYPY:
+ # sys.getsizeof will call the .memory_usage with
+ # deep=True, and add on some GC overhead
+ diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
+ assert abs(diff) < 100
+
+ def test_map(self):
+ c = Categorical(list('ABABC'), categories=list('CBA'), ordered=True)
+ result = c.map(lambda x: x.lower())
+ exp = Categorical(list('ababc'), categories=list('cba'), ordered=True)
+ tm.assert_categorical_equal(result, exp)
+
+ c = Categorical(list('ABABC'), categories=list('ABC'), ordered=False)
+ result = c.map(lambda x: x.lower())
+ exp = Categorical(list('ababc'), categories=list('abc'), ordered=False)
+ tm.assert_categorical_equal(result, exp)
+
+ result = c.map(lambda x: 1)
+ # GH 12766: Return an index not an array
+ tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
+
+ def test_validate_inplace(self):
+ cat = Categorical(['A', 'B', 'B', 'C', 'A'])
+ invalid_values = [1, "True", [1, 2, 3], 5.0]
+
+ for value in invalid_values:
+ with pytest.raises(ValueError):
+ cat.set_ordered(value=True, inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.as_ordered(inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.as_unordered(inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.set_categories(['X', 'Y', 'Z'], rename=True, inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.rename_categories(['X', 'Y', 'Z'], inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.reorder_categories(
+ ['X', 'Y', 'Z'], ordered=True, inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.add_categories(
+ new_categories=['D', 'E', 'F'], inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.remove_categories(removals=['D', 'E', 'F'], inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.remove_unused_categories(inplace=value)
+
+ with pytest.raises(ValueError):
+ cat.sort_values(inplace=value)
+
+ def test_repeat(self):
+ # GH10183
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
+ res = cat.repeat(2)
+ tm.assert_categorical_equal(res, exp)
+
+ def test_numpy_repeat(self):
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
+ tm.assert_categorical_equal(np.repeat(cat, 2), exp)
+
+ msg = "the 'axis' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.repeat, cat, 2, axis=1)
+
+ def test_isna(self):
+ exp = np.array([False, False, True])
+ c = Categorical(["a", "b", np.nan])
+ res = c.isna()
+
+ tm.assert_numpy_array_equal(res, exp)
diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py
new file mode 100644
index 0000000000000..7cc0aafaf05b6
--- /dev/null
+++ b/pandas/tests/categorical/test_api.py
@@ -0,0 +1,522 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import Categorical, CategoricalIndex, Index, Series, DataFrame
+
+from pandas.core.categorical import _recode_for_categories
+from pandas.tests.categorical.common import TestCategorical
+
+
+class TestCategoricalAPI(object):
+
+ def test_ordered_api(self):
+ # GH 9347
+ cat1 = Categorical(list('acb'), ordered=False)
+ tm.assert_index_equal(cat1.categories, Index(['a', 'b', 'c']))
+ assert not cat1.ordered
+
+ cat2 = Categorical(list('acb'), categories=list('bca'), ordered=False)
+ tm.assert_index_equal(cat2.categories, Index(['b', 'c', 'a']))
+ assert not cat2.ordered
+
+ cat3 = Categorical(list('acb'), ordered=True)
+ tm.assert_index_equal(cat3.categories, Index(['a', 'b', 'c']))
+ assert cat3.ordered
+
+ cat4 = Categorical(list('acb'), categories=list('bca'), ordered=True)
+ tm.assert_index_equal(cat4.categories, Index(['b', 'c', 'a']))
+ assert cat4.ordered
+
+ def test_set_ordered(self):
+
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ cat2 = cat.as_unordered()
+ assert not cat2.ordered
+ cat2 = cat.as_ordered()
+ assert cat2.ordered
+ cat2.as_unordered(inplace=True)
+ assert not cat2.ordered
+ cat2.as_ordered(inplace=True)
+ assert cat2.ordered
+
+ assert cat2.set_ordered(True).ordered
+ assert not cat2.set_ordered(False).ordered
+ cat2.set_ordered(True, inplace=True)
+ assert cat2.ordered
+ cat2.set_ordered(False, inplace=True)
+ assert not cat2.ordered
+
+ # removed in 0.19.0
+ msg = "can\'t set attribute"
+ with tm.assert_raises_regex(AttributeError, msg):
+ cat.ordered = True
+ with tm.assert_raises_regex(AttributeError, msg):
+ cat.ordered = False
+
+ def test_rename_categories(self):
+ cat = Categorical(["a", "b", "c", "a"])
+
+ # inplace=False: the old one must not be changed
+ res = cat.rename_categories([1, 2, 3])
+ tm.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1],
+ dtype=np.int64))
+ tm.assert_index_equal(res.categories, Index([1, 2, 3]))
+
+ exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
+
+ exp_cat = Index(["a", "b", "c"])
+ tm.assert_index_equal(cat.categories, exp_cat)
+ res = cat.rename_categories([1, 2, 3], inplace=True)
+
+ # and now inplace
+ assert res is None
+ tm.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1],
+ dtype=np.int64))
+ tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
+
+ # Lengthen
+ with pytest.raises(ValueError):
+ cat.rename_categories([1, 2, 3, 4])
+
+ # Shorten
+ with pytest.raises(ValueError):
+ cat.rename_categories([1, 2])
+
+ def test_rename_categories_series(self):
+ # https://github.com/pandas-dev/pandas/issues/17981
+ c = Categorical(['a', 'b'])
+ xpr = "Treating Series 'new_categories' as a list-like "
+ with tm.assert_produces_warning(FutureWarning) as rec:
+ result = c.rename_categories(Series([0, 1]))
+
+ assert len(rec) == 1
+ assert xpr in str(rec[0].message)
+ expected = Categorical([0, 1])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_rename_categories_dict(self):
+ # GH 17336
+ cat = Categorical(['a', 'b', 'c', 'd'])
+ res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1})
+ expected = Index([4, 3, 2, 1])
+ tm.assert_index_equal(res.categories, expected)
+
+ # Test for inplace
+ res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1},
+ inplace=True)
+ assert res is None
+ tm.assert_index_equal(cat.categories, expected)
+
+ # Test for dicts of smaller length
+ cat = Categorical(['a', 'b', 'c', 'd'])
+ res = cat.rename_categories({'a': 1, 'c': 3})
+
+ expected = Index([1, 'b', 3, 'd'])
+ tm.assert_index_equal(res.categories, expected)
+
+ # Test for dicts with bigger length
+ cat = Categorical(['a', 'b', 'c', 'd'])
+ res = cat.rename_categories({'a': 1, 'b': 2, 'c': 3,
+ 'd': 4, 'e': 5, 'f': 6})
+ expected = Index([1, 2, 3, 4])
+ tm.assert_index_equal(res.categories, expected)
+
+ # Test for dicts with no items from old categories
+ cat = Categorical(['a', 'b', 'c', 'd'])
+ res = cat.rename_categories({'f': 1, 'g': 3})
+
+ expected = Index(['a', 'b', 'c', 'd'])
+ tm.assert_index_equal(res.categories, expected)
+
+ def test_reorder_categories(self):
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ old = cat.copy()
+ new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
+ ordered=True)
+
+ # first inplace == False
+ res = cat.reorder_categories(["c", "b", "a"])
+ # cat must be the same as before
+ tm.assert_categorical_equal(cat, old)
+ # only res is changed
+ tm.assert_categorical_equal(res, new)
+
+ # inplace == True
+ res = cat.reorder_categories(["c", "b", "a"], inplace=True)
+ assert res is None
+ tm.assert_categorical_equal(cat, new)
+
+ # not all "old" included in "new"
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+
+ def f():
+ cat.reorder_categories(["a"])
+
+ pytest.raises(ValueError, f)
+
+ # still not all "old" in "new"
+ def f():
+ cat.reorder_categories(["a", "b", "d"])
+
+ pytest.raises(ValueError, f)
+
+ # all "old" included in "new", but too long
+ def f():
+ cat.reorder_categories(["a", "b", "c", "d"])
+
+ pytest.raises(ValueError, f)
+
+ def test_add_categories(self):
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ old = cat.copy()
+ new = Categorical(["a", "b", "c", "a"],
+ categories=["a", "b", "c", "d"], ordered=True)
+
+ # first inplace == False
+ res = cat.add_categories("d")
+ tm.assert_categorical_equal(cat, old)
+ tm.assert_categorical_equal(res, new)
+
+ res = cat.add_categories(["d"])
+ tm.assert_categorical_equal(cat, old)
+ tm.assert_categorical_equal(res, new)
+
+ # inplace == True
+ res = cat.add_categories("d", inplace=True)
+ tm.assert_categorical_equal(cat, new)
+ assert res is None
+
+ # new is in old categories
+ def f():
+ cat.add_categories(["d"])
+
+ pytest.raises(ValueError, f)
+
+ # GH 9927
+ cat = Categorical(list("abc"), ordered=True)
+ expected = Categorical(
+ list("abc"), categories=list("abcde"), ordered=True)
+ # test with Series, np.array, index, list
+ res = cat.add_categories(Series(["d", "e"]))
+ tm.assert_categorical_equal(res, expected)
+ res = cat.add_categories(np.array(["d", "e"]))
+ tm.assert_categorical_equal(res, expected)
+ res = cat.add_categories(Index(["d", "e"]))
+ tm.assert_categorical_equal(res, expected)
+ res = cat.add_categories(["d", "e"])
+ tm.assert_categorical_equal(res, expected)
+
+ def test_set_categories(self):
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ exp_categories = Index(["c", "b", "a"])
+ exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
+
+ res = cat.set_categories(["c", "b", "a"], inplace=True)
+ tm.assert_index_equal(cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_values)
+ assert res is None
+
+ res = cat.set_categories(["a", "b", "c"])
+ # cat must be the same as before
+ tm.assert_index_equal(cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_values)
+ # only res is changed
+ exp_categories_back = Index(["a", "b", "c"])
+ tm.assert_index_equal(res.categories, exp_categories_back)
+ tm.assert_numpy_array_equal(res.__array__(), exp_values)
+
+ # not all "old" included in "new" -> all not included ones are now
+ # np.nan
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ res = cat.set_categories(["a"])
+ tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0],
+ dtype=np.int8))
+
+ # still not all "old" in "new"
+ res = cat.set_categories(["a", "b", "d"])
+ tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0],
+ dtype=np.int8))
+ tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
+
+ # all "old" included in "new"
+ cat = cat.set_categories(["a", "b", "c", "d"])
+ exp_categories = Index(["a", "b", "c", "d"])
+ tm.assert_index_equal(cat.categories, exp_categories)
+
+ # internals...
+ c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
+ tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0],
+ dtype=np.int8))
+ tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
+
+ exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(c.get_values(), exp)
+
+ # all "pointers" to '4' must be changed from 3 to 0,...
+ c = c.set_categories([4, 3, 2, 1])
+
+ # positions are changed
+ tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3],
+ dtype=np.int8))
+
+ # categories are now in new order
+ tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
+
+ # output is the same
+ exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(c.get_values(), exp)
+ assert c.min() == 4
+ assert c.max() == 1
+
+ # set_categories should set the ordering if specified
+ c2 = c.set_categories([4, 3, 2, 1], ordered=False)
+ assert not c2.ordered
+
+ tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
+
+ # set_categories should pass thru the ordering
+ c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
+ assert not c2.ordered
+
+ tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
+
+ @pytest.mark.parametrize('values, categories, new_categories', [
+ # No NaNs, same cats, same order
+ (['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
+ # Same, unsorted
+ (['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
+ # NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ # Introduce NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ # No overlap
+ (['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
+ ])
+ @pytest.mark.parametrize('ordered', [True, False])
+ def test_set_categories_many(self, values, categories, new_categories,
+ ordered):
+ c = Categorical(values, categories)
+ expected = Categorical(values, new_categories, ordered)
+ result = c.set_categories(new_categories, ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_set_categories_private(self):
+ cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
+ cat._set_categories(['a', 'c', 'd', 'e'])
+ expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
+ tm.assert_categorical_equal(cat, expected)
+
+ # fastpath
+ cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
+ cat._set_categories(['a', 'c', 'd', 'e'], fastpath=True)
+ expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
+ tm.assert_categorical_equal(cat, expected)
+
+ def test_remove_categories(self):
+ cat = Categorical(["a", "b", "c", "a"], ordered=True)
+ old = cat.copy()
+ new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
+ ordered=True)
+
+ # first inplace == False
+ res = cat.remove_categories("c")
+ tm.assert_categorical_equal(cat, old)
+ tm.assert_categorical_equal(res, new)
+
+ res = cat.remove_categories(["c"])
+ tm.assert_categorical_equal(cat, old)
+ tm.assert_categorical_equal(res, new)
+
+ # inplace == True
+ res = cat.remove_categories("c", inplace=True)
+ tm.assert_categorical_equal(cat, new)
+ assert res is None
+
+ # removal is not in categories
+ def f():
+ cat.remove_categories(["c"])
+
+ pytest.raises(ValueError, f)
+
+ def test_remove_unused_categories(self):
+ c = Categorical(["a", "b", "c", "d", "a"],
+ categories=["a", "b", "c", "d", "e"])
+ exp_categories_all = Index(["a", "b", "c", "d", "e"])
+ exp_categories_dropped = Index(["a", "b", "c", "d"])
+
+ tm.assert_index_equal(c.categories, exp_categories_all)
+
+ res = c.remove_unused_categories()
+ tm.assert_index_equal(res.categories, exp_categories_dropped)
+ tm.assert_index_equal(c.categories, exp_categories_all)
+
+ res = c.remove_unused_categories(inplace=True)
+ tm.assert_index_equal(c.categories, exp_categories_dropped)
+ assert res is None
+
+ # with NaN values (GH11599)
+ c = Categorical(["a", "b", "c", np.nan],
+ categories=["a", "b", "c", "d", "e"])
+ res = c.remove_unused_categories()
+ tm.assert_index_equal(res.categories,
+ Index(np.array(["a", "b", "c"])))
+ exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
+ tm.assert_numpy_array_equal(res.codes, exp_codes)
+ tm.assert_index_equal(c.categories, exp_categories_all)
+
+ val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
+ cat = Categorical(values=val, categories=list('ABCDEFG'))
+ out = cat.remove_unused_categories()
+ tm.assert_index_equal(out.categories, Index(['B', 'D', 'F']))
+ exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
+ tm.assert_numpy_array_equal(out.codes, exp_codes)
+ assert out.get_values().tolist() == val
+
+ alpha = list('abcdefghijklmnopqrstuvwxyz')
+ val = np.random.choice(alpha[::2], 10000).astype('object')
+ val[np.random.choice(len(val), 100)] = np.nan
+
+ cat = Categorical(values=val, categories=alpha)
+ out = cat.remove_unused_categories()
+ assert out.get_values().tolist() == val.tolist()
+
+ def test_deprecated_labels(self):
+ # TODO: labels is deprecated and should be removed in 0.18 or 2017,
+ # whatever is earlier
+ cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
+ exp = cat.codes
+ with tm.assert_produces_warning(FutureWarning):
+ res = cat.labels
+ tm.assert_numpy_array_equal(res, exp)
+
+
+class TestCategoricalAPIWithFactor(TestCategorical):
+
+ def test_describe(self):
+ # string type
+ desc = self.factor.describe()
+ assert self.factor.ordered
+ exp_index = CategoricalIndex(['a', 'b', 'c'], name='categories',
+ ordered=self.factor.ordered)
+ expected = DataFrame({'counts': [3, 2, 3],
+ 'freqs': [3 / 8., 2 / 8., 3 / 8.]},
+ index=exp_index)
+ tm.assert_frame_equal(desc, expected)
+
+ # check unused categories
+ cat = self.factor.copy()
+ cat.set_categories(["a", "b", "c", "d"], inplace=True)
+ desc = cat.describe()
+
+ exp_index = CategoricalIndex(
+ list('abcd'), ordered=self.factor.ordered, name='categories')
+ expected = DataFrame({'counts': [3, 2, 3, 0],
+ 'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
+ index=exp_index)
+ tm.assert_frame_equal(desc, expected)
+
+ # check an integer one
+ cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
+ desc = cat.describe()
+ exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered,
+ name='categories')
+ expected = DataFrame({'counts': [5, 3, 3],
+ 'freqs': [5 / 11., 3 / 11., 3 / 11.]},
+ index=exp_index)
+ tm.assert_frame_equal(desc, expected)
+
+ # https://github.com/pandas-dev/pandas/issues/3678
+ # describe should work with NaN
+ cat = Categorical([np.nan, 1, 2, 2])
+ desc = cat.describe()
+ expected = DataFrame({'counts': [1, 2, 1],
+ 'freqs': [1 / 4., 2 / 4., 1 / 4.]},
+ index=CategoricalIndex([1, 2, np.nan],
+ categories=[1, 2],
+ name='categories'))
+ tm.assert_frame_equal(desc, expected)
+
+ def test_set_categories_inplace(self):
+ cat = self.factor.copy()
+ cat.set_categories(['a', 'b', 'c', 'd'], inplace=True)
+ tm.assert_index_equal(cat.categories, Index(['a', 'b', 'c', 'd']))
+
+
+class TestPrivateCategoricalAPI(object):
+
+ def test_codes_immutable(self):
+
+ # Codes should be read only
+ c = Categorical(["a", "b", "c", "a", np.nan])
+ exp = np.array([0, 1, 2, 0, -1], dtype='int8')
+ tm.assert_numpy_array_equal(c.codes, exp)
+
+ # Assignments to codes should raise
+ def f():
+ c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
+
+ pytest.raises(ValueError, f)
+
+ # changes in the codes array should raise
+ # np 1.6.1 raises RuntimeError rather than ValueError
+ codes = c.codes
+
+ def f():
+ codes[4] = 1
+
+ pytest.raises(ValueError, f)
+
+ # But even after getting the codes, the original array should still be
+ # writeable!
+ c[4] = "a"
+ exp = np.array([0, 1, 2, 0, 0], dtype='int8')
+ tm.assert_numpy_array_equal(c.codes, exp)
+ c._codes[4] = 2
+ exp = np.array([0, 1, 2, 0, 2], dtype='int8')
+ tm.assert_numpy_array_equal(c.codes, exp)
+
+ @pytest.mark.parametrize('codes, old, new, expected', [
+ ([0, 1], ['a', 'b'], ['a', 'b'], [0, 1]),
+ ([0, 1], ['b', 'a'], ['b', 'a'], [0, 1]),
+ ([0, 1], ['a', 'b'], ['b', 'a'], [1, 0]),
+ ([0, 1], ['b', 'a'], ['a', 'b'], [1, 0]),
+ ([0, 1, 0, 1], ['a', 'b'], ['a', 'b', 'c'], [0, 1, 0, 1]),
+ ([0, 1, 2, 2], ['a', 'b', 'c'], ['a', 'b'], [0, 1, -1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['a', 'b', 'c'], [0, 1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['b'], [-1, 0, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], ['d'], [-1, -1, -1]),
+ ([0, 1, -1], ['a', 'b', 'c'], [], [-1, -1, -1]),
+ ([-1, -1], [], ['a', 'b'], [-1, -1]),
+ ([1, 0], ['b', 'a'], ['a', 'b'], [0, 1]),
+ ])
+ def test_recode_to_categories(self, codes, old, new, expected):
+ codes = np.asanyarray(codes, dtype=np.int8)
+ expected = np.asanyarray(expected, dtype=np.int8)
+ old = Index(old)
+ new = Index(new)
+ result = _recode_for_categories(codes, old, new)
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_recode_to_categories_large(self):
+ N = 1000
+ codes = np.arange(N)
+ old = Index(codes)
+ expected = np.arange(N - 1, -1, -1, dtype=np.int16)
+ new = Index(expected)
+ result = _recode_for_categories(codes, old, new)
+ tm.assert_numpy_array_equal(result, expected)
diff --git a/pandas/tests/categorical/test_constructors.py b/pandas/tests/categorical/test_constructors.py
new file mode 100644
index 0000000000000..abea7e9a0e0b4
--- /dev/null
+++ b/pandas/tests/categorical/test_constructors.py
@@ -0,0 +1,517 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+from datetime import datetime
+
+import numpy as np
+
+import pandas as pd
+import pandas.util.testing as tm
+from pandas import (Categorical, Index, Series, Timestamp,
+ CategoricalIndex, date_range, DatetimeIndex,
+ period_range, timedelta_range, NaT,
+ Interval, IntervalIndex)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
+
+
+class TestCategoricalConstructors(object):
+
+ def test_validate_ordered(self):
+ # see gh-14058
+ exp_msg = "'ordered' must either be 'True' or 'False'"
+ exp_err = TypeError
+
+ # This should be a boolean.
+ ordered = np.array([0, 1, 2])
+
+ with tm.assert_raises_regex(exp_err, exp_msg):
+ Categorical([1, 2, 3], ordered=ordered)
+
+ with tm.assert_raises_regex(exp_err, exp_msg):
+ Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
+ ordered=ordered)
+
+ def test_constructor_empty(self):
+ # GH 17248
+ c = Categorical([])
+ expected = Index([])
+ tm.assert_index_equal(c.categories, expected)
+
+ c = Categorical([], categories=[1, 2, 3])
+ expected = pd.Int64Index([1, 2, 3])
+ tm.assert_index_equal(c.categories, expected)
+
+ def test_constructor_tuples(self):
+ values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
+ result = Categorical(values)
+ expected = Index([(1,), (1, 2)], tupleize_cols=False)
+ tm.assert_index_equal(result.categories, expected)
+ assert result.ordered is False
+
+ def test_constructor_tuples_datetimes(self):
+ # numpy will auto reshape when all of the tuples are the
+ # same len, so add an extra one with 2 items and slice it off
+ values = np.array([(Timestamp('2010-01-01'),),
+ (Timestamp('2010-01-02'),),
+ (Timestamp('2010-01-01'),),
+ (Timestamp('2010-01-02'),),
+ ('a', 'b')], dtype=object)[:-1]
+ result = Categorical(values)
+ expected = Index([(Timestamp('2010-01-01'),),
+ (Timestamp('2010-01-02'),)], tupleize_cols=False)
+ tm.assert_index_equal(result.categories, expected)
+
+ def test_constructor_unsortable(self):
+
+ # it works!
+ arr = np.array([1, 2, 3, datetime.now()], dtype='O')
+ factor = Categorical(arr, ordered=False)
+ assert not factor.ordered
+
+ # this however will raise as cannot be sorted
+ pytest.raises(
+ TypeError, lambda: Categorical(arr, ordered=True))
+
+ def test_constructor_interval(self):
+ result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
+ ordered=True)
+ ii = IntervalIndex.from_intervals([Interval(1, 2),
+ Interval(2, 3),
+ Interval(3, 6)])
+ exp = Categorical(ii, ordered=True)
+ tm.assert_categorical_equal(result, exp)
+ tm.assert_index_equal(result.categories, ii)
+
+ def test_constructor(self):
+
+ exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
+ c1 = Categorical(exp_arr)
+ tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
+ c2 = Categorical(exp_arr, categories=["a", "b", "c"])
+ tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
+ c2 = Categorical(exp_arr, categories=["c", "b", "a"])
+ tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
+
+ # categories must be unique
+ def f():
+ Categorical([1, 2], [1, 2, 2])
+
+ pytest.raises(ValueError, f)
+
+ def f():
+ Categorical(["a", "b"], ["a", "b", "b"])
+
+ pytest.raises(ValueError, f)
+
+ # The default should be unordered
+ c1 = Categorical(["a", "b", "c", "a"])
+ assert not c1.ordered
+
+ # Categorical as input
+ c1 = Categorical(["a", "b", "c", "a"])
+ c2 = Categorical(c1)
+ tm.assert_categorical_equal(c1, c2)
+
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ c2 = Categorical(c1)
+ tm.assert_categorical_equal(c1, c2)
+
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
+ c2 = Categorical(c1)
+ tm.assert_categorical_equal(c1, c2)
+
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
+ c2 = Categorical(c1, categories=["a", "b", "c"])
+ tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
+ tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
+
+ # Series of dtype category
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ c2 = Categorical(Series(c1))
+ tm.assert_categorical_equal(c1, c2)
+
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
+ c2 = Categorical(Series(c1))
+ tm.assert_categorical_equal(c1, c2)
+
+ # Series
+ c1 = Categorical(["a", "b", "c", "a"])
+ c2 = Categorical(Series(["a", "b", "c", "a"]))
+ tm.assert_categorical_equal(c1, c2)
+
+ c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
+ c2 = Categorical(Series(["a", "b", "c", "a"]),
+ categories=["a", "b", "c", "d"])
+ tm.assert_categorical_equal(c1, c2)
+
+ # This should result in integer categories, not float!
+ cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
+ assert is_integer_dtype(cat.categories)
+
+ # https://github.com/pandas-dev/pandas/issues/3678
+ cat = Categorical([np.nan, 1, 2, 3])
+ assert is_integer_dtype(cat.categories)
+
+ # this should result in floats
+ cat = Categorical([np.nan, 1, 2., 3])
+ assert is_float_dtype(cat.categories)
+
+ cat = Categorical([np.nan, 1., 2., 3.])
+ assert is_float_dtype(cat.categories)
+
+ # This doesn't work -> this would probably need some kind of "remember
+ # the original type" feature to try to cast the array interface result
+ # to...
+
+ # vals = np.asarray(cat[cat.notna()])
+ # assert is_integer_dtype(vals)
+
+ # corner cases
+ cat = Categorical([1])
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == 1
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
+
+ cat = Categorical(["a"])
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == "a"
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
+
+ # Scalars should be converted to lists
+ cat = Categorical(1)
+ assert len(cat.categories) == 1
+ assert cat.categories[0] == 1
+ assert len(cat.codes) == 1
+ assert cat.codes[0] == 0
+
+ # two arrays
+ # - when the first is an integer dtype and the second is not
+ # - when the resulting codes are all -1/NaN
+ with tm.assert_produces_warning(None):
+ c_old = Categorical([0, 1, 2, 0, 1, 2],
+ categories=["a", "b", "c"]) # noqa
+
+ with tm.assert_produces_warning(None):
+ c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
+ categories=[3, 4, 5])
+
+ # the next one are from the old docs
+ with tm.assert_produces_warning(None):
+ c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
+ cat = Categorical([1, 2], categories=[1, 2, 3])
+
+ # this is a legitimate constructor
+ with tm.assert_produces_warning(None):
+ c = Categorical(np.array([], dtype='int64'), # noqa
+ categories=[3, 2, 1], ordered=True)
+
+ def test_constructor_not_sequence(self):
+ # https://github.com/pandas-dev/pandas/issues/16022
+ with pytest.raises(TypeError):
+ Categorical(['a', 'b'], categories='a')
+
+ def test_constructor_with_null(self):
+
+ # Cannot have NaN in categories
+ with pytest.raises(ValueError):
+ Categorical([np.nan, "a", "b", "c"],
+ categories=[np.nan, "a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ Categorical([None, "a", "b", "c"],
+ categories=[None, "a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ Categorical(DatetimeIndex(['nat', '20160101']),
+ categories=[NaT, Timestamp('20160101')])
+
+ def test_constructor_with_index(self):
+ ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
+ tm.assert_categorical_equal(ci.values, Categorical(ci))
+
+ ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
+ tm.assert_categorical_equal(ci.values,
+ Categorical(ci.astype(object),
+ categories=ci.categories))
+
+ def test_constructor_with_generator(self):
+ # This was raising an Error in isna(single_val).any() because isna
+ # returned a scalar for a generator
+ xrange = range
+
+ exp = Categorical([0, 1, 2])
+ cat = Categorical((x for x in [0, 1, 2]))
+ tm.assert_categorical_equal(cat, exp)
+ cat = Categorical(xrange(3))
+ tm.assert_categorical_equal(cat, exp)
+
+ # This uses xrange internally
+ from pandas.core.index import MultiIndex
+ MultiIndex.from_product([range(5), ['a', 'b', 'c']])
+
+ # check that categories accept generators and sequences
+ cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
+ tm.assert_categorical_equal(cat, exp)
+ cat = Categorical([0, 1, 2], categories=xrange(3))
+ tm.assert_categorical_equal(cat, exp)
+
+ def test_constructor_with_datetimelike(self):
+
+ # 12077
+ # constructor with a datetimelike and NaT
+
+ for dtl in [date_range('1995-01-01 00:00:00', periods=5, freq='s'),
+ date_range('1995-01-01 00:00:00', periods=5,
+ freq='s', tz='US/Eastern'),
+ timedelta_range('1 day', periods=5, freq='s')]:
+
+ s = Series(dtl)
+ c = Categorical(s)
+ expected = type(dtl)(s)
+ expected.freq = None
+ tm.assert_index_equal(c.categories, expected)
+ tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))
+
+ # with NaT
+ s2 = s.copy()
+ s2.iloc[-1] = NaT
+ c = Categorical(s2)
+ expected = type(dtl)(s2.dropna())
+ expected.freq = None
+ tm.assert_index_equal(c.categories, expected)
+
+ exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
+ tm.assert_numpy_array_equal(c.codes, exp)
+
+ result = repr(c)
+ assert 'NaT' in result
+
+ def test_constructor_from_index_series_datetimetz(self):
+ idx = date_range('2015-01-01 10:00', freq='D', periods=3,
+ tz='US/Eastern')
+ result = Categorical(idx)
+ tm.assert_index_equal(result.categories, idx)
+
+ result = Categorical(Series(idx))
+ tm.assert_index_equal(result.categories, idx)
+
+ def test_constructor_from_index_series_timedelta(self):
+ idx = timedelta_range('1 days', freq='D', periods=3)
+ result = Categorical(idx)
+ tm.assert_index_equal(result.categories, idx)
+
+ result = Categorical(Series(idx))
+ tm.assert_index_equal(result.categories, idx)
+
+ def test_constructor_from_index_series_period(self):
+ idx = period_range('2015-01-01', freq='D', periods=3)
+ result = Categorical(idx)
+ tm.assert_index_equal(result.categories, idx)
+
+ result = Categorical(Series(idx))
+ tm.assert_index_equal(result.categories, idx)
+
+ def test_constructor_invariant(self):
+ # GH 14190
+ vals = [
+ np.array([1., 1.2, 1.8, np.nan]),
+ np.array([1, 2, 3], dtype='int64'),
+ ['a', 'b', 'c', np.nan],
+ [pd.Period('2014-01'), pd.Period('2014-02'), NaT],
+ [Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],
+ [Timestamp('2014-01-01', tz='US/Eastern'),
+ Timestamp('2014-01-02', tz='US/Eastern'), NaT],
+ ]
+ for val in vals:
+ c = Categorical(val)
+ c2 = Categorical(c)
+ tm.assert_categorical_equal(c, c2)
+
+ @pytest.mark.parametrize('ordered', [True, False])
+ def test_constructor_with_dtype(self, ordered):
+ categories = ['b', 'a', 'c']
+ dtype = CategoricalDtype(categories, ordered=ordered)
+ result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
+ expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,
+ ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+ assert result.ordered is ordered
+
+ def test_constructor_dtype_and_others_raises(self):
+ dtype = CategoricalDtype(['a', 'b'], ordered=True)
+ with tm.assert_raises_regex(ValueError, "Cannot"):
+ Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
+
+ with tm.assert_raises_regex(ValueError, "Cannot"):
+ Categorical(['a', 'b'], ordered=True, dtype=dtype)
+
+ with tm.assert_raises_regex(ValueError, "Cannot"):
+ Categorical(['a', 'b'], ordered=False, dtype=dtype)
+
+ @pytest.mark.parametrize('categories', [
+ None, ['a', 'b'], ['a', 'c'],
+ ])
+ @pytest.mark.parametrize('ordered', [True, False])
+ def test_constructor_str_category(self, categories, ordered):
+ result = Categorical(['a', 'b'], categories=categories,
+ ordered=ordered, dtype='category')
+ expected = Categorical(['a', 'b'], categories=categories,
+ ordered=ordered)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_constructor_str_unknown(self):
+ with tm.assert_raises_regex(ValueError, "Unknown `dtype`"):
+ Categorical([1, 2], dtype="foo")
+
+ def test_constructor_from_categorical_with_dtype(self):
+ dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
+ values = Categorical(['a', 'b', 'd'])
+ result = Categorical(values, dtype=dtype)
+ # We use dtype.categories, not values.categories
+ expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
+ ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_constructor_from_categorical_with_unknown_dtype(self):
+ dtype = CategoricalDtype(None, ordered=True)
+ values = Categorical(['a', 'b', 'd'])
+ result = Categorical(values, dtype=dtype)
+ # We use values.categories, not dtype.categories
+ expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
+ ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_contructor_from_categorical_string(self):
+ values = Categorical(['a', 'b', 'd'])
+ # use categories, ordered
+ result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
+ dtype='category')
+ expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
+ ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ # No string
+ result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_constructor_with_categorical_categories(self):
+ # GH17884
+ expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
+
+ result = Categorical(
+ ['a', 'b'], categories=Categorical(['a', 'b', 'c']))
+ tm.assert_categorical_equal(result, expected)
+
+ result = Categorical(
+ ['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_codes(self):
+
+ # too few categories
+ def f():
+ Categorical.from_codes([1, 2], [1, 2])
+
+ pytest.raises(ValueError, f)
+
+ # no int codes
+ def f():
+ Categorical.from_codes(["a"], [1, 2])
+
+ pytest.raises(ValueError, f)
+
+ # no unique categories
+ def f():
+ Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
+
+ pytest.raises(ValueError, f)
+
+ # NaN categories included
+ def f():
+ Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
+
+ pytest.raises(ValueError, f)
+
+ # too negative
+ def f():
+ Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
+
+ pytest.raises(ValueError, f)
+
+ exp = Categorical(["a", "b", "c"], ordered=False)
+ res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
+ tm.assert_categorical_equal(exp, res)
+
+ # Not available in earlier numpy versions
+ if hasattr(np.random, "choice"):
+ codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
+ Categorical.from_codes(codes, categories=["train", "test"])
+
+ def test_from_codes_with_categorical_categories(self):
+ # GH17884
+ expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
+
+ result = Categorical.from_codes(
+ [0, 1], categories=Categorical(['a', 'b', 'c']))
+ tm.assert_categorical_equal(result, expected)
+
+ result = Categorical.from_codes(
+ [0, 1], categories=CategoricalIndex(['a', 'b', 'c']))
+ tm.assert_categorical_equal(result, expected)
+
+ # non-unique Categorical still raises
+ with pytest.raises(ValueError):
+ Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))
+
+ @pytest.mark.parametrize('dtype', [None, 'category'])
+ def test_from_inferred_categories(self, dtype):
+ cats = ['a', 'b']
+ codes = np.array([0, 0, 1, 1], dtype='i8')
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical.from_codes(codes, cats)
+ tm.assert_categorical_equal(result, expected)
+
+ @pytest.mark.parametrize('dtype', [None, 'category'])
+ def test_from_inferred_categories_sorts(self, dtype):
+ cats = ['b', 'a']
+ codes = np.array([0, 1, 1, 1], dtype='i8')
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_inferred_categories_dtype(self):
+ cats = ['a', 'b', 'd']
+ codes = np.array([0, 1, 0, 2], dtype='i8')
+ dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical(['a', 'b', 'a', 'd'],
+ categories=['c', 'b', 'a'],
+ ordered=True)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_from_inferred_categories_coerces(self):
+ cats = ['1', '2', 'bad']
+ codes = np.array([0, 0, 1, 2], dtype='i8')
+ dtype = CategoricalDtype([1, 2])
+ result = Categorical._from_inferred_categories(cats, codes, dtype)
+ expected = Categorical([1, 1, 2, np.nan])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_construction_with_ordered(self):
+ # GH 9347, 9190
+ cat = Categorical([0, 1, 2])
+ assert not cat.ordered
+ cat = Categorical([0, 1, 2], ordered=False)
+ assert not cat.ordered
+ cat = Categorical([0, 1, 2], ordered=True)
+ assert cat.ordered
+
+ @pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
+ def test_constructor_imaginary(self):
+ values = [1, 2, 3 + 1j]
+ c1 = Categorical(values)
+ tm.assert_index_equal(c1.categories, Index(values))
+ tm.assert_numpy_array_equal(np.array(c1), np.array(values))
diff --git a/pandas/tests/categorical/test_dtypes.py b/pandas/tests/categorical/test_dtypes.py
new file mode 100644
index 0000000000000..0a41b628bc057
--- /dev/null
+++ b/pandas/tests/categorical/test_dtypes.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas.core.dtypes.dtypes import CategoricalDtype
+from pandas import Categorical, Index, CategoricalIndex
+
+
+class TestCategoricalDtypes(object):
+
+ def test_is_equal_dtype(self):
+
+ # test dtype comparisons between cats
+
+ c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
+ c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
+ c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
+ assert c1.is_dtype_equal(c1)
+ assert c2.is_dtype_equal(c2)
+ assert c3.is_dtype_equal(c3)
+ assert c1.is_dtype_equal(c2)
+ assert not c1.is_dtype_equal(c3)
+ assert not c1.is_dtype_equal(Index(list('aabca')))
+ assert not c1.is_dtype_equal(c1.astype(object))
+ assert c1.is_dtype_equal(CategoricalIndex(c1))
+ assert (c1.is_dtype_equal(
+ CategoricalIndex(c1, categories=list('cab'))))
+ assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
+
+ def test_set_dtype_same(self):
+ c = Categorical(['a', 'b', 'c'])
+ result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
+ tm.assert_categorical_equal(result, c)
+
+ def test_set_dtype_new_categories(self):
+ c = Categorical(['a', 'b', 'c'])
+ result = c._set_dtype(CategoricalDtype(list('abcd')))
+ tm.assert_numpy_array_equal(result.codes, c.codes)
+ tm.assert_index_equal(result.dtype.categories, Index(list('abcd')))
+
+ @pytest.mark.parametrize('values, categories, new_categories', [
+ # No NaNs, same cats, same order
+ (['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
+ # Same, unsorted
+ (['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
+ # No NaNs, same cats, different order
+ (['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
+ # NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
+ # Introduce NaNs
+ (['a', 'b', 'c'], ['a', 'b'], ['a']),
+ (['a', 'b', 'c'], ['a', 'b'], ['b']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ (['b', 'a', 'c'], ['a', 'b'], ['a']),
+ # No overlap
+ (['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
+ ])
+ @pytest.mark.parametrize('ordered', [True, False])
+ def test_set_dtype_many(self, values, categories, new_categories,
+ ordered):
+ c = Categorical(values, categories)
+ expected = Categorical(values, new_categories, ordered)
+ result = c._set_dtype(expected.dtype)
+ tm.assert_categorical_equal(result, expected)
+
+ def test_set_dtype_no_overlap(self):
+ c = Categorical(['a', 'b', 'c'], ['d', 'e'])
+ result = c._set_dtype(CategoricalDtype(['a', 'b']))
+ expected = Categorical([None, None, None], categories=['a', 'b'])
+ tm.assert_categorical_equal(result, expected)
+
+ def test_codes_dtypes(self):
+
+ # GH 8453
+ result = Categorical(['foo', 'bar', 'baz'])
+ assert result.codes.dtype == 'int8'
+
+ result = Categorical(['foo%05d' % i for i in range(400)])
+ assert result.codes.dtype == 'int16'
+
+ result = Categorical(['foo%05d' % i for i in range(40000)])
+ assert result.codes.dtype == 'int32'
+
+ # adding cats
+ result = Categorical(['foo', 'bar', 'baz'])
+ assert result.codes.dtype == 'int8'
+ result = result.add_categories(['foo%05d' % i for i in range(400)])
+ assert result.codes.dtype == 'int16'
+
+ # removing cats
+ result = result.remove_categories(['foo%05d' % i for i in range(300)])
+ assert result.codes.dtype == 'int8'
+
+ def test_astype_categorical(self):
+
+ cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
+ tm.assert_categorical_equal(cat, cat.astype('category'))
+ tm.assert_almost_equal(np.array(cat), cat.astype('object'))
+
+ pytest.raises(ValueError, lambda: cat.astype(float))
diff --git a/pandas/tests/categorical/test_indexing.py b/pandas/tests/categorical/test_indexing.py
new file mode 100644
index 0000000000000..9c27b1101e5ca
--- /dev/null
+++ b/pandas/tests/categorical/test_indexing.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import Categorical, Index, PeriodIndex
+from pandas.tests.categorical.common import TestCategorical
+
+
+class TestCategoricalIndexingWithFactor(TestCategorical):
+
+ def test_getitem(self):
+ assert self.factor[0] == 'a'
+ assert self.factor[-1] == 'c'
+
+ subf = self.factor[[0, 1, 2]]
+ tm.assert_numpy_array_equal(subf._codes,
+ np.array([0, 1, 1], dtype=np.int8))
+
+ subf = self.factor[np.asarray(self.factor) == 'c']
+ tm.assert_numpy_array_equal(subf._codes,
+ np.array([2, 2, 2], dtype=np.int8))
+
+ def test_setitem(self):
+
+ # int/positional
+ c = self.factor.copy()
+ c[0] = 'b'
+ assert c[0] == 'b'
+ c[-1] = 'a'
+ assert c[-1] == 'a'
+
+ # boolean
+ c = self.factor.copy()
+ indexer = np.zeros(len(c), dtype='bool')
+ indexer[0] = True
+ indexer[-1] = True
+ c[indexer] = 'c'
+ expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
+ ordered=True)
+
+ tm.assert_categorical_equal(c, expected)
+
+
+class TestCategoricalIndexing(object):
+
+ def test_getitem_listlike(self):
+
+ # GH 9469
+ # properly coerce the input indexers
+ np.random.seed(1)
+ c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
+ result = c.codes[np.array([100000]).astype(np.int64)]
+ expected = c[np.array([100000]).astype(np.int64)].codes
+ tm.assert_numpy_array_equal(result, expected)
+
+ def test_periodindex(self):
+ idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
+ '2014-03', '2014-03'], freq='M')
+
+ cat1 = Categorical(idx1)
+ str(cat1)
+ exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
+ exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
+ tm.assert_numpy_array_equal(cat1._codes, exp_arr)
+ tm.assert_index_equal(cat1.categories, exp_idx)
+
+ idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
+ '2014-03', '2014-01'], freq='M')
+ cat2 = Categorical(idx2, ordered=True)
+ str(cat2)
+ exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
+ exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
+ tm.assert_numpy_array_equal(cat2._codes, exp_arr)
+ tm.assert_index_equal(cat2.categories, exp_idx2)
+
+ idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
+ '2013-08', '2013-07', '2013-05'], freq='M')
+ cat3 = Categorical(idx3, ordered=True)
+ exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
+ exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
+ '2013-10', '2013-11', '2013-12'], freq='M')
+ tm.assert_numpy_array_equal(cat3._codes, exp_arr)
+ tm.assert_index_equal(cat3.categories, exp_idx)
+
+ def test_categories_assigments(self):
+ s = Categorical(["a", "b", "c", "a"])
+ exp = np.array([1, 2, 3, 1], dtype=np.int64)
+ s.categories = [1, 2, 3]
+ tm.assert_numpy_array_equal(s.__array__(), exp)
+ tm.assert_index_equal(s.categories, Index([1, 2, 3]))
+
+ # lengthen
+ def f():
+ s.categories = [1, 2, 3, 4]
+
+ pytest.raises(ValueError, f)
+
+ # shorten
+ def f():
+ s.categories = [1, 2]
+
+ pytest.raises(ValueError, f)
diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py
new file mode 100644
index 0000000000000..79758dee5cfda
--- /dev/null
+++ b/pandas/tests/categorical/test_missing.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import (Categorical, Index, isna)
+from pandas.compat import lrange
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+
+class TestCategoricalMissing(object):
+
+ def test_na_flags_int_categories(self):
+ # #1457
+
+ categories = lrange(10)
+ labels = np.random.randint(0, 10, 20)
+ labels[::5] = -1
+
+ cat = Categorical(labels, categories, fastpath=True)
+ repr(cat)
+
+ tm.assert_numpy_array_equal(isna(cat), labels == -1)
+
+ def test_nan_handling(self):
+
+ # Nans are represented as -1 in codes
+ c = Categorical(["a", "b", np.nan, "a"])
+ tm.assert_index_equal(c.categories, Index(["a", "b"]))
+ tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
+ dtype=np.int8))
+ c[1] = np.nan
+ tm.assert_index_equal(c.categories, Index(["a", "b"]))
+ tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
+ dtype=np.int8))
+
+ # Adding nan to categories should make assigned nan point to the
+ # category!
+ c = Categorical(["a", "b", np.nan, "a"])
+ tm.assert_index_equal(c.categories, Index(["a", "b"]))
+ tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
+ dtype=np.int8))
+
+ def test_set_dtype_nans(self):
+ c = Categorical(['a', 'b', np.nan])
+ result = c._set_dtype(CategoricalDtype(['a', 'c']))
+ tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
+ dtype='int8'))
+
+ def test_set_item_nan(self):
+ cat = Categorical([1, 2, 3])
+ cat[1] = np.nan
+
+ exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
+ tm.assert_categorical_equal(cat, exp)
diff --git a/pandas/tests/categorical/test_operators.py b/pandas/tests/categorical/test_operators.py
new file mode 100644
index 0000000000000..09a0607b67a88
--- /dev/null
+++ b/pandas/tests/categorical/test_operators.py
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import pandas as pd
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import Categorical, Series, DataFrame, date_range
+from pandas.tests.categorical.common import TestCategorical
+
+
+class TestCategoricalOpsWithFactor(TestCategorical):
+
+ def test_categories_none_comparisons(self):
+ factor = Categorical(['a', 'b', 'b', 'a',
+ 'a', 'c', 'c', 'c'], ordered=True)
+ tm.assert_categorical_equal(factor, self.factor)
+
+ def test_comparisons(self):
+
+ result = self.factor[self.factor == 'a']
+ expected = self.factor[np.asarray(self.factor) == 'a']
+ tm.assert_categorical_equal(result, expected)
+
+ result = self.factor[self.factor != 'a']
+ expected = self.factor[np.asarray(self.factor) != 'a']
+ tm.assert_categorical_equal(result, expected)
+
+ result = self.factor[self.factor < 'c']
+ expected = self.factor[np.asarray(self.factor) < 'c']
+ tm.assert_categorical_equal(result, expected)
+
+ result = self.factor[self.factor > 'a']
+ expected = self.factor[np.asarray(self.factor) > 'a']
+ tm.assert_categorical_equal(result, expected)
+
+ result = self.factor[self.factor >= 'b']
+ expected = self.factor[np.asarray(self.factor) >= 'b']
+ tm.assert_categorical_equal(result, expected)
+
+ result = self.factor[self.factor <= 'b']
+ expected = self.factor[np.asarray(self.factor) <= 'b']
+ tm.assert_categorical_equal(result, expected)
+
+ n = len(self.factor)
+
+ other = self.factor[np.random.permutation(n)]
+ result = self.factor == other
+ expected = np.asarray(self.factor) == np.asarray(other)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = self.factor == 'd'
+ expected = np.repeat(False, len(self.factor))
+ tm.assert_numpy_array_equal(result, expected)
+
+ # comparisons with categoricals
+ cat_rev = Categorical(
+ ["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
+ cat_rev_base = Categorical(
+ ["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
+ cat = Categorical(["a", "b", "c"], ordered=True)
+ cat_base = Categorical(
+ ["b", "b", "b"], categories=cat.categories, ordered=True)
+
+ # comparisons need to take categories ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = np.array([True, False, False])
+ tm.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = np.array([False, False, True])
+ tm.assert_numpy_array_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = np.array([False, False, True])
+ tm.assert_numpy_array_equal(res, exp)
+
+ # Only categories with same categories can be compared
+ def f():
+ cat > cat_rev
+
+ pytest.raises(TypeError, f)
+
+ cat_rev_base2 = Categorical(
+ ["b", "b", "b"], categories=["c", "b", "a", "d"])
+
+ def f():
+ cat_rev > cat_rev_base2
+
+ pytest.raises(TypeError, f)
+
+ # Only categories with same ordering information can be compared
+ cat_unorderd = cat.set_ordered(False)
+ assert not (cat > cat).any()
+
+ def f():
+ cat > cat_unorderd
+
+ pytest.raises(TypeError, f)
+
+ # comparison (in both directions) with Series will raise
+ s = Series(["b", "b", "b"])
+ pytest.raises(TypeError, lambda: cat > s)
+ pytest.raises(TypeError, lambda: cat_rev > s)
+ pytest.raises(TypeError, lambda: s < cat)
+ pytest.raises(TypeError, lambda: s < cat_rev)
+
+ # comparison with numpy.array will raise in both direction, but only on
+ # newer numpy versions
+ a = np.array(["b", "b", "b"])
+ pytest.raises(TypeError, lambda: cat > a)
+ pytest.raises(TypeError, lambda: cat_rev > a)
+
+ # Make sure that unequal comparison take the categories order in
+ # account
+ cat_rev = Categorical(
+ list("abc"), categories=list("cba"), ordered=True)
+ exp = np.array([True, False, False])
+ res = cat_rev > "b"
+ tm.assert_numpy_array_equal(res, exp)
+
+
+class TestCategoricalOps(object):
+
+ def test_datetime_categorical_comparison(self):
+ dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
+ tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
+ np.array([False, True, True]))
+ tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
+ np.array([False, True, True]))
+
+ def test_reflected_comparison_with_scalars(self):
+ # GH8658
+ cat = Categorical([1, 2, 3], ordered=True)
+ tm.assert_numpy_array_equal(cat > cat[0],
+ np.array([False, True, True]))
+ tm.assert_numpy_array_equal(cat[0] < cat,
+ np.array([False, True, True]))
+
+ def test_comparison_with_unknown_scalars(self):
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
+ # and following comparisons with scalars not in categories should raise
+ # for unequal comps, but not for equal/not equal
+ cat = Categorical([1, 2, 3], ordered=True)
+
+ pytest.raises(TypeError, lambda: cat < 4)
+ pytest.raises(TypeError, lambda: cat > 4)
+ pytest.raises(TypeError, lambda: 4 < cat)
+ pytest.raises(TypeError, lambda: 4 > cat)
+
+ tm.assert_numpy_array_equal(cat == 4,
+ np.array([False, False, False]))
+ tm.assert_numpy_array_equal(cat != 4,
+ np.array([True, True, True]))
+
+ @pytest.mark.parametrize('data,reverse,base', [
+ (list("abc"), list("cba"), list("bbb")),
+ ([1, 2, 3], [3, 2, 1], [2, 2, 2])]
+ )
+ def test_comparisons(self, data, reverse, base):
+ cat_rev = Series(
+ Categorical(data, categories=reverse, ordered=True))
+ cat_rev_base = Series(
+ Categorical(base, categories=reverse, ordered=True))
+ cat = Series(Categorical(data, ordered=True))
+ cat_base = Series(
+ Categorical(base, categories=cat.cat.categories, ordered=True))
+ s = Series(base)
+ a = np.array(base)
+
+ # comparisons need to take categories ordering into account
+ res_rev = cat_rev > cat_rev_base
+ exp_rev = Series([True, False, False])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res_rev = cat_rev < cat_rev_base
+ exp_rev = Series([False, False, True])
+ tm.assert_series_equal(res_rev, exp_rev)
+
+ res = cat > cat_base
+ exp = Series([False, False, True])
+ tm.assert_series_equal(res, exp)
+
+ scalar = base[1]
+ res = cat > scalar
+ exp = Series([False, False, True])
+ exp2 = cat.values > scalar
+ tm.assert_series_equal(res, exp)
+ tm.assert_numpy_array_equal(res.values, exp2)
+ res_rev = cat_rev > scalar
+ exp_rev = Series([True, False, False])
+ exp_rev2 = cat_rev.values > scalar
+ tm.assert_series_equal(res_rev, exp_rev)
+ tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
+
+ # Only categories with same categories can be compared
+ def f():
+ cat > cat_rev
+
+ pytest.raises(TypeError, f)
+
+ # categorical cannot be compared to Series or numpy array, and also
+ # not the other way around
+ pytest.raises(TypeError, lambda: cat > s)
+ pytest.raises(TypeError, lambda: cat_rev > s)
+ pytest.raises(TypeError, lambda: cat > a)
+ pytest.raises(TypeError, lambda: cat_rev > a)
+
+ pytest.raises(TypeError, lambda: s < cat)
+ pytest.raises(TypeError, lambda: s < cat_rev)
+
+ pytest.raises(TypeError, lambda: a < cat)
+ pytest.raises(TypeError, lambda: a < cat_rev)
+
+ @pytest.mark.parametrize('ctor', [
+ lambda *args, **kwargs: Categorical(*args, **kwargs),
+ lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
+ ])
+ def test_unordered_different_order_equal(self, ctor):
+ # https://github.com/pandas-dev/pandas/issues/16014
+ c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
+ c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
+ assert (c1 == c2).all()
+
+ c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
+ c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
+ assert (c1 != c2).all()
+
+ c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
+ c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
+ assert (c1 != c2).all()
+
+ c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
+ c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
+ result = c1 == c2
+ tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
+
+ def test_unordered_different_categories_raises(self):
+ c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
+ c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
+ with tm.assert_raises_regex(TypeError,
+ "Categoricals can only be compared"):
+ c1 == c2
+
+ def test_compare_different_lengths(self):
+ c1 = Categorical([], categories=['a', 'b'])
+ c2 = Categorical([], categories=['a'])
+ msg = "Categories are different lengths"
+ with tm.assert_raises_regex(TypeError, msg):
+ c1 == c2
+
+ def test_numeric_like_ops(self):
+
+ df = DataFrame({'value': np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=['value'], ascending=True)
+ df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
+ right=False, labels=cat_labels)
+
+ # numeric ops should not succeed
+ for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
+ pytest.raises(TypeError,
+ lambda: getattr(df, op)(df))
+
+ # reduction ops should not succeed (unless specifically defined, e.g.
+ # min/max)
+ s = df['value_group']
+ for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
+ pytest.raises(TypeError,
+ lambda: getattr(s, op)(numeric_only=False))
+
+ # mad technically works because it takes always the numeric data
+
+ # numpy ops
+ s = Series(Categorical([1, 2, 3, 4]))
+ pytest.raises(TypeError, lambda: np.sum(s))
+
+ # numeric ops on a Series
+ for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
+ pytest.raises(TypeError, lambda: getattr(s, op)(2))
+
+ # invalid ufunc
+ pytest.raises(TypeError, lambda: np.log(s))
diff --git a/pandas/tests/categorical/test_repr.py b/pandas/tests/categorical/test_repr.py
new file mode 100644
index 0000000000000..0cadf66b24d46
--- /dev/null
+++ b/pandas/tests/categorical/test_repr.py
@@ -0,0 +1,517 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+from pandas import (Categorical, Series, CategoricalIndex, date_range,
+ period_range, timedelta_range)
+from pandas.compat import u, PY3
+from pandas.core.config import option_context
+from pandas.tests.categorical.common import TestCategorical
+
+
+class TestCategoricalReprWithFactor(TestCategorical):
+
+ def test_print(self):
+ expected = ["[a, b, b, a, a, c, c, c]",
+ "Categories (3, object): [a < b < c]"]
+ expected = "\n".join(expected)
+ actual = repr(self.factor)
+ assert actual == expected
+
+
+class TestCategoricalRepr(object):
+
+ def test_big_print(self):
+ factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
+ fastpath=True)
+ expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
+ "Categories (3, object): [a, b, c]"]
+ expected = "\n".join(expected)
+
+ actual = repr(factor)
+
+ assert actual == expected
+
+ def test_empty_print(self):
+ factor = Categorical([], ["a", "b", "c"])
+ expected = ("[], Categories (3, object): [a, b, c]")
+ # hack because array_repr changed in numpy > 1.6.x
+ actual = repr(factor)
+ assert actual == expected
+
+ assert expected == actual
+ factor = Categorical([], ["a", "b", "c"], ordered=True)
+ expected = ("[], Categories (3, object): [a < b < c]")
+ actual = repr(factor)
+ assert expected == actual
+
+ factor = Categorical([], [])
+ expected = ("[], Categories (0, object): []")
+ assert expected == repr(factor)
+
+ def test_print_none_width(self):
+ # GH10087
+ a = Series(Categorical([1, 2, 3, 4]))
+ exp = u("0 1\n1 2\n2 3\n3 4\n" +
+ "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
+
+ with option_context("display.width", None):
+ assert exp == repr(a)
+
+ def test_unicode_print(self):
+ if PY3:
+ _rep = repr
+ else:
+ _rep = unicode # noqa
+
+ c = Categorical(['aaaaa', 'bb', 'cccc'] * 20)
+ expected = u"""\
+[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
+Length: 60
+Categories (3, object): [aaaaa, bb, cccc]"""
+
+ assert _rep(c) == expected
+
+ c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
+ expected = u"""\
+[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
+Length: 60
+Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
+
+ assert _rep(c) == expected
+
+ # unicode option should not affect to Categorical, as it doesn't care
+ # the repr width
+ with option_context('display.unicode.east_asian_width', True):
+
+ c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
+ expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
+Length: 60
+Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
+
+ assert _rep(c) == expected
+
+ def test_categorical_repr(self):
+ c = Categorical([1, 2, 3])
+ exp = """[1, 2, 3]
+Categories (3, int64): [1, 2, 3]"""
+
+ assert repr(c) == exp
+
+ c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
+ exp = """[1, 2, 3, 1, 2, 3]
+Categories (3, int64): [1, 2, 3]"""
+
+ assert repr(c) == exp
+
+ c = Categorical([1, 2, 3, 4, 5] * 10)
+ exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
+Length: 50
+Categories (5, int64): [1, 2, 3, 4, 5]"""
+
+ assert repr(c) == exp
+
+ c = Categorical(np.arange(20))
+ exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
+Length: 20
+Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_ordered(self):
+ c = Categorical([1, 2, 3], ordered=True)
+ exp = """[1, 2, 3]
+Categories (3, int64): [1 < 2 < 3]"""
+
+ assert repr(c) == exp
+
+ c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
+ exp = """[1, 2, 3, 1, 2, 3]
+Categories (3, int64): [1 < 2 < 3]"""
+
+ assert repr(c) == exp
+
+ c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
+ exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
+Length: 50
+Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
+
+ assert repr(c) == exp
+
+ c = Categorical(np.arange(20), ordered=True)
+ exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
+Length: 20
+Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_datetime(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ c = Categorical(idx)
+
+ # TODO(wesm): exceeding 80 characters in the console is not good
+ # behavior
+ exp = (
+ "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
+ "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
+ "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
+ "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
+ " 2011-01-01 12:00:00, "
+ "2011-01-01 13:00:00]""")
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = (
+ "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
+ "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
+ "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
+ "2011-01-01 13:00:00]\n"
+ "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
+ "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
+ " 2011-01-01 12:00:00, "
+ "2011-01-01 13:00:00]")
+
+ assert repr(c) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ c = Categorical(idx)
+ exp = (
+ "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
+ "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
+ "2011-01-01 13:00:00-05:00]\n"
+ "Categories (5, datetime64[ns, US/Eastern]): "
+ "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
+ " "
+ "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
+ " "
+ "2011-01-01 13:00:00-05:00]")
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = (
+ "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
+ "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
+ "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
+ "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
+ "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
+ "Categories (5, datetime64[ns, US/Eastern]): "
+ "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
+ " "
+ "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
+ " "
+ "2011-01-01 13:00:00-05:00]")
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_datetime_ordered(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ c = Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ c = Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]""" # noqa
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_period(self):
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ c = Categorical(idx)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(c) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ c = Categorical(idx)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_period_ordered(self):
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ c = Categorical(idx, ordered=True)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
+Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(c) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ c = Categorical(idx, ordered=True)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_timedelta(self):
+ idx = timedelta_range('1 days', periods=5)
+ c = Categorical(idx)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa
+
+ assert repr(c) == exp
+
+ idx = timedelta_range('1 hours', periods=20)
+ c = Categorical(idx)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 20
+Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 40
+Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
+ 18 days 01:00:00, 19 days 01:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ def test_categorical_repr_timedelta_ordered(self):
+ idx = timedelta_range('1 days', periods=5)
+ c = Categorical(idx, ordered=True)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
+
+ assert repr(c) == exp
+
+ idx = timedelta_range('1 hours', periods=20)
+ c = Categorical(idx, ordered=True)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 20
+Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ c = Categorical(idx.append(idx), categories=idx, ordered=True)
+ exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
+Length: 40
+Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
+ 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
+
+ assert repr(c) == exp
+
+ def test_categorical_index_repr(self):
+ idx = CategoricalIndex(Categorical([1, 2, 3]))
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
+ assert repr(idx) == exp
+
+ i = CategoricalIndex(Categorical(np.arange(10)))
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_ordered(self):
+ i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
+ exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ i = CategoricalIndex(Categorical(np.arange(10), ordered=True))
+ exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_datetime(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
+ '2011-01-01 11:00:00', '2011-01-01 12:00:00',
+ '2011-01-01 13:00:00'],
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_datetime_ordered(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
+ '2011-01-01 11:00:00', '2011-01-01 12:00:00',
+ '2011-01-01 13:00:00'],
+ categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ i = CategoricalIndex(Categorical(idx.append(idx), ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
+ '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
+ '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
+ '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
+ '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
+ categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_period(self):
+ # test all length
+ idx = period_range('2011-01-01 09:00', freq='H', periods=1)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ idx = period_range('2011-01-01 09:00', freq='H', periods=2)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ idx = period_range('2011-01-01 09:00', freq='H', periods=3)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ i = CategoricalIndex(Categorical(idx.append(idx)))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
+ '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
+ '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_period_ordered(self):
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
+ '2011-01-01 12:00', '2011-01-01 13:00'],
+ categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_timedelta(self):
+ idx = timedelta_range('1 days', periods=5)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ idx = timedelta_range('1 hours', periods=10)
+ i = CategoricalIndex(Categorical(idx))
+ exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
+ '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
+ '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
+ '9 days 01:00:00'],
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
+
+ assert repr(i) == exp
+
+ def test_categorical_index_repr_timedelta_ordered(self):
+ idx = timedelta_range('1 days', periods=5)
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
+ assert repr(i) == exp
+
+ idx = timedelta_range('1 hours', periods=10)
+ i = CategoricalIndex(Categorical(idx, ordered=True))
+ exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
+ '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
+ '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
+ '9 days 01:00:00'],
+ categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
+
+ assert repr(i) == exp
diff --git a/pandas/tests/categorical/test_sorting.py b/pandas/tests/categorical/test_sorting.py
new file mode 100644
index 0000000000000..88edb6c8f1348
--- /dev/null
+++ b/pandas/tests/categorical/test_sorting.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+import pandas.util.testing as tm
+from pandas import Categorical, Index
+
+
+class TestCategoricalSort(object):
+
+ def test_argsort(self):
+ c = Categorical([5, 3, 1, 4, 2], ordered=True)
+
+ expected = np.array([2, 4, 1, 3, 0])
+ tm.assert_numpy_array_equal(c.argsort(ascending=True), expected,
+ check_dtype=False)
+
+ expected = expected[::-1]
+ tm.assert_numpy_array_equal(c.argsort(ascending=False), expected,
+ check_dtype=False)
+
+ def test_numpy_argsort(self):
+ c = Categorical([5, 3, 1, 4, 2], ordered=True)
+
+ expected = np.array([2, 4, 1, 3, 0])
+ tm.assert_numpy_array_equal(np.argsort(c), expected,
+ check_dtype=False)
+
+ tm.assert_numpy_array_equal(np.argsort(c, kind='mergesort'), expected,
+ check_dtype=False)
+
+ msg = "the 'axis' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.argsort,
+ c, axis=0)
+
+ msg = "the 'order' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.argsort,
+ c, order='C')
+
+ def test_sort_values(self):
+
+ # unordered cats are sortable
+ cat = Categorical(["a", "b", "b", "a"], ordered=False)
+ cat.sort_values()
+
+ cat = Categorical(["a", "c", "b", "d"], ordered=True)
+
+ # sort_values
+ res = cat.sort_values()
+ exp = np.array(["a", "b", "c", "d"], dtype=object)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, cat.categories)
+
+ cat = Categorical(["a", "c", "b", "d"],
+ categories=["a", "b", "c", "d"], ordered=True)
+ res = cat.sort_values()
+ exp = np.array(["a", "b", "c", "d"], dtype=object)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, cat.categories)
+
+ res = cat.sort_values(ascending=False)
+ exp = np.array(["d", "c", "b", "a"], dtype=object)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, cat.categories)
+
+ # sort (inplace order)
+ cat1 = cat.copy()
+ cat1.sort_values(inplace=True)
+ exp = np.array(["a", "b", "c", "d"], dtype=object)
+ tm.assert_numpy_array_equal(cat1.__array__(), exp)
+ tm.assert_index_equal(res.categories, cat.categories)
+
+ # reverse
+ cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
+ res = cat.sort_values(ascending=False)
+ exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
+ exp_categories = Index(["a", "b", "c", "d"])
+ tm.assert_numpy_array_equal(res.__array__(), exp_val)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ def test_sort_values_na_position(self):
+ # see gh-12882
+ cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
+ exp_categories = Index([2, 5])
+
+ exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
+ res = cat.sort_values() # default arguments
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
+ res = cat.sort_values(ascending=True, na_position='first')
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
+ res = cat.sort_values(ascending=False, na_position='first')
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
+ res = cat.sort_values(ascending=True, na_position='last')
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
+ res = cat.sort_values(ascending=False, na_position='last')
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
+ res = cat.sort_values(ascending=False, na_position='last')
+ exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
+ exp_categories = Index(["a", "b", "c", "d"])
+ tm.assert_numpy_array_equal(res.__array__(), exp_val)
+ tm.assert_index_equal(res.categories, exp_categories)
+
+ cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
+ res = cat.sort_values(ascending=False, na_position='first')
+ exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
+ exp_categories = Index(["a", "b", "c", "d"])
+ tm.assert_numpy_array_equal(res.__array__(), exp_val)
+ tm.assert_index_equal(res.categories, exp_categories)
diff --git a/pandas/tests/categorical/test_subclass.py b/pandas/tests/categorical/test_subclass.py
new file mode 100644
index 0000000000000..4060d2ebf633a
--- /dev/null
+++ b/pandas/tests/categorical/test_subclass.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+from pandas import Categorical
+
+import pandas.util.testing as tm
+
+
+class TestCategoricalSubclassing(object):
+
+ def test_constructor(self):
+ sc = tm.SubclassedCategorical(['a', 'b', 'c'])
+ assert isinstance(sc, tm.SubclassedCategorical)
+ tm.assert_categorical_equal(sc, Categorical(['a', 'b', 'c']))
+
+ def test_from_codes(self):
+ sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
+ assert isinstance(sc, tm.SubclassedCategorical)
+ exp = Categorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
+ tm.assert_categorical_equal(sc, exp)
+
+ def test_map(self):
+ sc = tm.SubclassedCategorical(['a', 'b', 'c'])
+ res = sc.map(lambda x: x.upper())
+ assert isinstance(res, tm.SubclassedCategorical)
+ exp = Categorical(['A', 'B', 'C'])
+ tm.assert_categorical_equal(res, exp)
diff --git a/pandas/tests/categorical/test_warnings.py b/pandas/tests/categorical/test_warnings.py
new file mode 100644
index 0000000000000..91278580254aa
--- /dev/null
+++ b/pandas/tests/categorical/test_warnings.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+
+import pandas.util.testing as tm
+
+
+class TestCategoricalWarnings(object):
+ def test_tab_complete_warning(self, ip):
+ # https://github.com/pandas-dev/pandas/issues/16409
+ pytest.importorskip('IPython', minversion="6.0.0")
+ from IPython.core.completer import provisionalcompleter
+
+ code = "import pandas as pd; c = Categorical([])"
+ ip.run_code(code)
+ with tm.assert_produces_warning(None):
+ with provisionalcompleter('ignore'):
+ list(ip.Completer.completions('c.', 1))
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 8086eda1fd58a..4bba6d7601ae8 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -15,7 +15,7 @@
from pandas.compat import lrange, product
from pandas import (compat, isna, notna, DataFrame, Series,
- MultiIndex, date_range, Timestamp)
+ MultiIndex, date_range, Timestamp, Categorical)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
@@ -306,6 +306,36 @@ def test_describe_bool_frame(self):
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
+ def test_describe_categorical(self):
+ df = DataFrame({'value': np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=['value'], ascending=True)
+ df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
+ right=False, labels=cat_labels)
+ cat = df
+
+ # Categoricals should not show up together with numerical columns
+ result = cat.describe()
+ assert len(result.columns) == 1
+
+ # In a frame, describe() for the cat should be the same as for string
+ # arrays (count, unique, top, freq)
+
+ cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
+ ordered=True)
+ s = Series(cat)
+ result = s.describe()
+ expected = Series([4, 2, "b", 3],
+ index=['count', 'unique', 'top', 'freq'])
+ tm.assert_series_equal(result, expected)
+
+ cat = Series(Categorical(["a", "b", "c", "c"]))
+ df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
+ res = df3.describe()
+ tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
+
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 3d2bee9e01d34..7e952a87688bc 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -15,7 +15,7 @@
from numpy.random import randn
import numpy as np
-from pandas import DataFrame, Series, date_range, timedelta_range
+from pandas import DataFrame, Series, date_range, timedelta_range, Categorical
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
@@ -240,6 +240,29 @@ def test_itertuples(self):
assert not hasattr(tup3, '_fields')
assert isinstance(tup3, tuple)
+ def test_sequence_like_with_categorical(self):
+
+ # GH 7839
+ # make sure can iterate
+ df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
+ "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
+ df['grade'] = Categorical(df['raw_grade'])
+
+ # basic sequencing testing
+ result = list(df.grade.values)
+ expected = np.array(df.grade.values).tolist()
+ tm.assert_almost_equal(result, expected)
+
+ # iteration
+ for t in df.itertuples(index=False):
+ str(t)
+
+ for row, s in df.iterrows():
+ str(s)
+
+ for c, col in df.iteritems():
+ str(s)
+
def test_len(self):
assert len(self.frame) == len(self.frame.index)
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 8fd196bfc4d2a..22ad2258e70bc 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -19,7 +19,7 @@
from pandas import compat
from pandas import (DataFrame, Index, Series, isna,
MultiIndex, Timedelta, Timestamp,
- date_range)
+ date_range, Categorical)
import pandas as pd
import pandas._libs.lib as lib
import pandas.util.testing as tm
@@ -1562,6 +1562,79 @@ def test_constructor_lists_to_object_dtype(self):
assert d['a'].dtype == np.object_
assert not d['a'][1]
+ def test_constructor_categorical(self):
+
+ # GH8626
+
+ # dict creation
+ df = DataFrame({'A': list('abc')}, dtype='category')
+ expected = Series(list('abc'), dtype='category', name='A')
+ tm.assert_series_equal(df['A'], expected)
+
+ # to_frame
+ s = Series(list('abc'), dtype='category')
+ result = s.to_frame()
+ expected = Series(list('abc'), dtype='category', name=0)
+ tm.assert_series_equal(result[0], expected)
+ result = s.to_frame(name='foo')
+ expected = Series(list('abc'), dtype='category', name='foo')
+ tm.assert_series_equal(result['foo'], expected)
+
+ # list-like creation
+ df = DataFrame(list('abc'), dtype='category')
+ expected = Series(list('abc'), dtype='category', name=0)
+ tm.assert_series_equal(df[0], expected)
+
+ # ndim != 1
+ df = DataFrame([Categorical(list('abc'))])
+ expected = DataFrame({0: Series(list('abc'), dtype='category')})
+ tm.assert_frame_equal(df, expected)
+
+ df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
+ expected = DataFrame({0: Series(list('abc'), dtype='category'),
+ 1: Series(list('abd'), dtype='category')},
+ columns=[0, 1])
+ tm.assert_frame_equal(df, expected)
+
+ # mixed
+ df = DataFrame([Categorical(list('abc')), list('def')])
+ expected = DataFrame({0: Series(list('abc'), dtype='category'),
+ 1: list('def')}, columns=[0, 1])
+ tm.assert_frame_equal(df, expected)
+
+ # invalid (shape)
+ pytest.raises(ValueError,
+ lambda: DataFrame([Categorical(list('abc')),
+ Categorical(list('abdefg'))]))
+
+ # ndim > 1
+ pytest.raises(NotImplementedError,
+ lambda: Categorical(np.array([list('abcd')])))
+
+ def test_constructor_categorical_series(self):
+
+ l = [1, 2, 3, 1]
+ exp = Series(l).astype('category')
+ res = Series(l, dtype='category')
+ tm.assert_series_equal(res, exp)
+
+ l = ["a", "b", "c", "a"]
+ exp = Series(l).astype('category')
+ res = Series(l, dtype='category')
+ tm.assert_series_equal(res, exp)
+
+ # insert into frame with different index
+ # GH 8076
+ index = date_range('20000101', periods=3)
+ expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
+ categories=['a', 'b', 'c']))
+ expected.index = index
+
+ expected = DataFrame({'x': expected})
+ df = DataFrame(
+ {'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
+ tm.assert_frame_equal(df, expected)
+
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=('i4,f4,a10'))
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index 7d2d18db8d41c..024de8bc13f72 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -151,6 +151,27 @@ def test_to_records_with_unicode_column_names(self):
)
tm.assert_almost_equal(result, expected)
+ def test_to_records_with_categorical(self):
+
+ # GH8626
+
+ # dict creation
+ df = DataFrame({'A': list('abc')}, dtype='category')
+ expected = Series(list('abc'), dtype='category', name='A')
+ tm.assert_series_equal(df['A'], expected)
+
+ # list-like creation
+ df = DataFrame(list('abc'), dtype='category')
+ expected = Series(list('abc'), dtype='category', name=0)
+ tm.assert_series_equal(df[0], expected)
+
+ # to record array
+ # this coerces
+ result = df.to_records()
+ expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
+ dtype=[('index', '=i8'), ('0', 'O')])
+ tm.assert_almost_equal(result, expected)
+
@pytest.mark.parametrize('mapping', [
dict,
collections.defaultdict(list),
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 7591f1f1459be..610b9f7bdbf6c 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -619,6 +619,13 @@ def test_astype_duplicate_col(self):
expected = concat([a1_str, b, a2_str], axis=1)
assert_frame_equal(result, expected)
+ @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']])
+ def test_categorical_astype_ndim_raises(self, columns):
+ # GH 18004
+ msg = '> 1 ndim Categorical are not supported at this time'
+ with tm.assert_raises_regex(NotImplementedError, msg):
+ DataFrame(columns=columns).astype('category')
+
@pytest.mark.parametrize("cls", [
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py
index 33a3495dcd71e..62bc0eada9d89 100644
--- a/pandas/tests/frame/test_indexing.py
+++ b/pandas/tests/frame/test_indexing.py
@@ -17,7 +17,9 @@
import pandas.core.common as com
from pandas import (DataFrame, Index, Series, notna, isna,
MultiIndex, DatetimeIndex, Timestamp,
- date_range)
+ date_range, Categorical)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
import pandas as pd
from pandas._libs.tslib import iNaT
@@ -3075,3 +3077,372 @@ def test_transpose(self):
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
+
+
+class TestDataFrameIndexingCategorical(object):
+
+ def test_assignment(self):
+ # assignment
+ df = DataFrame({'value': np.array(
+ np.random.randint(0, 10000, 100), dtype='int32')})
+ labels = Categorical(["{0} - {1}".format(i, i + 499)
+ for i in range(0, 10000, 500)])
+
+ df = df.sort_values(by=['value'], ascending=True)
+ s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
+ d = s.values
+ df['D'] = d
+ str(df)
+
+ result = df.dtypes
+ expected = Series(
+ [np.dtype('int32'), CategoricalDtype(categories=labels,
+ ordered=False)],
+ index=['value', 'D'])
+ tm.assert_series_equal(result, expected)
+
+ df['E'] = s
+ str(df)
+
+ result = df.dtypes
+ expected = Series([np.dtype('int32'),
+ CategoricalDtype(categories=labels, ordered=False),
+ CategoricalDtype(categories=labels, ordered=False)],
+ index=['value', 'D', 'E'])
+ tm.assert_series_equal(result, expected)
+
+ result1 = df['D']
+ result2 = df['E']
+ tm.assert_categorical_equal(result1._data._block.values, d)
+
+ # sorting
+ s.name = 'E'
+ tm.assert_series_equal(result2.sort_index(), s.sort_index())
+
+ cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
+ df = DataFrame(Series(cat))
+
+ def test_assigning_ops(self):
+ # systematically test the assigning operations:
+ # for all slicing ops:
+ # for value in categories and value not in categories:
+
+ # - assign a single value -> exp_single_cats_value
+
+ # - assign a complete row (mixed values) -> exp_single_row
+
+ # assign multiple rows (mixed values) (-> array) -> exp_multi_row
+
+ # assign a part of a column with dtype == categorical ->
+ # exp_parts_cats_col
+
+ # assign a part of a column with dtype != categorical ->
+ # exp_parts_cats_col
+
+ cats = Categorical(["a", "a", "a", "a", "a", "a", "a"],
+ categories=["a", "b"])
+ idx = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values = [1, 1, 1, 1, 1, 1, 1]
+ orig = DataFrame({"cats": cats, "values": values}, index=idx)
+
+ # the expected values
+ # changed single row
+ cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"],
+ categories=["a", "b"])
+ idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values1 = [1, 1, 2, 1, 1, 1, 1]
+ exp_single_row = DataFrame({"cats": cats1,
+ "values": values1}, index=idx1)
+
+ # changed multiple rows
+ cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"],
+ categories=["a", "b"])
+ idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values2 = [1, 1, 2, 2, 1, 1, 1]
+ exp_multi_row = DataFrame({"cats": cats2,
+ "values": values2}, index=idx2)
+
+ # changed part of the cats column
+ cats3 = Categorical(
+ ["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
+ idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values3 = [1, 1, 1, 1, 1, 1, 1]
+ exp_parts_cats_col = DataFrame({"cats": cats3,
+ "values": values3}, index=idx3)
+
+ # changed single value in cats col
+ cats4 = Categorical(
+ ["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
+ idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values4 = [1, 1, 1, 1, 1, 1, 1]
+ exp_single_cats_value = DataFrame({"cats": cats4,
+ "values": values4}, index=idx4)
+
+ # iloc
+ # ###############
+ # - assign a single value -> exp_single_cats_value
+ df = orig.copy()
+ df.iloc[2, 0] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ df = orig.copy()
+ df.iloc[df.index == "j", 0] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ # - assign a single value not in the current categories set
+ def f():
+ df = orig.copy()
+ df.iloc[2, 0] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # - assign a complete row (mixed values) -> exp_single_row
+ df = orig.copy()
+ df.iloc[2, :] = ["b", 2]
+ tm.assert_frame_equal(df, exp_single_row)
+
+ # - assign a complete row (mixed values) not in categories set
+ def f():
+ df = orig.copy()
+ df.iloc[2, :] = ["c", 2]
+
+ pytest.raises(ValueError, f)
+
+ # - assign multiple rows (mixed values) -> exp_multi_row
+ df = orig.copy()
+ df.iloc[2:4, :] = [["b", 2], ["b", 2]]
+ tm.assert_frame_equal(df, exp_multi_row)
+
+ def f():
+ df = orig.copy()
+ df.iloc[2:4, :] = [["c", 2], ["c", 2]]
+
+ pytest.raises(ValueError, f)
+
+ # assign a part of a column with dtype == categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ # different categories -> not sure if this should fail or pass
+ df = orig.copy()
+ df.iloc[2:4, 0] = Categorical(list('bb'), categories=list('abc'))
+
+ with pytest.raises(ValueError):
+ # different values
+ df = orig.copy()
+ df.iloc[2:4, 0] = Categorical(list('cc'), categories=list('abc'))
+
+ # assign a part of a column with dtype != categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.iloc[2:4, 0] = ["b", "b"]
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ df.iloc[2:4, 0] = ["c", "c"]
+
+ # loc
+ # ##############
+ # - assign a single value -> exp_single_cats_value
+ df = orig.copy()
+ df.loc["j", "cats"] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ df = orig.copy()
+ df.loc[df.index == "j", "cats"] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ # - assign a single value not in the current categories set
+ def f():
+ df = orig.copy()
+ df.loc["j", "cats"] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # - assign a complete row (mixed values) -> exp_single_row
+ df = orig.copy()
+ df.loc["j", :] = ["b", 2]
+ tm.assert_frame_equal(df, exp_single_row)
+
+ # - assign a complete row (mixed values) not in categories set
+ def f():
+ df = orig.copy()
+ df.loc["j", :] = ["c", 2]
+
+ pytest.raises(ValueError, f)
+
+ # - assign multiple rows (mixed values) -> exp_multi_row
+ df = orig.copy()
+ df.loc["j":"k", :] = [["b", 2], ["b", 2]]
+ tm.assert_frame_equal(df, exp_multi_row)
+
+ def f():
+ df = orig.copy()
+ df.loc["j":"k", :] = [["c", 2], ["c", 2]]
+
+ pytest.raises(ValueError, f)
+
+ # assign a part of a column with dtype == categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.loc["j":"k", "cats"] = Categorical(
+ ["b", "b"], categories=["a", "b"])
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ # different categories -> not sure if this should fail or pass
+ df = orig.copy()
+ df.loc["j":"k", "cats"] = Categorical(
+ ["b", "b"], categories=["a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ # different values
+ df = orig.copy()
+ df.loc["j":"k", "cats"] = Categorical(
+ ["c", "c"], categories=["a", "b", "c"])
+
+ # assign a part of a column with dtype != categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.loc["j":"k", "cats"] = ["b", "b"]
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ df.loc["j":"k", "cats"] = ["c", "c"]
+
+ # loc
+ # ##############
+ # - assign a single value -> exp_single_cats_value
+ df = orig.copy()
+ df.loc["j", df.columns[0]] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ df = orig.copy()
+ df.loc[df.index == "j", df.columns[0]] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ # - assign a single value not in the current categories set
+ def f():
+ df = orig.copy()
+ df.loc["j", df.columns[0]] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # - assign a complete row (mixed values) -> exp_single_row
+ df = orig.copy()
+ df.loc["j", :] = ["b", 2]
+ tm.assert_frame_equal(df, exp_single_row)
+
+ # - assign a complete row (mixed values) not in categories set
+ def f():
+ df = orig.copy()
+ df.loc["j", :] = ["c", 2]
+
+ pytest.raises(ValueError, f)
+
+ # - assign multiple rows (mixed values) -> exp_multi_row
+ df = orig.copy()
+ df.loc["j":"k", :] = [["b", 2], ["b", 2]]
+ tm.assert_frame_equal(df, exp_multi_row)
+
+ def f():
+ df = orig.copy()
+ df.loc["j":"k", :] = [["c", 2], ["c", 2]]
+
+ pytest.raises(ValueError, f)
+
+ # assign a part of a column with dtype == categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.loc["j":"k", df.columns[0]] = Categorical(
+ ["b", "b"], categories=["a", "b"])
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ # different categories -> not sure if this should fail or pass
+ df = orig.copy()
+ df.loc["j":"k", df.columns[0]] = Categorical(
+ ["b", "b"], categories=["a", "b", "c"])
+
+ with pytest.raises(ValueError):
+ # different values
+ df = orig.copy()
+ df.loc["j":"k", df.columns[0]] = Categorical(
+ ["c", "c"], categories=["a", "b", "c"])
+
+ # assign a part of a column with dtype != categorical ->
+ # exp_parts_cats_col
+ df = orig.copy()
+ df.loc["j":"k", df.columns[0]] = ["b", "b"]
+ tm.assert_frame_equal(df, exp_parts_cats_col)
+
+ with pytest.raises(ValueError):
+ df.loc["j":"k", df.columns[0]] = ["c", "c"]
+
+ # iat
+ df = orig.copy()
+ df.iat[2, 0] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ # - assign a single value not in the current categories set
+ def f():
+ df = orig.copy()
+ df.iat[2, 0] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # at
+ # - assign a single value -> exp_single_cats_value
+ df = orig.copy()
+ df.at["j", "cats"] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ # - assign a single value not in the current categories set
+ def f():
+ df = orig.copy()
+ df.at["j", "cats"] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # fancy indexing
+ catsf = Categorical(["a", "a", "c", "c", "a", "a", "a"],
+ categories=["a", "b", "c"])
+ idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
+ valuesf = [1, 1, 3, 3, 1, 1, 1]
+ df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
+
+ exp_fancy = exp_multi_row.copy()
+ exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
+
+ df[df["cats"] == "c"] = ["b", 2]
+ # category c is kept in .categories
+ tm.assert_frame_equal(df, exp_fancy)
+
+ # set_value
+ df = orig.copy()
+ df.at["j", "cats"] = "b"
+ tm.assert_frame_equal(df, exp_single_cats_value)
+
+ def f():
+ df = orig.copy()
+ df.at["j", "cats"] = "c"
+
+ pytest.raises(ValueError, f)
+
+ # Assigning a Category to parts of a int/... column uses the values of
+ # the Catgorical
+ df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
+ exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
+ df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
+ df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
+ tm.assert_frame_equal(df, exp)
+
+ def test_functions_no_warnings(self):
+ df = DataFrame({'value': np.random.randint(0, 100, 20)})
+ labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
+ with tm.assert_produces_warning(False):
+ df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
+ labels=labels)
diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py
index 37f8c0cc85b23..8c46dc30a0f5f 100644
--- a/pandas/tests/frame/test_repr_info.py
+++ b/pandas/tests/frame/test_repr_info.py
@@ -10,7 +10,8 @@
import numpy as np
import pytest
-from pandas import (DataFrame, compat, option_context)
+from pandas import (DataFrame, Series, compat, option_context,
+ date_range, period_range, Categorical)
from pandas.compat import StringIO, lrange, u, PYPY
import pandas.io.formats.format as fmt
import pandas as pd
@@ -471,3 +472,34 @@ def test_info_categorical(self):
buf = StringIO()
df.info(buf=buf)
+
+ def test_info_categorical_column(self):
+
+ # make sure it works
+ n = 2500
+ df = DataFrame({'int64': np.random.randint(100, size=n)})
+ df['category'] = Series(np.array(list('abcdefghij')).take(
+ np.random.randint(0, 10, size=n))).astype('category')
+ df.isna()
+ buf = StringIO()
+ df.info(buf=buf)
+
+ df2 = df[df['category'] == 'd']
+ buf = compat.StringIO()
+ df2.info(buf=buf)
+
+ def test_repr_categorical_dates_periods(self):
+ # normal DataFrame
+ dt = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ p = period_range('2011-01', freq='M', periods=5)
+ df = DataFrame({'dt': dt, 'p': p})
+ exp = """ dt p
+0 2011-01-01 09:00:00-05:00 2011-01
+1 2011-01-01 10:00:00-05:00 2011-02
+2 2011-01-01 11:00:00-05:00 2011-03
+3 2011-01-01 12:00:00-05:00 2011-04
+4 2011-01-01 13:00:00-05:00 2011-05"""
+
+ df = DataFrame({'dt': Categorical(dt), 'p': Categorical(p)})
+ assert repr(df) == exp
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index fdc03acd3e931..c73423921898d 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -17,6 +17,142 @@
class TestGroupByCategorical(MixIn):
+ def test_groupby(self):
+
+ cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
+ categories=["a", "b", "c", "d"], ordered=True)
+ data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
+
+ exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
+ expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
+ result = data.groupby("b").mean()
+ tm.assert_frame_equal(result, expected)
+
+ raw_cat1 = Categorical(["a", "a", "b", "b"],
+ categories=["a", "b", "z"], ordered=True)
+ raw_cat2 = Categorical(["c", "d", "c", "d"],
+ categories=["c", "d", "y"], ordered=True)
+ df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
+
+ # single grouper
+ gb = df.groupby("A")
+ exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
+ expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # multiple groupers
+ gb = df.groupby(['A', 'B'])
+ exp_index = pd.MultiIndex.from_product(
+ [Categorical(["a", "b", "z"], ordered=True),
+ Categorical(["c", "d", "y"], ordered=True)],
+ names=['A', 'B'])
+ expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
+ np.nan, np.nan, np.nan]},
+ index=exp_index)
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # multiple groupers with a non-cat
+ df = df.copy()
+ df['C'] = ['foo', 'bar'] * 2
+ gb = df.groupby(['A', 'B', 'C'])
+ exp_index = pd.MultiIndex.from_product(
+ [Categorical(["a", "b", "z"], ordered=True),
+ Categorical(["c", "d", "y"], ordered=True),
+ ['foo', 'bar']],
+ names=['A', 'B', 'C'])
+ expected = DataFrame({'values': Series(
+ np.nan, index=exp_index)}).sort_index()
+ expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
+ result = gb.sum()
+ tm.assert_frame_equal(result, expected)
+
+ # GH 8623
+ x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
+ [1, 'John P. Doe']],
+ columns=['person_id', 'person_name'])
+ x['person_name'] = Categorical(x.person_name)
+
+ g = x.groupby(['person_id'])
+ result = g.transform(lambda x: x)
+ tm.assert_frame_equal(result, x[['person_name']])
+
+ result = x.drop_duplicates('person_name')
+ expected = x.iloc[[0, 1]]
+ tm.assert_frame_equal(result, expected)
+
+ def f(x):
+ return x.drop_duplicates('person_name').iloc[0]
+
+ result = g.apply(f)
+ expected = x.iloc[[0, 1]].copy()
+ expected.index = Index([1, 2], name='person_id')
+ expected['person_name'] = expected['person_name'].astype('object')
+ tm.assert_frame_equal(result, expected)
+
+ # GH 9921
+ # Monotonic
+ df = DataFrame({"a": [5, 15, 25]})
+ c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
+
+ result = df.a.groupby(c).transform(sum)
+ tm.assert_series_equal(result, df['a'])
+
+ tm.assert_series_equal(
+ df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
+ tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
+ tm.assert_frame_equal(
+ df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
+
+ # Filter
+ tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
+ tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
+
+ # Non-monotonic
+ df = DataFrame({"a": [5, 15, 25, -5]})
+ c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
+
+ result = df.a.groupby(c).transform(sum)
+ tm.assert_series_equal(result, df['a'])
+
+ tm.assert_series_equal(
+ df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
+ tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
+ tm.assert_frame_equal(
+ df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
+
+ # GH 9603
+ df = DataFrame({'a': [1, 0, 0, 0]})
+ c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
+ result = df.groupby(c).apply(len)
+
+ exp_index = CategoricalIndex(
+ c.values.categories, ordered=c.values.ordered)
+ expected = Series([1, 0, 0, 0], index=exp_index)
+ expected.index.name = 'a'
+ tm.assert_series_equal(result, expected)
+
+ def test_groupby_sort(self):
+
+ # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
+ # This should result in a properly sorted Series so that the plot
+ # has a sorted x axis
+ # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
+
+ df = DataFrame({'value': np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=['value'], ascending=True)
+ df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
+ right=False, labels=cat_labels)
+
+ res = df.groupby(['value_group'])['value_group'].count()
+ exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
+ exp.index = CategoricalIndex(exp.index, name=exp.index.name)
+ tm.assert_series_equal(res, exp)
+
def test_level_groupby_get_group(self):
# GH15155
df = DataFrame(data=np.arange(2, 22, 2),
diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py
index 22b3fd9073bab..f2182687d047f 100644
--- a/pandas/tests/indexing/test_categorical.py
+++ b/pandas/tests/indexing/test_categorical.py
@@ -3,12 +3,15 @@
import pytest
import pandas as pd
+import pandas.compat as compat
import numpy as np
-from pandas import (Series, DataFrame, Timestamp,
- Categorical, CategoricalIndex)
+from pandas import (Series, DataFrame, Timestamp, Categorical,
+ CategoricalIndex, Interval, Index)
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas.util import testing as tm
+from pandas.core.dtypes.common import is_categorical_dtype
from pandas.api.types import CategoricalDtype as CDT
+from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalIndex(object):
@@ -75,6 +78,229 @@ def test_getitem_scalar(self):
result = s[cats[0]]
assert result == expected
+ def test_slicing_directly(self):
+ cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
+ sliced = cat[3]
+ assert sliced == "d"
+ sliced = cat[3:5]
+ expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
+ tm.assert_numpy_array_equal(sliced._codes, expected._codes)
+ tm.assert_index_equal(sliced.categories, expected.categories)
+
+ def test_slicing(self):
+ cat = Series(Categorical([1, 2, 3, 4]))
+ reversed = cat[::-1]
+ exp = np.array([4, 3, 2, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(reversed.__array__(), exp)
+
+ df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
+ df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
+
+ expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
+ result = df.iloc[10]
+ tm.assert_series_equal(result, expected)
+
+ expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
+ index=np.arange(10, 20).astype('int64'))
+ expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
+ result = df.iloc[10:20]
+ tm.assert_frame_equal(result, expected)
+
+ expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
+ result = df.loc[8]
+ tm.assert_series_equal(result, expected)
+
+ def test_slicing_and_getting_ops(self):
+
+ # systematically test the slicing operations:
+ # for all slicing ops:
+ # - returning a dataframe
+ # - returning a column
+ # - returning a row
+ # - returning a single value
+
+ cats = Categorical(
+ ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
+ idx = Index(["h", "i", "j", "k", "l", "m", "n"])
+ values = [1, 2, 3, 4, 5, 6, 7]
+ df = DataFrame({"cats": cats, "values": values}, index=idx)
+
+ # the expected values
+ cats2 = Categorical(["b", "c"], categories=["a", "b", "c"])
+ idx2 = Index(["j", "k"])
+ values2 = [3, 4]
+
+ # 2:4,: | "j":"k",:
+ exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2)
+
+ # :,"cats" | :,0
+ exp_col = Series(cats, index=idx, name='cats')
+
+ # "j",: | 2,:
+ exp_row = Series(["b", 3], index=["cats", "values"], dtype="object",
+ name="j")
+
+ # "j","cats | 2,0
+ exp_val = "b"
+
+ # iloc
+ # frame
+ res_df = df.iloc[2:4, :]
+ tm.assert_frame_equal(res_df, exp_df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ # row
+ res_row = df.iloc[2, :]
+ tm.assert_series_equal(res_row, exp_row)
+ assert isinstance(res_row["cats"], compat.string_types)
+
+ # col
+ res_col = df.iloc[:, 0]
+ tm.assert_series_equal(res_col, exp_col)
+ assert is_categorical_dtype(res_col)
+
+ # single value
+ res_val = df.iloc[2, 0]
+ assert res_val == exp_val
+
+ # loc
+ # frame
+ res_df = df.loc["j":"k", :]
+ tm.assert_frame_equal(res_df, exp_df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ # row
+ res_row = df.loc["j", :]
+ tm.assert_series_equal(res_row, exp_row)
+ assert isinstance(res_row["cats"], compat.string_types)
+
+ # col
+ res_col = df.loc[:, "cats"]
+ tm.assert_series_equal(res_col, exp_col)
+ assert is_categorical_dtype(res_col)
+
+ # single value
+ res_val = df.loc["j", "cats"]
+ assert res_val == exp_val
+
+ # ix
+ # frame
+ # res_df = df.loc["j":"k",[0,1]] # doesn't work?
+ res_df = df.loc["j":"k", :]
+ tm.assert_frame_equal(res_df, exp_df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ # row
+ res_row = df.loc["j", :]
+ tm.assert_series_equal(res_row, exp_row)
+ assert isinstance(res_row["cats"], compat.string_types)
+
+ # col
+ res_col = df.loc[:, "cats"]
+ tm.assert_series_equal(res_col, exp_col)
+ assert is_categorical_dtype(res_col)
+
+ # single value
+ res_val = df.loc["j", df.columns[0]]
+ assert res_val == exp_val
+
+ # iat
+ res_val = df.iat[2, 0]
+ assert res_val == exp_val
+
+ # at
+ res_val = df.at["j", "cats"]
+ assert res_val == exp_val
+
+ # fancy indexing
+ exp_fancy = df.iloc[[2]]
+
+ res_fancy = df[df["cats"] == "b"]
+ tm.assert_frame_equal(res_fancy, exp_fancy)
+ res_fancy = df[df["values"] == 3]
+ tm.assert_frame_equal(res_fancy, exp_fancy)
+
+ # get_value
+ res_val = df.at["j", "cats"]
+ assert res_val == exp_val
+
+ # i : int, slice, or sequence of integers
+ res_row = df.iloc[2]
+ tm.assert_series_equal(res_row, exp_row)
+ assert isinstance(res_row["cats"], compat.string_types)
+
+ res_df = df.iloc[slice(2, 4)]
+ tm.assert_frame_equal(res_df, exp_df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ res_df = df.iloc[[2, 3]]
+ tm.assert_frame_equal(res_df, exp_df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ res_col = df.iloc[:, 0]
+ tm.assert_series_equal(res_col, exp_col)
+ assert is_categorical_dtype(res_col)
+
+ res_df = df.iloc[:, slice(0, 2)]
+ tm.assert_frame_equal(res_df, df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ res_df = df.iloc[:, [0, 1]]
+ tm.assert_frame_equal(res_df, df)
+ assert is_categorical_dtype(res_df["cats"])
+
+ def test_slicing_doc_examples(self):
+
+ # GH 7918
+ cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
+ categories=["a", "b", "c"])
+ idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
+ values = [1, 2, 2, 2, 3, 4, 5]
+ df = DataFrame({"cats": cats, "values": values}, index=idx)
+
+ result = df.iloc[2:4, :]
+ expected = DataFrame(
+ {"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
+ "values": [2, 2]}, index=['j', 'k'])
+ tm.assert_frame_equal(result, expected)
+
+ result = df.iloc[2:4, :].dtypes
+ expected = Series(['category', 'int64'], ['cats', 'values'])
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc["h":"j", "cats"]
+ expected = Series(Categorical(['a', 'b', 'b'],
+ categories=['a', 'b', 'c']),
+ index=['h', 'i', 'j'], name='cats')
+ tm.assert_series_equal(result, expected)
+
+ result = df.loc["h":"j", df.columns[0:1]]
+ expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
+ categories=['a', 'b', 'c'])},
+ index=['h', 'i', 'j'])
+ tm.assert_frame_equal(result, expected)
+
+ def test_getitem_category_type(self):
+ # GH 14580
+ # test iloc() on Series with Categorical data
+
+ s = Series([1, 2, 3]).astype('category')
+
+ # get slice
+ result = s.iloc[0:2]
+ expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
+ # get list of indexes
+ result = s.iloc[[0, 1]]
+ expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
+ # get boolean array
+ result = s.iloc[[True, False, False]]
+ expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
+ tm.assert_series_equal(result, expected)
+
def test_loc_listlike(self):
# list of labels
@@ -217,6 +443,21 @@ def test_getitem_with_listlike(self):
result = dummies[[c for c in dummies.columns]]
assert_frame_equal(result, expected)
+ def test_setitem_listlike(self):
+
+ # GH 9469
+ # properly coerce the input indexers
+ np.random.seed(1)
+ c = Categorical(np.random.randint(0, 5, size=150000).astype(
+ np.int8)).add_categories([-1000])
+ indexer = np.array([100000]).astype(np.int64)
+ c[indexer] = -1000
+
+ # we are asserting the code result here
+ # which maps to the -1000 category
+ result = c.codes[np.array([100000]).astype(np.int64)]
+ tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
+
def test_ix_categorical_index(self):
# GH 12531
df = DataFrame(np.random.randn(3, 3),
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index cd0701e3864fc..6f2d2ce2a8583 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -1504,6 +1504,57 @@ def test_basic(self, left, right):
index=['X', 'Y', 'Z'])
assert_series_equal(result, expected)
+ def test_merge_categorical(self):
+ # GH 9426
+
+ right = DataFrame({'c': {0: 'a',
+ 1: 'b',
+ 2: 'c',
+ 3: 'd',
+ 4: 'e'},
+ 'd': {0: 'null',
+ 1: 'null',
+ 2: 'null',
+ 3: 'null',
+ 4: 'null'}})
+ left = DataFrame({'a': {0: 'f',
+ 1: 'f',
+ 2: 'f',
+ 3: 'f',
+ 4: 'f'},
+ 'b': {0: 'g',
+ 1: 'g',
+ 2: 'g',
+ 3: 'g',
+ 4: 'g'}})
+ df = pd.merge(left, right, how='left', left_on='b', right_on='c')
+
+ # object-object
+ expected = df.copy()
+
+ # object-cat
+ # note that we propagate the category
+ # because we don't have any matching rows
+ cright = right.copy()
+ cright['d'] = cright['d'].astype('category')
+ result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
+ expected['d'] = expected['d'].astype(CategoricalDtype(['null']))
+ tm.assert_frame_equal(result, expected)
+
+ # cat-object
+ cleft = left.copy()
+ cleft['b'] = cleft['b'].astype('category')
+ result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
+ tm.assert_frame_equal(result, expected)
+
+ # cat-cat
+ cright = right.copy()
+ cright['d'] = cright['d'].astype('category')
+ cleft = left.copy()
+ cleft['b'] = cleft['b'].astype('category')
+ result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
+ tm.assert_frame_equal(result, expected)
+
def test_other_columns(self, left, right):
# non-merge columns should preserve if possible
right = right.assign(Z=right.Z.astype('category'))
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py
index ae41502f237f1..bdbf2a0ee2f68 100644
--- a/pandas/tests/reshape/test_concat.py
+++ b/pandas/tests/reshape/test_concat.py
@@ -10,7 +10,8 @@
from pandas import (DataFrame, concat,
read_csv, isna, Series, date_range,
Index, Panel, MultiIndex, Timestamp,
- DatetimeIndex)
+ DatetimeIndex, Categorical)
+from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util import testing as tm
from pandas.util.testing import (assert_frame_equal,
makeCustomDataframe as mkdf)
@@ -1927,6 +1928,144 @@ def test_concat_multiindex_dfs_with_deepcopy(self):
result_no_copy = pd.concat(example_dict, names=['testname'])
tm.assert_frame_equal(result_no_copy, expected)
+ def test_categorical_concat_append(self):
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ vals = [1, 2]
+ df = DataFrame({"cats": cat, "vals": vals})
+ cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
+ vals2 = [1, 2, 1, 2]
+ exp = DataFrame({"cats": cat2, "vals": vals2},
+ index=Index([0, 1, 0, 1]))
+
+ tm.assert_frame_equal(pd.concat([df, df]), exp)
+ tm.assert_frame_equal(df.append(df), exp)
+
+ # GH 13524 can concat different categories
+ cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
+ vals3 = [1, 2]
+ df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
+
+ res = pd.concat([df, df_different_categories], ignore_index=True)
+ exp = DataFrame({"cats": list('abab'), "vals": [1, 2, 1, 2]})
+ tm.assert_frame_equal(res, exp)
+
+ res = df.append(df_different_categories, ignore_index=True)
+ tm.assert_frame_equal(res, exp)
+
+ def test_categorical_concat_dtypes(self):
+
+ # GH8143
+ index = ['cat', 'obj', 'num']
+ cat = Categorical(['a', 'b', 'c'])
+ obj = Series(['a', 'b', 'c'])
+ num = Series([1, 2, 3])
+ df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
+
+ result = df.dtypes == 'object'
+ expected = Series([False, True, False], index=index)
+ tm.assert_series_equal(result, expected)
+
+ result = df.dtypes == 'int64'
+ expected = Series([False, False, True], index=index)
+ tm.assert_series_equal(result, expected)
+
+ result = df.dtypes == 'category'
+ expected = Series([True, False, False], index=index)
+ tm.assert_series_equal(result, expected)
+
+ def test_categorical_concat(self):
+ # See GH 10177
+ df1 = DataFrame(np.arange(18, dtype='int64').reshape(6, 3),
+ columns=["a", "b", "c"])
+
+ df2 = DataFrame(np.arange(14, dtype='int64').reshape(7, 2),
+ columns=["a", "c"])
+
+ cat_values = ["one", "one", "two", "one", "two", "two", "one"]
+ df2['h'] = Series(Categorical(cat_values))
+
+ res = pd.concat((df1, df2), axis=0, ignore_index=True)
+ exp = DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
+ 'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan,
+ np.nan, np.nan, np.nan, np.nan],
+ 'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
+ 'h': [None] * 6 + cat_values})
+ tm.assert_frame_equal(res, exp)
+
+ def test_categorical_concat_gh7864(self):
+ # GH 7864
+        # make sure ordering is preserved
+ df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list('abbaae')})
+ df["grade"] = Categorical(df["raw_grade"])
+ df['grade'].cat.set_categories(['e', 'a', 'b'])
+
+ df1 = df[0:3]
+ df2 = df[3:]
+
+ tm.assert_index_equal(df['grade'].cat.categories,
+ df1['grade'].cat.categories)
+ tm.assert_index_equal(df['grade'].cat.categories,
+ df2['grade'].cat.categories)
+
+ dfx = pd.concat([df1, df2])
+ tm.assert_index_equal(df['grade'].cat.categories,
+ dfx['grade'].cat.categories)
+
+ dfa = df1.append(df2)
+ tm.assert_index_equal(df['grade'].cat.categories,
+ dfa['grade'].cat.categories)
+
+ def test_categorical_concat_preserve(self):
+
+ # GH 8641 series concat not preserving category dtype
+ # GH 13524 can concat different categories
+ s = Series(list('abc'), dtype='category')
+ s2 = Series(list('abd'), dtype='category')
+
+ exp = Series(list('abcabd'))
+ res = pd.concat([s, s2], ignore_index=True)
+ tm.assert_series_equal(res, exp)
+
+ exp = Series(list('abcabc'), dtype='category')
+ res = pd.concat([s, s], ignore_index=True)
+ tm.assert_series_equal(res, exp)
+
+ exp = Series(list('abcabc'), index=[0, 1, 2, 0, 1, 2],
+ dtype='category')
+ res = pd.concat([s, s])
+ tm.assert_series_equal(res, exp)
+
+ a = Series(np.arange(6, dtype='int64'))
+ b = Series(list('aabbca'))
+
+ df2 = DataFrame({'A': a,
+ 'B': b.astype(CategoricalDtype(list('cab')))})
+ res = pd.concat([df2, df2])
+ exp = DataFrame(
+ {'A': pd.concat([a, a]),
+ 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab')))})
+ tm.assert_frame_equal(res, exp)
+
+ def test_categorical_index_preserver(self):
+
+ a = Series(np.arange(6, dtype='int64'))
+ b = Series(list('aabbca'))
+
+ df2 = DataFrame({'A': a,
+ 'B': b.astype(CategoricalDtype(list('cab')))
+ }).set_index('B')
+ result = pd.concat([df2, df2])
+ expected = DataFrame(
+ {'A': pd.concat([a, a]),
+ 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab')))
+ }).set_index('B')
+ tm.assert_frame_equal(result, expected)
+
+        # wrong categories
+ df3 = DataFrame({'A': a, 'B': Categorical(b, categories=list('abe'))
+ }).set_index('B')
+ pytest.raises(TypeError, lambda: pd.concat([df2, df3]))
+
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index 857116c8f8f78..c8b7ae044b71c 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -9,7 +9,7 @@
from collections import OrderedDict
import pandas as pd
from pandas import (DataFrame, Series, Index, MultiIndex,
- Grouper, date_range, concat)
+ Grouper, date_range, concat, Categorical)
from pandas.core.reshape.pivot import pivot_table, crosstab
from pandas.compat import range, product
import pandas.util.testing as tm
@@ -91,6 +91,24 @@ def test_pivot_table_dropna(self):
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
+ def test_pivot_table_categorical(self):
+
+ raw_cat1 = Categorical(["a", "a", "b", "b"],
+ categories=["a", "b", "z"], ordered=True)
+ raw_cat2 = Categorical(["c", "d", "c", "d"],
+ categories=["c", "d", "y"], ordered=True)
+ df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
+ result = pd.pivot_table(df, values='values', index=['A', 'B'])
+
+ exp_index = pd.MultiIndex.from_product(
+ [Categorical(["a", "b", "z"], ordered=True),
+ Categorical(["c", "d", "y"], ordered=True)],
+ names=['A', 'B'])
+ expected = DataFrame(
+ {'values': [1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan]},
+ index=exp_index)
+ tm.assert_frame_equal(result, expected)
+
def test_pivot_table_dropna_categoricals(self):
# GH 15193
categories = ['a', 'b', 'c', 'd']
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index 3e68ff7cf2f59..f472c6ae9383c 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
+from warnings import catch_warnings
import pytest
from pandas import DataFrame, Series
@@ -11,7 +12,7 @@
from pandas.util.testing import assert_frame_equal
-from pandas import get_dummies
+from pandas import get_dummies, Categorical, Index
import pandas.util.testing as tm
from pandas.compat import u
@@ -454,6 +455,63 @@ def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
tm.assert_frame_equal(result, expected)
+class TestCategoricalReshape(object):
+
+ def test_reshaping_panel_categorical(self):
+
+ with catch_warnings(record=True):
+ p = tm.makePanel()
+ p['str'] = 'foo'
+ df = p.to_frame()
+
+ df['category'] = df['str'].astype('category')
+ result = df['category'].unstack()
+
+ c = Categorical(['foo'] * len(p.major_axis))
+ expected = DataFrame({'A': c.copy(),
+ 'B': c.copy(),
+ 'C': c.copy(),
+ 'D': c.copy()},
+ columns=Index(list('ABCD'), name='minor'),
+ index=p.major_axis.set_names('major'))
+ tm.assert_frame_equal(result, expected)
+
+ def test_reshape_categorical(self):
+ cat = Categorical([], categories=["a", "b"])
+ tm.assert_produces_warning(FutureWarning, cat.reshape, 0)
+
+ with tm.assert_produces_warning(FutureWarning):
+ cat = Categorical([], categories=["a", "b"])
+ tm.assert_categorical_equal(cat.reshape(0), cat)
+
+ with tm.assert_produces_warning(FutureWarning):
+ cat = Categorical([], categories=["a", "b"])
+ tm.assert_categorical_equal(cat.reshape((5, -1)), cat)
+
+ with tm.assert_produces_warning(FutureWarning):
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ tm.assert_categorical_equal(cat.reshape(cat.shape), cat)
+
+ with tm.assert_produces_warning(FutureWarning):
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ tm.assert_categorical_equal(cat.reshape(cat.size), cat)
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ msg = "can only specify one unknown dimension"
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ tm.assert_raises_regex(ValueError, msg, cat.reshape, (-2, -1))
+
+ def test_reshape_categorical_numpy(self):
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ cat = Categorical(["a", "b"], categories=["a", "b"])
+ tm.assert_categorical_equal(np.reshape(cat, cat.shape), cat)
+
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ msg = "the 'order' parameter is not supported"
+ tm.assert_raises_regex(ValueError, msg, np.reshape,
+ cat, cat.shape, order='F')
+
+
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index caaa122024cba..289b5c01c1263 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -11,7 +11,8 @@
import pandas as pd
from pandas import (Series, Categorical, DataFrame, isna, notna,
- bdate_range, date_range, _np_version_under1p10)
+ bdate_range, date_range, _np_version_under1p10,
+ CategoricalIndex)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
@@ -1865,3 +1866,218 @@ def test_n(self, n):
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
+
+
+class TestCategoricalSeriesAnalytics(object):
+
+ def test_count(self):
+
+ s = Series(Categorical([np.nan, 1, 2, np.nan],
+ categories=[5, 4, 3, 2, 1], ordered=True))
+ result = s.count()
+ assert result == 2
+
+ def test_min_max(self):
+ # unordered cats have no min/max
+ cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
+ pytest.raises(TypeError, lambda: cat.min())
+ pytest.raises(TypeError, lambda: cat.max())
+
+ cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
+ _min = cat.min()
+ _max = cat.max()
+ assert _min == "a"
+ assert _max == "d"
+
+ cat = Series(Categorical(["a", "b", "c", "d"], categories=[
+ 'd', 'c', 'b', 'a'], ordered=True))
+ _min = cat.min()
+ _max = cat.max()
+ assert _min == "d"
+ assert _max == "a"
+
+ cat = Series(Categorical(
+ [np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
+ ], ordered=True))
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == "b"
+
+ cat = Series(Categorical(
+ [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
+ _min = cat.min()
+ _max = cat.max()
+ assert np.isnan(_min)
+ assert _max == 1
+
+ def test_mode(self):
+ s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
+ categories=[5, 4, 3, 2, 1], ordered=True))
+ res = s.mode()
+ exp = Series(Categorical([5], categories=[
+ 5, 4, 3, 2, 1], ordered=True))
+ tm.assert_series_equal(res, exp)
+ s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
+ categories=[5, 4, 3, 2, 1], ordered=True))
+ res = s.mode()
+ exp = Series(Categorical([5, 1], categories=[
+ 5, 4, 3, 2, 1], ordered=True))
+ tm.assert_series_equal(res, exp)
+ s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
+ ordered=True))
+ res = s.mode()
+ exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1],
+ ordered=True))
+ tm.assert_series_equal(res, exp)
+
+ def test_value_counts(self):
+ # GH 12835
+ cats = Categorical(list('abcccb'), categories=list('cabd'))
+ s = Series(cats, name='xxx')
+ res = s.value_counts(sort=False)
+
+ exp_index = CategoricalIndex(list('cabd'), categories=cats.categories)
+ exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
+ tm.assert_series_equal(res, exp)
+
+ res = s.value_counts(sort=True)
+
+ exp_index = CategoricalIndex(list('cbad'), categories=cats.categories)
+ exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
+ tm.assert_series_equal(res, exp)
+
+ # check object dtype handles the Series.name as the same
+ # (tested in test_base.py)
+ s = Series(["a", "b", "c", "c", "c", "b"], name='xxx')
+ res = s.value_counts()
+ exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
+ tm.assert_series_equal(res, exp)
+
+ def test_value_counts_with_nan(self):
+ # see gh-9443
+
+ # sanity check
+ s = Series(["a", "b", "a"], dtype="category")
+ exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
+
+ res = s.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ res = s.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ # same Series via two different constructions --> same behaviour
+ series = [
+ Series(["a", "b", None, "a", None, None], dtype="category"),
+ Series(Categorical(["a", "b", None, "a", None, None],
+ categories=["a", "b"]))
+ ]
+
+ for s in series:
+ # None is a NaN value, so we exclude its count here
+ exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
+ res = s.value_counts(dropna=True)
+ tm.assert_series_equal(res, exp)
+
+ # we don't exclude the count of None and sort by counts
+ exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
+ res = s.value_counts(dropna=False)
+ tm.assert_series_equal(res, exp)
+
+ # When we aren't sorting by counts, and np.nan isn't a
+ # category, it should be last.
+ exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
+ res = s.value_counts(dropna=False, sort=False)
+ tm.assert_series_equal(res, exp)
+
+ @pytest.mark.parametrize(
+ "dtype",
+ ["int_", "uint", "float_", "unicode_", "timedelta64[h]",
+ pytest.param("datetime64[D]",
+ marks=pytest.mark.xfail(reason="issue7996"))]
+ )
+ @pytest.mark.parametrize("is_ordered", [True, False])
+ def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
+ cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
+
+ # Test case 1
+ input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
+ tc1 = Series(Categorical(input1, categories=cat_array,
+ ordered=is_ordered))
+
+ expected = Series([False, False, False, True])
+ tm.assert_series_equal(tc1.duplicated(), expected)
+ tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
+ sc = tc1.copy()
+ sc.drop_duplicates(inplace=True)
+ tm.assert_series_equal(sc, tc1[~expected])
+
+ expected = Series([False, False, True, False])
+ tm.assert_series_equal(tc1.duplicated(keep='last'), expected)
+ tm.assert_series_equal(tc1.drop_duplicates(keep='last'),
+ tc1[~expected])
+ sc = tc1.copy()
+ sc.drop_duplicates(keep='last', inplace=True)
+ tm.assert_series_equal(sc, tc1[~expected])
+
+ expected = Series([False, False, True, True])
+ tm.assert_series_equal(tc1.duplicated(keep=False), expected)
+ tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
+ sc = tc1.copy()
+ sc.drop_duplicates(keep=False, inplace=True)
+ tm.assert_series_equal(sc, tc1[~expected])
+
+ # Test case 2
+ input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
+ tc2 = Series(Categorical(
+ input2, categories=cat_array, ordered=is_ordered)
+ )
+
+ expected = Series([False, False, False, False, True, True, False])
+ tm.assert_series_equal(tc2.duplicated(), expected)
+ tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
+ sc = tc2.copy()
+ sc.drop_duplicates(inplace=True)
+ tm.assert_series_equal(sc, tc2[~expected])
+
+ expected = Series([False, True, True, False, False, False, False])
+ tm.assert_series_equal(tc2.duplicated(keep='last'), expected)
+ tm.assert_series_equal(tc2.drop_duplicates(keep='last'),
+ tc2[~expected])
+ sc = tc2.copy()
+ sc.drop_duplicates(keep='last', inplace=True)
+ tm.assert_series_equal(sc, tc2[~expected])
+
+ expected = Series([False, True, True, False, True, True, False])
+ tm.assert_series_equal(tc2.duplicated(keep=False), expected)
+ tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
+ sc = tc2.copy()
+ sc.drop_duplicates(keep=False, inplace=True)
+ tm.assert_series_equal(sc, tc2[~expected])
+
+ @pytest.mark.parametrize("is_ordered", [True, False])
+ def test_drop_duplicates_categorical_bool(self, is_ordered):
+ tc = Series(Categorical([True, False, True, False],
+ categories=[True, False], ordered=is_ordered))
+
+ expected = Series([False, False, True, True])
+ tm.assert_series_equal(tc.duplicated(), expected)
+ tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
+ sc = tc.copy()
+ sc.drop_duplicates(inplace=True)
+ tm.assert_series_equal(sc, tc[~expected])
+
+ expected = Series([True, True, False, False])
+ tm.assert_series_equal(tc.duplicated(keep='last'), expected)
+ tm.assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
+ sc = tc.copy()
+ sc.drop_duplicates(keep='last', inplace=True)
+ tm.assert_series_equal(sc, tc[~expected])
+
+ expected = Series([True, True, True, True])
+ tm.assert_series_equal(tc.duplicated(keep=False), expected)
+ tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
+ sc = tc.copy()
+ sc.drop_duplicates(keep=False, inplace=True)
+ tm.assert_series_equal(sc, tc[~expected])
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index 9aae40e1b8dbb..2898ace27f535 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -11,7 +11,8 @@
from pandas.core.indexes.datetimes import Timestamp
from pandas.compat import range
-from pandas import compat
+from pandas import (compat, Categorical, period_range, timedelta_range,
+ DatetimeIndex, PeriodIndex, TimedeltaIndex)
import pandas.io.formats.printing as printing
from pandas.util.testing import (assert_series_equal,
ensure_clean)
@@ -234,6 +235,21 @@ def test_tab_completion(self):
assert 'str' not in dir(s)
assert 'dt' in dir(s) # as it is a datetime categorical
+ def test_tab_completion_with_categorical(self):
+ # test the tab completion display
+ ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
+ 'add_categories', 'remove_categories',
+ 'rename_categories', 'reorder_categories',
+ 'remove_unused_categories', 'as_ordered', 'as_unordered']
+
+ def get_dir(s):
+ results = [r for r in s.cat.__dir__() if not r.startswith('_')]
+ return list(sorted(set(results)))
+
+ s = Series(list('aabbcde')).astype('category')
+ results = get_dir(s)
+ tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
+
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
@@ -427,3 +443,277 @@ def test_tab_complete_warning(self, ip):
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('s.', 1))
+
+
+class TestCategoricalSeries(object):
+
+ @pytest.mark.parametrize(
+ "method",
+ [
+ lambda x: x.cat.set_categories([1, 2, 3]),
+ lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
+ lambda x: x.cat.rename_categories([1, 2, 3]),
+ lambda x: x.cat.remove_unused_categories(),
+ lambda x: x.cat.remove_categories([2]),
+ lambda x: x.cat.add_categories([4]),
+ lambda x: x.cat.as_ordered(),
+ lambda x: x.cat.as_unordered(),
+ ])
+ def test_getname_categorical_accessor(self, method):
+ # GH 17509
+ s = Series([1, 2, 3], name='A').astype('category')
+ expected = 'A'
+ result = method(s).name
+ assert result == expected
+
+ def test_cat_accessor(self):
+ s = Series(Categorical(["a", "b", np.nan, "a"]))
+ tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
+ assert not s.cat.ordered, False
+
+ exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
+ s.cat.set_categories(["b", "a"], inplace=True)
+ tm.assert_categorical_equal(s.values, exp)
+
+ res = s.cat.set_categories(["b", "a"])
+ tm.assert_categorical_equal(res.values, exp)
+
+ s[:] = "a"
+ s = s.cat.remove_unused_categories()
+ tm.assert_index_equal(s.cat.categories, Index(["a"]))
+
+ def test_cat_accessor_api(self):
+ # GH 9322
+ from pandas.core.categorical import CategoricalAccessor
+ assert Series.cat is CategoricalAccessor
+ s = Series(list('aabbcde')).astype('category')
+ assert isinstance(s.cat, CategoricalAccessor)
+
+ invalid = Series([1])
+ with tm.assert_raises_regex(AttributeError,
+ "only use .cat accessor"):
+ invalid.cat
+ assert not hasattr(invalid, 'cat')
+
+ def test_cat_accessor_no_new_attributes(self):
+ # https://github.com/pandas-dev/pandas/issues/10673
+ c = Series(list('aabbcde')).astype('category')
+ with tm.assert_raises_regex(AttributeError,
+ "You cannot add any new attribute"):
+ c.cat.xlabel = "a"
+
+ def test_categorical_delegations(self):
+
+ # invalid accessor
+ pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat)
+ tm.assert_raises_regex(
+ AttributeError,
+ r"Can only use .cat accessor with a 'category' dtype",
+ lambda: Series([1, 2, 3]).cat)
+ pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
+ pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat)
+ pytest.raises(AttributeError,
+ lambda: Series([Timestamp('20130101')]).cat)
+
+ # Series should delegate calls to '.categories', '.codes', '.ordered'
+ # and the methods '.set_categories()' 'drop_unused_categories()' to the
+ # categorical# -*- coding: utf-8 -*-
+ s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
+ exp_categories = Index(["a", "b", "c"])
+ tm.assert_index_equal(s.cat.categories, exp_categories)
+ s.cat.categories = [1, 2, 3]
+ exp_categories = Index([1, 2, 3])
+ tm.assert_index_equal(s.cat.categories, exp_categories)
+
+ exp_codes = Series([0, 1, 2, 0], dtype='int8')
+ tm.assert_series_equal(s.cat.codes, exp_codes)
+
+ assert s.cat.ordered
+ s = s.cat.as_unordered()
+ assert not s.cat.ordered
+ s.cat.as_ordered(inplace=True)
+ assert s.cat.ordered
+
+ # reorder
+ s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
+ exp_categories = Index(["c", "b", "a"])
+ exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
+ s = s.cat.set_categories(["c", "b", "a"])
+ tm.assert_index_equal(s.cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
+ tm.assert_numpy_array_equal(s.__array__(), exp_values)
+
+ # remove unused categories
+ s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
+ ]))
+ exp_categories = Index(["a", "b"])
+ exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
+ s = s.cat.remove_unused_categories()
+ tm.assert_index_equal(s.cat.categories, exp_categories)
+ tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
+ tm.assert_numpy_array_equal(s.__array__(), exp_values)
+
+ # This method is likely to be confused, so test that it raises an error
+ # on wrong inputs:
+ def f():
+ s.set_categories([4, 3, 2, 1])
+
+ pytest.raises(Exception, f)
+ # right: s.cat.set_categories([4,3,2,1])
+
+ def test_str_accessor_api_for_categorical(self):
+ # https://github.com/pandas-dev/pandas/issues/10661
+ from pandas.core.strings import StringMethods
+ s = Series(list('aabb'))
+ s = s + " " + s
+ c = s.astype('category')
+ assert isinstance(c.str, StringMethods)
+
+ # str functions, which need special arguments
+ special_func_defs = [
+ ('cat', (list("zyxw"),), {"sep": ","}),
+ ('center', (10,), {}),
+ ('contains', ("a",), {}),
+ ('count', ("a",), {}),
+ ('decode', ("UTF-8",), {}),
+ ('encode', ("UTF-8",), {}),
+ ('endswith', ("a",), {}),
+ ('extract', ("([a-z]*) ",), {"expand": False}),
+ ('extract', ("([a-z]*) ",), {"expand": True}),
+ ('extractall', ("([a-z]*) ",), {}),
+ ('find', ("a",), {}),
+ ('findall', ("a",), {}),
+ ('index', (" ",), {}),
+ ('ljust', (10,), {}),
+ ('match', ("a"), {}), # deprecated...
+ ('normalize', ("NFC",), {}),
+ ('pad', (10,), {}),
+ ('partition', (" ",), {"expand": False}), # not default
+ ('partition', (" ",), {"expand": True}), # default
+ ('repeat', (3,), {}),
+ ('replace', ("a", "z"), {}),
+ ('rfind', ("a",), {}),
+ ('rindex', (" ",), {}),
+ ('rjust', (10,), {}),
+ ('rpartition', (" ",), {"expand": False}), # not default
+ ('rpartition', (" ",), {"expand": True}), # default
+ ('slice', (0, 1), {}),
+ ('slice_replace', (0, 1, "z"), {}),
+ ('split', (" ",), {"expand": False}), # default
+ ('split', (" ",), {"expand": True}), # not default
+ ('startswith', ("a",), {}),
+ ('wrap', (2,), {}),
+ ('zfill', (10,), {})
+ ]
+ _special_func_names = [f[0] for f in special_func_defs]
+
+ # * get, join: they need a individual elements of type lists, but
+ # we can't make a categorical with lists as individual categories.
+ # -> `s.str.split(" ").astype("category")` will error!
+ # * `translate` has different interfaces for py2 vs. py3
+ _ignore_names = ["get", "join", "translate"]
+
+ str_func_names = [f for f in dir(s.str) if not (
+ f.startswith("_") or
+ f in _special_func_names or
+ f in _ignore_names)]
+
+ func_defs = [(f, (), {}) for f in str_func_names]
+ func_defs.extend(special_func_defs)
+
+ for func, args, kwargs in func_defs:
+ res = getattr(c.str, func)(*args, **kwargs)
+ exp = getattr(s.str, func)(*args, **kwargs)
+
+ if isinstance(res, DataFrame):
+ tm.assert_frame_equal(res, exp)
+ else:
+ tm.assert_series_equal(res, exp)
+
+ invalid = Series([1, 2, 3]).astype('category')
+ with tm.assert_raises_regex(AttributeError,
+ "Can only use .str "
+ "accessor with string"):
+ invalid.str
+ assert not hasattr(invalid, 'str')
+
+ def test_dt_accessor_api_for_categorical(self):
+ # https://github.com/pandas-dev/pandas/issues/10661
+ from pandas.core.indexes.accessors import Properties
+
+ s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
+ c_dr = s_dr.astype("category")
+
+ s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
+ c_pr = s_pr.astype("category")
+
+ s_tdr = Series(timedelta_range('1 days', '10 days'))
+ c_tdr = s_tdr.astype("category")
+
+ # only testing field (like .day)
+ # and bool (is_month_start)
+ get_ops = lambda x: x._datetimelike_ops
+
+ test_data = [
+ ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
+ ("Period", get_ops(PeriodIndex), s_pr, c_pr),
+ ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr)]
+
+ assert isinstance(c_dr.dt, Properties)
+
+ special_func_defs = [
+ ('strftime', ("%Y-%m-%d",), {}),
+ ('tz_convert', ("EST",), {}),
+ ('round', ("D",), {}),
+ ('floor', ("D",), {}),
+ ('ceil', ("D",), {}),
+ ('asfreq', ("D",), {}),
+ # ('tz_localize', ("UTC",), {}),
+ ]
+ _special_func_names = [f[0] for f in special_func_defs]
+
+ # the series is already localized
+ _ignore_names = ['tz_localize', 'components']
+
+ for name, attr_names, s, c in test_data:
+ func_names = [f
+ for f in dir(s.dt)
+ if not (f.startswith("_") or f in attr_names or f in
+ _special_func_names or f in _ignore_names)]
+
+ func_defs = [(f, (), {}) for f in func_names]
+ for f_def in special_func_defs:
+ if f_def[0] in dir(s.dt):
+ func_defs.append(f_def)
+
+ for func, args, kwargs in func_defs:
+ res = getattr(c.dt, func)(*args, **kwargs)
+ exp = getattr(s.dt, func)(*args, **kwargs)
+
+ if isinstance(res, DataFrame):
+ tm.assert_frame_equal(res, exp)
+ elif isinstance(res, Series):
+ tm.assert_series_equal(res, exp)
+ else:
+ tm.assert_almost_equal(res, exp)
+
+ for attr in attr_names:
+ try:
+ res = getattr(c.dt, attr)
+ exp = getattr(s.dt, attr)
+ except Exception as e:
+ print(name, attr)
+ raise e
+
+ if isinstance(res, DataFrame):
+ tm.assert_frame_equal(res, exp)
+ elif isinstance(res, Series):
+ tm.assert_series_equal(res, exp)
+ else:
+ tm.assert_almost_equal(res, exp)
+
+ invalid = Series([1, 2, 3]).astype('category')
+ with tm.assert_raises_regex(
+ AttributeError, "Can only use .dt accessor with datetimelike"):
+ invalid.dt
+ assert not hasattr(invalid, 'str')
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index c814cade77e5c..f9842514ed5e5 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -17,7 +17,7 @@
is_datetime64tz_dtype)
from pandas import (Index, Series, isna, date_range, Timestamp,
NaT, period_range, timedelta_range, MultiIndex,
- IntervalIndex)
+ IntervalIndex, Categorical, DataFrame)
from pandas._libs import lib
from pandas._libs.tslib import iNaT
@@ -184,6 +184,60 @@ def test_constructor_categorical(self):
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
+ def test_constructor_categorical_with_coercion(self):
+ factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
+ # test basic creation / coercion of categoricals
+ s = Series(factor, name='A')
+ assert s.dtype == 'category'
+ assert len(s) == len(factor)
+ str(s.values)
+ str(s)
+
+ # in a frame
+ df = DataFrame({'A': factor})
+ result = df['A']
+ tm.assert_series_equal(result, s)
+ result = df.iloc[:, 0]
+ tm.assert_series_equal(result, s)
+ assert len(df) == len(factor)
+ str(df.values)
+ str(df)
+
+ df = DataFrame({'A': s})
+ result = df['A']
+ tm.assert_series_equal(result, s)
+ assert len(df) == len(factor)
+ str(df.values)
+ str(df)
+
+ # multiples
+ df = DataFrame({'A': s, 'B': s, 'C': 1})
+ result1 = df['A']
+ result2 = df['B']
+ tm.assert_series_equal(result1, s)
+ tm.assert_series_equal(result2, s, check_names=False)
+ assert result2.name == 'B'
+ assert len(df) == len(factor)
+ str(df.values)
+ str(df)
+
+ # GH8623
+ x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
+ [1, 'John P. Doe']],
+ columns=['person_id', 'person_name'])
+ x['person_name'] = Categorical(x.person_name
+ ) # doing this breaks transform
+
+ expected = x.iloc[0].person_name
+ result = x.person_name.iloc[0]
+ assert result == expected
+
+ result = x.person_name[0]
+ assert result == expected
+
+ result = x.person_name.loc[0]
+ assert result == expected
+
def test_constructor_categorical_dtype(self):
result = pd.Series(['a', 'b'],
dtype=CategoricalDtype(['a', 'b', 'c'],
@@ -197,6 +251,40 @@ def test_constructor_categorical_dtype(self):
tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a']))
assert result.cat.ordered is False
+ def test_categorical_sideeffects_free(self):
+ # Passing a categorical to a Series and then changing values in either
+ # the series or the categorical should not change the values in the
+ # other one, IF you specify copy!
+ cat = Categorical(["a", "b", "c", "a"])
+ s = Series(cat, copy=True)
+ assert s.cat is not cat
+ s.cat.categories = [1, 2, 3]
+ exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
+ exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
+ tm.assert_numpy_array_equal(s.__array__(), exp_s)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
+
+ # setting
+ s[0] = 2
+ exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(s.__array__(), exp_s2)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
+
+ # however, copy is False by default
+ # so this WILL change values
+ cat = Categorical(["a", "b", "c", "a"])
+ s = Series(cat)
+ assert s.values is cat
+ s.cat.categories = [1, 2, 3]
+ exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(s.__array__(), exp_s)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_s)
+
+ s[0] = 2
+ exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
+ tm.assert_numpy_array_equal(s.__array__(), exp_s2)
+ tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
+
def test_unordered_compare_equal(self):
left = pd.Series(['a', 'b', 'c'],
dtype=CategoricalDtype(['a', 'b']))
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 163950b75bc34..12d0267005f19 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -10,6 +10,7 @@
import warnings
from numpy import nan
+import pandas as pd
import numpy as np
from pandas import (
@@ -229,6 +230,98 @@ def test_astype_categories_deprecation(self):
result = s.astype('category', categories=['a', 'b'], ordered=True)
tm.assert_series_equal(result, expected)
+ def test_astype_from_categorical(self):
+ l = ["a", "b", "c", "a"]
+ s = Series(l)
+ exp = Series(Categorical(l))
+ res = s.astype('category')
+ tm.assert_series_equal(res, exp)
+
+ l = [1, 2, 3, 1]
+ s = Series(l)
+ exp = Series(Categorical(l))
+ res = s.astype('category')
+ tm.assert_series_equal(res, exp)
+
+ df = DataFrame({"cats": [1, 2, 3, 4, 5, 6],
+ "vals": [1, 2, 3, 4, 5, 6]})
+ cats = Categorical([1, 2, 3, 4, 5, 6])
+ exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
+ df["cats"] = df["cats"].astype("category")
+ tm.assert_frame_equal(exp_df, df)
+
+ df = DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
+ "vals": [1, 2, 3, 4, 5, 6]})
+ cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
+ exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
+ df["cats"] = df["cats"].astype("category")
+ tm.assert_frame_equal(exp_df, df)
+
+ # with keywords
+ l = ["a", "b", "c", "a"]
+ s = Series(l)
+ exp = Series(Categorical(l, ordered=True))
+ res = s.astype(CategoricalDtype(None, ordered=True))
+ tm.assert_series_equal(res, exp)
+
+ exp = Series(Categorical(l, categories=list('abcdef'), ordered=True))
+ res = s.astype(CategoricalDtype(list('abcdef'), ordered=True))
+ tm.assert_series_equal(res, exp)
+
+ def test_astype_categorical_to_other(self):
+
+ df = DataFrame({'value': np.random.randint(0, 10000, 100)})
+ labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ cat_labels = Categorical(labels, labels)
+
+ df = df.sort_values(by=['value'], ascending=True)
+ df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
+ right=False, labels=cat_labels)
+
+ s = df['value_group']
+ expected = s
+ tm.assert_series_equal(s.astype('category'), expected)
+ tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
+ pytest.raises(ValueError, lambda: s.astype('float64'))
+
+ cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
+ exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
+ tm.assert_series_equal(cat.astype('str'), exp)
+ s2 = Series(Categorical(['1', '2', '3', '4']))
+ exp2 = Series([1, 2, 3, 4]).astype(int)
+ tm.assert_series_equal(s2.astype('int'), exp2)
+
+ # object don't sort correctly, so just compare that we have the same
+ # values
+ def cmp(a, b):
+ tm.assert_almost_equal(
+ np.sort(np.unique(a)), np.sort(np.unique(b)))
+
+ expected = Series(np.array(s.values), name='value_group')
+ cmp(s.astype('object'), expected)
+ cmp(s.astype(np.object_), expected)
+
+ # array conversion
+ tm.assert_almost_equal(np.array(s), np.array(s.values))
+
+ # valid conversion
+ for valid in [lambda x: x.astype('category'),
+ lambda x: x.astype(CategoricalDtype()),
+ lambda x: x.astype('object').astype('category'),
+ lambda x: x.astype('object').astype(
+ CategoricalDtype())
+ ]:
+
+ result = valid(s)
+ # compare series values
+ # internal .categories can't be compared because it is sorted
+ tm.assert_series_equal(result, s, check_categorical=False)
+
+ # invalid conversion (these are NOT a dtype)
+ for invalid in [lambda x: x.astype(Categorical),
+ lambda x: x.astype('object').astype(Categorical)]:
+ pytest.raises(TypeError, lambda: invalid(s))
+
def test_astype_categoricaldtype(self):
s = Series(['a', 'b', 'a'])
result = s.astype(CategoricalDtype(['a', 'b'], ordered=True))
diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py
index c0ef5a3694bf3..b7381c6b8fd30 100644
--- a/pandas/tests/series/test_indexing.py
+++ b/pandas/tests/series/test_indexing.py
@@ -13,7 +13,8 @@
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas import (Index, Series, DataFrame, isna,
date_range, NaT, MultiIndex,
- Timestamp, DatetimeIndex, Timedelta)
+ Timestamp, DatetimeIndex, Timedelta,
+ Categorical)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas._libs import tslib, lib
@@ -2237,6 +2238,31 @@ def test_reindex_fill_value(self):
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
+ def test_reindex_categorical(self):
+
+ index = date_range('20000101', periods=3)
+
+ # reindexing to an invalid Categorical
+ s = Series(['a', 'b', 'c'], dtype='category')
+ result = s.reindex(index)
+ expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
+ categories=['a', 'b', 'c']))
+ expected.index = index
+ tm.assert_series_equal(result, expected)
+
+ # partial reindexing
+ expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
+ 'c']))
+ expected.index = [1, 2]
+ result = s.reindex([1, 2])
+ tm.assert_series_equal(result, expected)
+
+ expected = Series(Categorical(
+ values=['c', np.nan], categories=['a', 'b', 'c']))
+ expected.index = [2, 3]
+ result = s.reindex([2, 3])
+ tm.assert_series_equal(result, expected)
+
def test_rename(self):
# GH 17407
@@ -2337,6 +2363,41 @@ def test_setitem_slice_into_readonly_backing_data(self):
assert not array.any()
+ def test_categorial_assigning_ops(self):
+ orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
+ s = orig.copy()
+ s[:] = "a"
+ exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[1] = "a"
+ exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[s.index > 0] = "a"
+ exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s[[False, True]] = "a"
+ exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
+ tm.assert_series_equal(s, exp)
+
+ s = orig.copy()
+ s.index = ["x", "y"]
+ s["y"] = "a"
+ exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
+ index=["x", "y"])
+ tm.assert_series_equal(s, exp)
+
+ # ensure that one can set something to np.nan
+ s = Series(Categorical([1, 2, 3]))
+ exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
+ s[1] = np.nan
+ tm.assert_series_equal(s, exp)
+
class TestTimeSeriesDuplicates(object):
diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py
index 594049d3d3bb9..b23dc37016b69 100644
--- a/pandas/tests/series/test_missing.py
+++ b/pandas/tests/series/test_missing.py
@@ -365,6 +365,20 @@ def test_fillna_raise(self):
with pytest.raises(ValueError):
s.fillna(1, limit=limit, method=method)
+ def test_categorical_nan_equality(self):
+ cat = Series(Categorical(["a", "b", "c", np.nan]))
+ exp = Series([True, True, True, False])
+ res = (cat == cat)
+ tm.assert_series_equal(res, exp)
+
+ def test_categorical_nan_handling(self):
+
+ # NaNs are represented as -1 in labels
+ s = Series(Categorical(["a", "b", np.nan, "a"]))
+ tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
+ tm.assert_numpy_array_equal(s.values.codes,
+ np.array([0, 1, -1, 0], dtype=np.int8))
+
@pytest.mark.parametrize('fill_value, expected_output', [
('a', ['a', 'a', 'b', 'a', 'a']),
({1: 'a', 3: 'b', 4: 'b'}, ['a', 'a', 'b', 'b', 'b']),
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py
index 0d064983fb546..6cc866a35514f 100644
--- a/pandas/tests/series/test_operators.py
+++ b/pandas/tests/series/test_operators.py
@@ -14,7 +14,7 @@
import pandas as pd
from pandas import (Index, Series, DataFrame, isna, bdate_range,
- NaT, date_range, timedelta_range)
+ NaT, date_range, timedelta_range, Categorical)
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.nanops as nanops
@@ -931,6 +931,58 @@ def test_object_comparisons(self):
expected = -(s == 'a')
assert_series_equal(result, expected)
+ def test_categorical_comparisons(self):
+
+ # GH 8938
+ # allow equality comparisons
+ a = Series(list('abc'), dtype="category")
+ b = Series(list('abc'), dtype="object")
+ c = Series(['a', 'b', 'cc'], dtype="object")
+ d = Series(list('acb'), dtype="object")
+ e = Categorical(list('abc'))
+ f = Categorical(list('acb'))
+
+ # vs scalar
+ assert not (a == 'a').all()
+ assert ((a != 'a') == ~(a == 'a')).all()
+
+ assert not ('a' == a).all()
+ assert (a == 'a')[0]
+ assert ('a' == a)[0]
+ assert not ('a' != a)[0]
+
+ # vs list-like
+ assert (a == a).all()
+ assert not (a != a).all()
+
+ assert (a == list(a)).all()
+ assert (a == b).all()
+ assert (b == a).all()
+ assert ((~(a == b)) == (a != b)).all()
+ assert ((~(b == a)) == (b != a)).all()
+
+ assert not (a == c).all()
+ assert not (c == a).all()
+ assert not (a == d).all()
+ assert not (d == a).all()
+
+ # vs a cat-like
+ assert (a == e).all()
+ assert (e == a).all()
+ assert not (a == f).all()
+ assert not (f == a).all()
+
+ assert ((~(a == e) == (a != e)).all())
+ assert ((~(e == a) == (e != a)).all())
+ assert ((~(a == f) == (a != f)).all())
+ assert ((~(f == a) == (f != a)).all())
+
+ # non-equality is not comparable
+ pytest.raises(TypeError, lambda: a < b)
+ pytest.raises(TypeError, lambda: b < a)
+ pytest.raises(TypeError, lambda: a > b)
+ pytest.raises(TypeError, lambda: b > a)
+
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
@@ -1036,6 +1088,34 @@ def test_comparison_invalid(self):
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
+ def test_unequal_categorical_comparison_raises_type_error(self):
+ # unequal comparison should raise for unordered cats
+ cat = Series(Categorical(list("abc")))
+
+ def f():
+ cat > "b"
+
+ pytest.raises(TypeError, f)
+ cat = Series(Categorical(list("abc"), ordered=False))
+
+ def f():
+ cat > "b"
+
+ pytest.raises(TypeError, f)
+
+ # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
+ # and following comparisons with scalars not in categories should raise
+ # for unequal comps, but not for equal/not equal
+ cat = Series(Categorical(list("abc"), ordered=True))
+
+ pytest.raises(TypeError, lambda: cat < "d")
+ pytest.raises(TypeError, lambda: cat > "d")
+ pytest.raises(TypeError, lambda: "d" < cat)
+ pytest.raises(TypeError, lambda: "d" > cat)
+
+ tm.assert_series_equal(cat == "d", Series([False, False, False]))
+ tm.assert_series_equal(cat != "d", Series([True, True, True]))
+
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py
index c22e2ca8e0dc8..bf3e584657763 100644
--- a/pandas/tests/series/test_repr.py
+++ b/pandas/tests/series/test_repr.py
@@ -8,7 +8,8 @@
import numpy as np
import pandas as pd
-from pandas import (Index, Series, DataFrame, date_range, option_context)
+from pandas import (Index, Series, DataFrame, date_range, option_context,
+ Categorical, period_range, timedelta_range)
from pandas.core.index import MultiIndex
from pandas.compat import lrange, range, u
@@ -198,3 +199,251 @@ def test_latex_repr(self):
assert result == s._repr_latex_()
assert s._repr_latex_() is None
+
+
+class TestCategoricalRepr(object):
+
+ def test_categorical_repr(self):
+ a = Series(Categorical([1, 2, 3, 4]))
+ exp = u("0 1\n1 2\n2 3\n3 4\n" +
+ "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
+
+ assert exp == a.__unicode__()
+
+ a = Series(Categorical(["a", "b"] * 25))
+ exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
+ "Length: 50, dtype: category\nCategories (2, object): [a, b]")
+ with option_context("display.max_rows", 5):
+ assert exp == repr(a)
+
+ levs = list("abcdefghijklmnopqrstuvwxyz")
+ a = Series(Categorical(["a", "b"], categories=levs, ordered=True))
+ exp = u("0 a\n1 b\n" + "dtype: category\n"
+ "Categories (26, object): [a < b < c < d ... w < x < y < z]")
+ assert exp == a.__unicode__()
+
+ def test_categorical_series_repr(self):
+ s = Series(Categorical([1, 2, 3]))
+ exp = """0 1
+1 2
+2 3
+dtype: category
+Categories (3, int64): [1, 2, 3]"""
+
+ assert repr(s) == exp
+
+ s = Series(Categorical(np.arange(10)))
+ exp = """0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+dtype: category
+Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_ordered(self):
+ s = Series(Categorical([1, 2, 3], ordered=True))
+ exp = """0 1
+1 2
+2 3
+dtype: category
+Categories (3, int64): [1 < 2 < 3]"""
+
+ assert repr(s) == exp
+
+ s = Series(Categorical(np.arange(10), ordered=True))
+ exp = """0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+dtype: category
+Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_datetime(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ s = Series(Categorical(idx))
+ exp = """0 2011-01-01 09:00:00
+1 2011-01-01 10:00:00
+2 2011-01-01 11:00:00
+3 2011-01-01 12:00:00
+4 2011-01-01 13:00:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
+ 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
+
+ assert repr(s) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ s = Series(Categorical(idx))
+ exp = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 2011-01-01 11:00:00-05:00
+3 2011-01-01 12:00:00-05:00
+4 2011-01-01 13:00:00-05:00
+dtype: category
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
+ 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
+ 2011-01-01 13:00:00-05:00]""" # noqa
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_datetime_ordered(self):
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5)
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00:00
+1 2011-01-01 10:00:00
+2 2011-01-01 11:00:00
+3 2011-01-01 12:00:00
+4 2011-01-01 13:00:00
+dtype: category
+Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
+ 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
+
+ assert repr(s) == exp
+
+ idx = date_range('2011-01-01 09:00', freq='H', periods=5,
+ tz='US/Eastern')
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00:00-05:00
+1 2011-01-01 10:00:00-05:00
+2 2011-01-01 11:00:00-05:00
+3 2011-01-01 12:00:00-05:00
+4 2011-01-01 13:00:00-05:00
+dtype: category
+Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
+ 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
+ 2011-01-01 13:00:00-05:00]""" # noqa
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_period(self):
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ s = Series(Categorical(idx))
+ exp = """0 2011-01-01 09:00
+1 2011-01-01 10:00
+2 2011-01-01 11:00
+3 2011-01-01 12:00
+4 2011-01-01 13:00
+dtype: category
+Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(s) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ s = Series(Categorical(idx))
+ exp = """0 2011-01
+1 2011-02
+2 2011-03
+3 2011-04
+4 2011-05
+dtype: category
+Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_period_ordered(self):
+ idx = period_range('2011-01-01 09:00', freq='H', periods=5)
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 2011-01-01 09:00
+1 2011-01-01 10:00
+2 2011-01-01 11:00
+3 2011-01-01 12:00
+4 2011-01-01 13:00
+dtype: category
+Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
+ 2011-01-01 13:00]""" # noqa
+
+ assert repr(s) == exp
+
+ idx = period_range('2011-01', freq='M', periods=5)
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 2011-01
+1 2011-02
+2 2011-03
+3 2011-04
+4 2011-05
+dtype: category
+Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_timedelta(self):
+ idx = timedelta_range('1 days', periods=5)
+ s = Series(Categorical(idx))
+ exp = """0 1 days
+1 2 days
+2 3 days
+3 4 days
+4 5 days
+dtype: category
+Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
+
+ assert repr(s) == exp
+
+ idx = timedelta_range('1 hours', periods=10)
+ s = Series(Categorical(idx))
+ exp = """0 0 days 01:00:00
+1 1 days 01:00:00
+2 2 days 01:00:00
+3 3 days 01:00:00
+4 4 days 01:00:00
+5 5 days 01:00:00
+6 6 days 01:00:00
+7 7 days 01:00:00
+8 8 days 01:00:00
+9 9 days 01:00:00
+dtype: category
+Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
+ 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
+ 8 days 01:00:00, 9 days 01:00:00]""" # noqa
+
+ assert repr(s) == exp
+
+ def test_categorical_series_repr_timedelta_ordered(self):
+ idx = timedelta_range('1 days', periods=5)
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 1 days
+1 2 days
+2 3 days
+3 4 days
+4 5 days
+dtype: category
+Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
+
+ assert repr(s) == exp
+
+ idx = timedelta_range('1 hours', periods=10)
+ s = Series(Categorical(idx, ordered=True))
+ exp = """0 0 days 01:00:00
+1 1 days 01:00:00
+2 2 days 01:00:00
+3 3 days 01:00:00
+4 4 days 01:00:00
+5 5 days 01:00:00
+6 6 days 01:00:00
+7 7 days 01:00:00
+8 8 days 01:00:00
+9 9 days 01:00:00
+dtype: category
+Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
+ 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
+ 8 days 01:00:00 < 9 days 01:00:00]""" # noqa
+
+ assert repr(s) == exp
diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py
index 40b0280de3719..310412e53bd1c 100644
--- a/pandas/tests/series/test_sorting.py
+++ b/pandas/tests/series/test_sorting.py
@@ -5,7 +5,7 @@
import numpy as np
import random
-from pandas import DataFrame, Series, MultiIndex, IntervalIndex
+from pandas import DataFrame, Series, MultiIndex, IntervalIndex, Categorical
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
@@ -195,3 +195,72 @@ def test_sort_index_intervals(self):
[3, 2, 1, 0],
[4, 3, 2, 1]))
assert_series_equal(result, expected)
+
+ def test_sort_values_categorical(self):
+
+ c = Categorical(["a", "b", "b", "a"], ordered=False)
+ cat = Series(c.copy())
+
+ # sort in the categories order
+ expected = Series(
+ Categorical(["a", "a", "b", "b"],
+ ordered=False), index=[0, 3, 1, 2])
+ result = cat.sort_values()
+ tm.assert_series_equal(result, expected)
+
+ cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
+ res = cat.sort_values()
+ exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+
+ cat = Series(Categorical(["a", "c", "b", "d"], categories=[
+ "a", "b", "c", "d"], ordered=True))
+ res = cat.sort_values()
+ exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+
+ res = cat.sort_values(ascending=False)
+ exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
+ tm.assert_numpy_array_equal(res.__array__(), exp)
+
+ raw_cat1 = Categorical(["a", "b", "c", "d"],
+ categories=["a", "b", "c", "d"], ordered=False)
+ raw_cat2 = Categorical(["a", "b", "c", "d"],
+ categories=["d", "c", "b", "a"], ordered=True)
+ s = ["a", "b", "c", "d"]
+ df = DataFrame({"unsort": raw_cat1,
+ "sort": raw_cat2,
+ "string": s,
+ "values": [1, 2, 3, 4]})
+
+ # Cats must be sorted in a dataframe
+ res = df.sort_values(by=["string"], ascending=False)
+ exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
+ tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
+ assert res["sort"].dtype == "category"
+
+ res = df.sort_values(by=["sort"], ascending=False)
+ exp = df.sort_values(by=["string"], ascending=True)
+ tm.assert_series_equal(res["values"], exp["values"])
+ assert res["sort"].dtype == "category"
+ assert res["unsort"].dtype == "category"
+
+ # unordered cat, but we allow this
+ df.sort_values(by=["unsort"], ascending=False)
+
+ # multi-columns sort
+ # GH 7848
+ df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
+ "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
+ df["grade"] = Categorical(df["raw_grade"], ordered=True)
+ df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
+
+ # sorts 'grade' according to the order of the categories
+ result = df.sort_values(by=['grade'])
+ expected = df.iloc[[1, 2, 5, 0, 3, 4]]
+ tm.assert_frame_equal(result, expected)
+
+ # multi
+ result = df.sort_values(by=['grade', 'id'])
+ expected = df.iloc[[2, 1, 5, 4, 3, 0]]
+ tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
deleted file mode 100644
index 17f34fdf3604c..0000000000000
--- a/pandas/tests/test_categorical.py
+++ /dev/null
@@ -1,4826 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: disable=E1101,E1103,W0232
-
-from warnings import catch_warnings
-import pytest
-import sys
-from datetime import datetime
-from distutils.version import LooseVersion
-
-import numpy as np
-
-from pandas.core.dtypes.dtypes import CategoricalDtype
-from pandas.core.dtypes.common import (
- is_categorical_dtype,
- is_float_dtype,
- is_integer_dtype)
-
-import pandas as pd
-import pandas.compat as compat
-import pandas.util.testing as tm
-from pandas import (Categorical, Index, Series, DataFrame, Timestamp,
- CategoricalIndex, isna, date_range, DatetimeIndex,
- period_range, PeriodIndex, timedelta_range,
- TimedeltaIndex, NaT, Interval, IntervalIndex)
-from pandas.compat import range, lrange, u, PY3, PYPY
-from pandas.core.config import option_context
-from pandas.core.categorical import _recode_for_categories
-
-
-class TestCategorical(object):
-
- def setup_method(self, method):
- self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
- ordered=True)
-
- def test_getitem(self):
- assert self.factor[0] == 'a'
- assert self.factor[-1] == 'c'
-
- subf = self.factor[[0, 1, 2]]
- tm.assert_numpy_array_equal(subf._codes,
- np.array([0, 1, 1], dtype=np.int8))
-
- subf = self.factor[np.asarray(self.factor) == 'c']
- tm.assert_numpy_array_equal(subf._codes,
- np.array([2, 2, 2], dtype=np.int8))
-
- def test_getitem_listlike(self):
-
- # GH 9469
- # properly coerce the input indexers
- np.random.seed(1)
- c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
- result = c.codes[np.array([100000]).astype(np.int64)]
- expected = c[np.array([100000]).astype(np.int64)].codes
- tm.assert_numpy_array_equal(result, expected)
-
- @pytest.mark.parametrize(
- "method",
- [
- lambda x: x.cat.set_categories([1, 2, 3]),
- lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
- lambda x: x.cat.rename_categories([1, 2, 3]),
- lambda x: x.cat.remove_unused_categories(),
- lambda x: x.cat.remove_categories([2]),
- lambda x: x.cat.add_categories([4]),
- lambda x: x.cat.as_ordered(),
- lambda x: x.cat.as_unordered(),
- ])
- def test_getname_categorical_accessor(self, method):
- # GH 17509
- s = Series([1, 2, 3], name='A').astype('category')
- expected = 'A'
- result = method(s).name
- assert result == expected
-
- def test_getitem_category_type(self):
- # GH 14580
- # test iloc() on Series with Categorical data
-
- s = Series([1, 2, 3]).astype('category')
-
- # get slice
- result = s.iloc[0:2]
- expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
- # get list of indexes
- result = s.iloc[[0, 1]]
- expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
- # get boolean array
- result = s.iloc[[True, False, False]]
- expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
- tm.assert_series_equal(result, expected)
-
- def test_setitem(self):
-
- # int/positional
- c = self.factor.copy()
- c[0] = 'b'
- assert c[0] == 'b'
- c[-1] = 'a'
- assert c[-1] == 'a'
-
- # boolean
- c = self.factor.copy()
- indexer = np.zeros(len(c), dtype='bool')
- indexer[0] = True
- indexer[-1] = True
- c[indexer] = 'c'
- expected = Categorical(['c', 'b', 'b', 'a', 'a', 'c', 'c', 'c'],
- ordered=True)
-
- tm.assert_categorical_equal(c, expected)
-
- def test_setitem_listlike(self):
-
- # GH 9469
- # properly coerce the input indexers
- np.random.seed(1)
- c = Categorical(np.random.randint(0, 5, size=150000).astype(
- np.int8)).add_categories([-1000])
- indexer = np.array([100000]).astype(np.int64)
- c[indexer] = -1000
-
- # we are asserting the code result here
- # which maps to the -1000 category
- result = c.codes[np.array([100000]).astype(np.int64)]
- tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
-
- def test_constructor_empty(self):
- # GH 17248
- c = Categorical([])
- expected = Index([])
- tm.assert_index_equal(c.categories, expected)
-
- c = Categorical([], categories=[1, 2, 3])
- expected = pd.Int64Index([1, 2, 3])
- tm.assert_index_equal(c.categories, expected)
-
- def test_constructor_tuples(self):
- values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
- result = Categorical(values)
- expected = Index([(1,), (1, 2)], tupleize_cols=False)
- tm.assert_index_equal(result.categories, expected)
- assert result.ordered is False
-
- def test_constructor_tuples_datetimes(self):
- # numpy will auto reshape when all of the tuples are the
- # same len, so add an extra one with 2 items and slice it off
- values = np.array([(Timestamp('2010-01-01'),),
- (Timestamp('2010-01-02'),),
- (Timestamp('2010-01-01'),),
- (Timestamp('2010-01-02'),),
- ('a', 'b')], dtype=object)[:-1]
- result = Categorical(values)
- expected = Index([(Timestamp('2010-01-01'),),
- (Timestamp('2010-01-02'),)], tupleize_cols=False)
- tm.assert_index_equal(result.categories, expected)
-
- def test_constructor_unsortable(self):
-
- # it works!
- arr = np.array([1, 2, 3, datetime.now()], dtype='O')
- factor = Categorical(arr, ordered=False)
- assert not factor.ordered
-
- # this however will raise as cannot be sorted
- pytest.raises(
- TypeError, lambda: Categorical(arr, ordered=True))
-
- def test_constructor_interval(self):
- result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)],
- ordered=True)
- ii = IntervalIndex.from_intervals([Interval(1, 2),
- Interval(2, 3),
- Interval(3, 6)])
- exp = Categorical(ii, ordered=True)
- tm.assert_categorical_equal(result, exp)
- tm.assert_index_equal(result.categories, ii)
-
- def test_is_equal_dtype(self):
-
- # test dtype comparisons between cats
-
- c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
- c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
- c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
- assert c1.is_dtype_equal(c1)
- assert c2.is_dtype_equal(c2)
- assert c3.is_dtype_equal(c3)
- assert c1.is_dtype_equal(c2)
- assert not c1.is_dtype_equal(c3)
- assert not c1.is_dtype_equal(Index(list('aabca')))
- assert not c1.is_dtype_equal(c1.astype(object))
- assert c1.is_dtype_equal(CategoricalIndex(c1))
- assert (c1.is_dtype_equal(
- CategoricalIndex(c1, categories=list('cab'))))
- assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
-
- def test_constructor(self):
-
- exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
- c1 = Categorical(exp_arr)
- tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
- c2 = Categorical(exp_arr, categories=["a", "b", "c"])
- tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
- c2 = Categorical(exp_arr, categories=["c", "b", "a"])
- tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
-
- # categories must be unique
- def f():
- Categorical([1, 2], [1, 2, 2])
-
- pytest.raises(ValueError, f)
-
- def f():
- Categorical(["a", "b"], ["a", "b", "b"])
-
- pytest.raises(ValueError, f)
-
- # The default should be unordered
- c1 = Categorical(["a", "b", "c", "a"])
- assert not c1.ordered
-
- # Categorical as input
- c1 = Categorical(["a", "b", "c", "a"])
- c2 = Categorical(c1)
- tm.assert_categorical_equal(c1, c2)
-
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- c2 = Categorical(c1)
- tm.assert_categorical_equal(c1, c2)
-
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
- c2 = Categorical(c1)
- tm.assert_categorical_equal(c1, c2)
-
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
- c2 = Categorical(c1, categories=["a", "b", "c"])
- tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
- tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
-
- # Series of dtype category
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- c2 = Categorical(Series(c1))
- tm.assert_categorical_equal(c1, c2)
-
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
- c2 = Categorical(Series(c1))
- tm.assert_categorical_equal(c1, c2)
-
- # Series
- c1 = Categorical(["a", "b", "c", "a"])
- c2 = Categorical(Series(["a", "b", "c", "a"]))
- tm.assert_categorical_equal(c1, c2)
-
- c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
- c2 = Categorical(Series(["a", "b", "c", "a"]),
- categories=["a", "b", "c", "d"])
- tm.assert_categorical_equal(c1, c2)
-
- # This should result in integer categories, not float!
- cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- assert is_integer_dtype(cat.categories)
-
- # https://github.com/pandas-dev/pandas/issues/3678
- cat = Categorical([np.nan, 1, 2, 3])
- assert is_integer_dtype(cat.categories)
-
- # this should result in floats
- cat = Categorical([np.nan, 1, 2., 3])
- assert is_float_dtype(cat.categories)
-
- cat = Categorical([np.nan, 1., 2., 3.])
- assert is_float_dtype(cat.categories)
-
- # This doesn't work -> this would probably need some kind of "remember
- # the original type" feature to try to cast the array interface result
- # to...
-
- # vals = np.asarray(cat[cat.notna()])
- # assert is_integer_dtype(vals)
-
- # corner cases
- cat = Categorical([1])
- assert len(cat.categories) == 1
- assert cat.categories[0] == 1
- assert len(cat.codes) == 1
- assert cat.codes[0] == 0
-
- cat = Categorical(["a"])
- assert len(cat.categories) == 1
- assert cat.categories[0] == "a"
- assert len(cat.codes) == 1
- assert cat.codes[0] == 0
-
- # Scalars should be converted to lists
- cat = Categorical(1)
- assert len(cat.categories) == 1
- assert cat.categories[0] == 1
- assert len(cat.codes) == 1
- assert cat.codes[0] == 0
-
- # two arrays
- # - when the first is an integer dtype and the second is not
- # - when the resulting codes are all -1/NaN
- with tm.assert_produces_warning(None):
- c_old = Categorical([0, 1, 2, 0, 1, 2],
- categories=["a", "b", "c"]) # noqa
-
- with tm.assert_produces_warning(None):
- c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
- categories=[3, 4, 5])
-
- # the next one are from the old docs
- with tm.assert_produces_warning(None):
- c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
- cat = Categorical([1, 2], categories=[1, 2, 3])
-
- # this is a legitimate constructor
- with tm.assert_produces_warning(None):
- c = Categorical(np.array([], dtype='int64'), # noqa
- categories=[3, 2, 1], ordered=True)
-
- def test_constructor_not_sequence(self):
- # https://github.com/pandas-dev/pandas/issues/16022
- with pytest.raises(TypeError):
- Categorical(['a', 'b'], categories='a')
-
- def test_constructor_with_null(self):
-
- # Cannot have NaN in categories
- with pytest.raises(ValueError):
- Categorical([np.nan, "a", "b", "c"],
- categories=[np.nan, "a", "b", "c"])
-
- with pytest.raises(ValueError):
- Categorical([None, "a", "b", "c"],
- categories=[None, "a", "b", "c"])
-
- with pytest.raises(ValueError):
- Categorical(DatetimeIndex(['nat', '20160101']),
- categories=[NaT, Timestamp('20160101')])
-
- def test_constructor_with_index(self):
- ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
- tm.assert_categorical_equal(ci.values, Categorical(ci))
-
- ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
- tm.assert_categorical_equal(ci.values,
- Categorical(ci.astype(object),
- categories=ci.categories))
-
- def test_constructor_with_generator(self):
- # This was raising an Error in isna(single_val).any() because isna
- # returned a scalar for a generator
- xrange = range
-
- exp = Categorical([0, 1, 2])
- cat = Categorical((x for x in [0, 1, 2]))
- tm.assert_categorical_equal(cat, exp)
- cat = Categorical(xrange(3))
- tm.assert_categorical_equal(cat, exp)
-
- # This uses xrange internally
- from pandas.core.index import MultiIndex
- MultiIndex.from_product([range(5), ['a', 'b', 'c']])
-
- # check that categories accept generators and sequences
- cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
- tm.assert_categorical_equal(cat, exp)
- cat = Categorical([0, 1, 2], categories=xrange(3))
- tm.assert_categorical_equal(cat, exp)
-
- def test_constructor_with_datetimelike(self):
-
- # 12077
- # constructor wwth a datetimelike and NaT
-
- for dtl in [date_range('1995-01-01 00:00:00', periods=5, freq='s'),
- date_range('1995-01-01 00:00:00', periods=5,
- freq='s', tz='US/Eastern'),
- timedelta_range('1 day', periods=5, freq='s')]:
-
- s = Series(dtl)
- c = Categorical(s)
- expected = type(dtl)(s)
- expected.freq = None
- tm.assert_index_equal(c.categories, expected)
- tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype='int8'))
-
- # with NaT
- s2 = s.copy()
- s2.iloc[-1] = NaT
- c = Categorical(s2)
- expected = type(dtl)(s2.dropna())
- expected.freq = None
- tm.assert_index_equal(c.categories, expected)
-
- exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
- tm.assert_numpy_array_equal(c.codes, exp)
-
- result = repr(c)
- assert 'NaT' in result
-
- def test_constructor_from_index_series_datetimetz(self):
- idx = date_range('2015-01-01 10:00', freq='D', periods=3,
- tz='US/Eastern')
- result = Categorical(idx)
- tm.assert_index_equal(result.categories, idx)
-
- result = Categorical(Series(idx))
- tm.assert_index_equal(result.categories, idx)
-
- def test_constructor_from_index_series_timedelta(self):
- idx = timedelta_range('1 days', freq='D', periods=3)
- result = Categorical(idx)
- tm.assert_index_equal(result.categories, idx)
-
- result = Categorical(Series(idx))
- tm.assert_index_equal(result.categories, idx)
-
- def test_constructor_from_index_series_period(self):
- idx = period_range('2015-01-01', freq='D', periods=3)
- result = Categorical(idx)
- tm.assert_index_equal(result.categories, idx)
-
- result = Categorical(Series(idx))
- tm.assert_index_equal(result.categories, idx)
-
- def test_constructor_invariant(self):
- # GH 14190
- vals = [
- np.array([1., 1.2, 1.8, np.nan]),
- np.array([1, 2, 3], dtype='int64'),
- ['a', 'b', 'c', np.nan],
- [pd.Period('2014-01'), pd.Period('2014-02'), NaT],
- [Timestamp('2014-01-01'), Timestamp('2014-01-02'), NaT],
- [Timestamp('2014-01-01', tz='US/Eastern'),
- Timestamp('2014-01-02', tz='US/Eastern'), NaT],
- ]
- for val in vals:
- c = Categorical(val)
- c2 = Categorical(c)
- tm.assert_categorical_equal(c, c2)
-
- @pytest.mark.parametrize('ordered', [True, False])
- def test_constructor_with_dtype(self, ordered):
- categories = ['b', 'a', 'c']
- dtype = CategoricalDtype(categories, ordered=ordered)
- result = Categorical(['a', 'b', 'a', 'c'], dtype=dtype)
- expected = Categorical(['a', 'b', 'a', 'c'], categories=categories,
- ordered=ordered)
- tm.assert_categorical_equal(result, expected)
- assert result.ordered is ordered
-
- def test_constructor_dtype_and_others_raises(self):
- dtype = CategoricalDtype(['a', 'b'], ordered=True)
- with tm.assert_raises_regex(ValueError, "Cannot"):
- Categorical(['a', 'b'], categories=['a', 'b'], dtype=dtype)
-
- with tm.assert_raises_regex(ValueError, "Cannot"):
- Categorical(['a', 'b'], ordered=True, dtype=dtype)
-
- with tm.assert_raises_regex(ValueError, "Cannot"):
- Categorical(['a', 'b'], ordered=False, dtype=dtype)
-
- @pytest.mark.parametrize('categories', [
- None, ['a', 'b'], ['a', 'c'],
- ])
- @pytest.mark.parametrize('ordered', [True, False])
- def test_constructor_str_category(self, categories, ordered):
- result = Categorical(['a', 'b'], categories=categories,
- ordered=ordered, dtype='category')
- expected = Categorical(['a', 'b'], categories=categories,
- ordered=ordered)
- tm.assert_categorical_equal(result, expected)
-
- def test_constructor_str_unknown(self):
- with tm.assert_raises_regex(ValueError, "Unknown `dtype`"):
- Categorical([1, 2], dtype="foo")
-
- def test_constructor_from_categorical_with_dtype(self):
- dtype = CategoricalDtype(['a', 'b', 'c'], ordered=True)
- values = Categorical(['a', 'b', 'd'])
- result = Categorical(values, dtype=dtype)
- # We use dtype.categories, not values.categories
- expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
- ordered=True)
- tm.assert_categorical_equal(result, expected)
-
- def test_constructor_from_categorical_with_unknown_dtype(self):
- dtype = CategoricalDtype(None, ordered=True)
- values = Categorical(['a', 'b', 'd'])
- result = Categorical(values, dtype=dtype)
- # We use values.categories, not dtype.categories
- expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'd'],
- ordered=True)
- tm.assert_categorical_equal(result, expected)
-
- def test_contructor_from_categorical_string(self):
- values = Categorical(['a', 'b', 'd'])
- # use categories, ordered
- result = Categorical(values, categories=['a', 'b', 'c'], ordered=True,
- dtype='category')
- expected = Categorical(['a', 'b', 'd'], categories=['a', 'b', 'c'],
- ordered=True)
- tm.assert_categorical_equal(result, expected)
-
- # No string
- result = Categorical(values, categories=['a', 'b', 'c'], ordered=True)
- tm.assert_categorical_equal(result, expected)
-
- def test_constructor_with_categorical_categories(self):
- # GH17884
- expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
-
- result = Categorical(
- ['a', 'b'], categories=Categorical(['a', 'b', 'c']))
- tm.assert_categorical_equal(result, expected)
-
- result = Categorical(
- ['a', 'b'], categories=CategoricalIndex(['a', 'b', 'c']))
- tm.assert_categorical_equal(result, expected)
-
- def test_from_codes(self):
-
- # too few categories
- def f():
- Categorical.from_codes([1, 2], [1, 2])
-
- pytest.raises(ValueError, f)
-
- # no int codes
- def f():
- Categorical.from_codes(["a"], [1, 2])
-
- pytest.raises(ValueError, f)
-
- # no unique categories
- def f():
- Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
-
- pytest.raises(ValueError, f)
-
- # NaN categories included
- def f():
- Categorical.from_codes([0, 1, 2], ["a", "b", np.nan])
-
- pytest.raises(ValueError, f)
-
- # too negative
- def f():
- Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
-
- pytest.raises(ValueError, f)
-
- exp = Categorical(["a", "b", "c"], ordered=False)
- res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
- tm.assert_categorical_equal(exp, res)
-
- # Not available in earlier numpy versions
- if hasattr(np.random, "choice"):
- codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
- Categorical.from_codes(codes, categories=["train", "test"])
-
- def test_from_codes_with_categorical_categories(self):
- # GH17884
- expected = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
-
- result = Categorical.from_codes(
- [0, 1], categories=Categorical(['a', 'b', 'c']))
- tm.assert_categorical_equal(result, expected)
-
- result = Categorical.from_codes(
- [0, 1], categories=CategoricalIndex(['a', 'b', 'c']))
- tm.assert_categorical_equal(result, expected)
-
- # non-unique Categorical still raises
- with pytest.raises(ValueError):
- Categorical.from_codes([0, 1], Categorical(['a', 'b', 'a']))
-
- @pytest.mark.parametrize('dtype', [None, 'category'])
- def test_from_inferred_categories(self, dtype):
- cats = ['a', 'b']
- codes = np.array([0, 0, 1, 1], dtype='i8')
- result = Categorical._from_inferred_categories(cats, codes, dtype)
- expected = Categorical.from_codes(codes, cats)
- tm.assert_categorical_equal(result, expected)
-
- @pytest.mark.parametrize('dtype', [None, 'category'])
- def test_from_inferred_categories_sorts(self, dtype):
- cats = ['b', 'a']
- codes = np.array([0, 1, 1, 1], dtype='i8')
- result = Categorical._from_inferred_categories(cats, codes, dtype)
- expected = Categorical.from_codes([1, 0, 0, 0], ['a', 'b'])
- tm.assert_categorical_equal(result, expected)
-
- def test_from_inferred_categories_dtype(self):
- cats = ['a', 'b', 'd']
- codes = np.array([0, 1, 0, 2], dtype='i8')
- dtype = CategoricalDtype(['c', 'b', 'a'], ordered=True)
- result = Categorical._from_inferred_categories(cats, codes, dtype)
- expected = Categorical(['a', 'b', 'a', 'd'],
- categories=['c', 'b', 'a'],
- ordered=True)
- tm.assert_categorical_equal(result, expected)
-
- def test_from_inferred_categories_coerces(self):
- cats = ['1', '2', 'bad']
- codes = np.array([0, 0, 1, 2], dtype='i8')
- dtype = CategoricalDtype([1, 2])
- result = Categorical._from_inferred_categories(cats, codes, dtype)
- expected = Categorical([1, 1, 2, np.nan])
- tm.assert_categorical_equal(result, expected)
-
- def test_validate_ordered(self):
- # see gh-14058
- exp_msg = "'ordered' must either be 'True' or 'False'"
- exp_err = TypeError
-
- # This should be a boolean.
- ordered = np.array([0, 1, 2])
-
- with tm.assert_raises_regex(exp_err, exp_msg):
- Categorical([1, 2, 3], ordered=ordered)
-
- with tm.assert_raises_regex(exp_err, exp_msg):
- Categorical.from_codes([0, 0, 1], categories=['a', 'b', 'c'],
- ordered=ordered)
-
- def test_comparisons(self):
-
- result = self.factor[self.factor == 'a']
- expected = self.factor[np.asarray(self.factor) == 'a']
- tm.assert_categorical_equal(result, expected)
-
- result = self.factor[self.factor != 'a']
- expected = self.factor[np.asarray(self.factor) != 'a']
- tm.assert_categorical_equal(result, expected)
-
- result = self.factor[self.factor < 'c']
- expected = self.factor[np.asarray(self.factor) < 'c']
- tm.assert_categorical_equal(result, expected)
-
- result = self.factor[self.factor > 'a']
- expected = self.factor[np.asarray(self.factor) > 'a']
- tm.assert_categorical_equal(result, expected)
-
- result = self.factor[self.factor >= 'b']
- expected = self.factor[np.asarray(self.factor) >= 'b']
- tm.assert_categorical_equal(result, expected)
-
- result = self.factor[self.factor <= 'b']
- expected = self.factor[np.asarray(self.factor) <= 'b']
- tm.assert_categorical_equal(result, expected)
-
- n = len(self.factor)
-
- other = self.factor[np.random.permutation(n)]
- result = self.factor == other
- expected = np.asarray(self.factor) == np.asarray(other)
- tm.assert_numpy_array_equal(result, expected)
-
- result = self.factor == 'd'
- expected = np.repeat(False, len(self.factor))
- tm.assert_numpy_array_equal(result, expected)
-
- # comparisons with categoricals
- cat_rev = Categorical(
- ["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
- cat_rev_base = Categorical(
- ["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
- cat = Categorical(["a", "b", "c"], ordered=True)
- cat_base = Categorical(
- ["b", "b", "b"], categories=cat.categories, ordered=True)
-
- # comparisons need to take categories ordering into account
- res_rev = cat_rev > cat_rev_base
- exp_rev = np.array([True, False, False])
- tm.assert_numpy_array_equal(res_rev, exp_rev)
-
- res_rev = cat_rev < cat_rev_base
- exp_rev = np.array([False, False, True])
- tm.assert_numpy_array_equal(res_rev, exp_rev)
-
- res = cat > cat_base
- exp = np.array([False, False, True])
- tm.assert_numpy_array_equal(res, exp)
-
- # Only categories with same categories can be compared
- def f():
- cat > cat_rev
-
- pytest.raises(TypeError, f)
-
- cat_rev_base2 = Categorical(
- ["b", "b", "b"], categories=["c", "b", "a", "d"])
-
- def f():
- cat_rev > cat_rev_base2
-
- pytest.raises(TypeError, f)
-
- # Only categories with same ordering information can be compared
- cat_unorderd = cat.set_ordered(False)
- assert not (cat > cat).any()
-
- def f():
- cat > cat_unorderd
-
- pytest.raises(TypeError, f)
-
- # comparison (in both directions) with Series will raise
- s = Series(["b", "b", "b"])
- pytest.raises(TypeError, lambda: cat > s)
- pytest.raises(TypeError, lambda: cat_rev > s)
- pytest.raises(TypeError, lambda: s < cat)
- pytest.raises(TypeError, lambda: s < cat_rev)
-
- # comparison with numpy.array will raise in both direction, but only on
- # newer numpy versions
- a = np.array(["b", "b", "b"])
- pytest.raises(TypeError, lambda: cat > a)
- pytest.raises(TypeError, lambda: cat_rev > a)
-
- # The following work via '__array_priority__ = 1000'
- # works only on numpy >= 1.7.1
- if LooseVersion(np.__version__) > LooseVersion("1.7.1"):
- pytest.raises(TypeError, lambda: a < cat)
- pytest.raises(TypeError, lambda: a < cat_rev)
-
- # Make sure that unequal comparison take the categories order in
- # account
- cat_rev = Categorical(
- list("abc"), categories=list("cba"), ordered=True)
- exp = np.array([True, False, False])
- res = cat_rev > "b"
- tm.assert_numpy_array_equal(res, exp)
-
- def test_argsort(self):
- c = Categorical([5, 3, 1, 4, 2], ordered=True)
-
- expected = np.array([2, 4, 1, 3, 0])
- tm.assert_numpy_array_equal(c.argsort(ascending=True), expected,
- check_dtype=False)
-
- expected = expected[::-1]
- tm.assert_numpy_array_equal(c.argsort(ascending=False), expected,
- check_dtype=False)
-
- def test_numpy_argsort(self):
- c = Categorical([5, 3, 1, 4, 2], ordered=True)
-
- expected = np.array([2, 4, 1, 3, 0])
- tm.assert_numpy_array_equal(np.argsort(c), expected,
- check_dtype=False)
-
- tm.assert_numpy_array_equal(np.argsort(c, kind='mergesort'), expected,
- check_dtype=False)
-
- msg = "the 'axis' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.argsort,
- c, axis=0)
-
- msg = "the 'order' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.argsort,
- c, order='C')
-
- def test_na_flags_int_categories(self):
- # #1457
-
- categories = lrange(10)
- labels = np.random.randint(0, 10, 20)
- labels[::5] = -1
-
- cat = Categorical(labels, categories, fastpath=True)
- repr(cat)
-
- tm.assert_numpy_array_equal(isna(cat), labels == -1)
-
- def test_categories_none(self):
- factor = Categorical(['a', 'b', 'b', 'a',
- 'a', 'c', 'c', 'c'], ordered=True)
- tm.assert_categorical_equal(factor, self.factor)
-
- def test_set_categories_inplace(self):
- cat = self.factor.copy()
- cat.set_categories(['a', 'b', 'c', 'd'], inplace=True)
- tm.assert_index_equal(cat.categories, Index(['a', 'b', 'c', 'd']))
-
- @pytest.mark.parametrize(
- "dtype",
- ["int_", "uint", "float_", "unicode_", "timedelta64[h]",
- pytest.param("datetime64[D]",
- marks=pytest.mark.xfail(reason="issue7996"))]
- )
- @pytest.mark.parametrize("is_ordered", [True, False])
- def test_drop_duplicates_non_bool(self, dtype, is_ordered):
- cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
-
- # Test case 1
- input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
- tc1 = Series(Categorical(input1, categories=cat_array,
- ordered=is_ordered))
-
- expected = Series([False, False, False, True])
- tm.assert_series_equal(tc1.duplicated(), expected)
- tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
- sc = tc1.copy()
- sc.drop_duplicates(inplace=True)
- tm.assert_series_equal(sc, tc1[~expected])
-
- expected = Series([False, False, True, False])
- tm.assert_series_equal(tc1.duplicated(keep='last'), expected)
- tm.assert_series_equal(tc1.drop_duplicates(keep='last'),
- tc1[~expected])
- sc = tc1.copy()
- sc.drop_duplicates(keep='last', inplace=True)
- tm.assert_series_equal(sc, tc1[~expected])
-
- expected = Series([False, False, True, True])
- tm.assert_series_equal(tc1.duplicated(keep=False), expected)
- tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
- sc = tc1.copy()
- sc.drop_duplicates(keep=False, inplace=True)
- tm.assert_series_equal(sc, tc1[~expected])
-
- # Test case 2
- input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
- tc2 = Series(Categorical(
- input2, categories=cat_array, ordered=is_ordered)
- )
-
- expected = Series([False, False, False, False, True, True, False])
- tm.assert_series_equal(tc2.duplicated(), expected)
- tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
- sc = tc2.copy()
- sc.drop_duplicates(inplace=True)
- tm.assert_series_equal(sc, tc2[~expected])
-
- expected = Series([False, True, True, False, False, False, False])
- tm.assert_series_equal(tc2.duplicated(keep='last'), expected)
- tm.assert_series_equal(tc2.drop_duplicates(keep='last'),
- tc2[~expected])
- sc = tc2.copy()
- sc.drop_duplicates(keep='last', inplace=True)
- tm.assert_series_equal(sc, tc2[~expected])
-
- expected = Series([False, True, True, False, True, True, False])
- tm.assert_series_equal(tc2.duplicated(keep=False), expected)
- tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
- sc = tc2.copy()
- sc.drop_duplicates(keep=False, inplace=True)
- tm.assert_series_equal(sc, tc2[~expected])
-
- @pytest.mark.parametrize("is_ordered", [True, False])
- def test_drop_duplicates_bool(self, is_ordered):
- tc = Series(Categorical([True, False, True, False],
- categories=[True, False], ordered=is_ordered))
-
- expected = Series([False, False, True, True])
- tm.assert_series_equal(tc.duplicated(), expected)
- tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
- sc = tc.copy()
- sc.drop_duplicates(inplace=True)
- tm.assert_series_equal(sc, tc[~expected])
-
- expected = Series([True, True, False, False])
- tm.assert_series_equal(tc.duplicated(keep='last'), expected)
- tm.assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
- sc = tc.copy()
- sc.drop_duplicates(keep='last', inplace=True)
- tm.assert_series_equal(sc, tc[~expected])
-
- expected = Series([True, True, True, True])
- tm.assert_series_equal(tc.duplicated(keep=False), expected)
- tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
- sc = tc.copy()
- sc.drop_duplicates(keep=False, inplace=True)
- tm.assert_series_equal(sc, tc[~expected])
-
- def test_describe(self):
- # string type
- desc = self.factor.describe()
- assert self.factor.ordered
- exp_index = CategoricalIndex(['a', 'b', 'c'], name='categories',
- ordered=self.factor.ordered)
- expected = DataFrame({'counts': [3, 2, 3],
- 'freqs': [3 / 8., 2 / 8., 3 / 8.]},
- index=exp_index)
- tm.assert_frame_equal(desc, expected)
-
- # check unused categories
- cat = self.factor.copy()
- cat.set_categories(["a", "b", "c", "d"], inplace=True)
- desc = cat.describe()
-
- exp_index = CategoricalIndex(
- list('abcd'), ordered=self.factor.ordered, name='categories')
- expected = DataFrame({'counts': [3, 2, 3, 0],
- 'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
- index=exp_index)
- tm.assert_frame_equal(desc, expected)
-
- # check an integer one
- cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1])
- desc = cat.describe()
- exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered,
- name='categories')
- expected = DataFrame({'counts': [5, 3, 3],
- 'freqs': [5 / 11., 3 / 11., 3 / 11.]},
- index=exp_index)
- tm.assert_frame_equal(desc, expected)
-
- # https://github.com/pandas-dev/pandas/issues/3678
- # describe should work with NaN
- cat = Categorical([np.nan, 1, 2, 2])
- desc = cat.describe()
- expected = DataFrame({'counts': [1, 2, 1],
- 'freqs': [1 / 4., 2 / 4., 1 / 4.]},
- index=CategoricalIndex([1, 2, np.nan],
- categories=[1, 2],
- name='categories'))
- tm.assert_frame_equal(desc, expected)
-
- def test_print(self):
- expected = ["[a, b, b, a, a, c, c, c]",
- "Categories (3, object): [a < b < c]"]
- expected = "\n".join(expected)
- actual = repr(self.factor)
- assert actual == expected
-
- def test_big_print(self):
- factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
- fastpath=True)
- expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
- "Categories (3, object): [a, b, c]"]
- expected = "\n".join(expected)
-
- actual = repr(factor)
-
- assert actual == expected
-
- def test_empty_print(self):
- factor = Categorical([], ["a", "b", "c"])
- expected = ("[], Categories (3, object): [a, b, c]")
- # hack because array_repr changed in numpy > 1.6.x
- actual = repr(factor)
- assert actual == expected
-
- assert expected == actual
- factor = Categorical([], ["a", "b", "c"], ordered=True)
- expected = ("[], Categories (3, object): [a < b < c]")
- actual = repr(factor)
- assert expected == actual
-
- factor = Categorical([], [])
- expected = ("[], Categories (0, object): []")
- assert expected == repr(factor)
-
- def test_print_none_width(self):
- # GH10087
- a = Series(Categorical([1, 2, 3, 4]))
- exp = u("0 1\n1 2\n2 3\n3 4\n" +
- "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
-
- with option_context("display.width", None):
- assert exp == repr(a)
-
- def test_unicode_print(self):
- if PY3:
- _rep = repr
- else:
- _rep = unicode # noqa
-
- c = Categorical(['aaaaa', 'bb', 'cccc'] * 20)
- expected = u"""\
-[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
-Length: 60
-Categories (3, object): [aaaaa, bb, cccc]"""
-
- assert _rep(c) == expected
-
- c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
- expected = u"""\
-[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
-Length: 60
-Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
-
- assert _rep(c) == expected
-
- # unicode option should not affect to Categorical, as it doesn't care
- # the repr width
- with option_context('display.unicode.east_asian_width', True):
-
- c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)
- expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
-Length: 60
-Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
-
- assert _rep(c) == expected
-
- def test_tab_complete_warning(self, ip):
- # https://github.com/pandas-dev/pandas/issues/16409
- pytest.importorskip('IPython', minversion="6.0.0")
- from IPython.core.completer import provisionalcompleter
-
- code = "import pandas as pd; c = Categorical([])"
- ip.run_code(code)
- with tm.assert_produces_warning(None):
- with provisionalcompleter('ignore'):
- list(ip.Completer.completions('c.', 1))
-
- def test_periodindex(self):
- idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
- '2014-03', '2014-03'], freq='M')
-
- cat1 = Categorical(idx1)
- str(cat1)
- exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8)
- exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
- tm.assert_numpy_array_equal(cat1._codes, exp_arr)
- tm.assert_index_equal(cat1.categories, exp_idx)
-
- idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
- '2014-03', '2014-01'], freq='M')
- cat2 = Categorical(idx2, ordered=True)
- str(cat2)
- exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8)
- exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
- tm.assert_numpy_array_equal(cat2._codes, exp_arr)
- tm.assert_index_equal(cat2.categories, exp_idx2)
-
- idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
- '2013-08', '2013-07', '2013-05'], freq='M')
- cat3 = Categorical(idx3, ordered=True)
- exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8)
- exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
- '2013-10', '2013-11', '2013-12'], freq='M')
- tm.assert_numpy_array_equal(cat3._codes, exp_arr)
- tm.assert_index_equal(cat3.categories, exp_idx)
-
- def test_categories_assigments(self):
- s = Categorical(["a", "b", "c", "a"])
- exp = np.array([1, 2, 3, 1], dtype=np.int64)
- s.categories = [1, 2, 3]
- tm.assert_numpy_array_equal(s.__array__(), exp)
- tm.assert_index_equal(s.categories, Index([1, 2, 3]))
-
- # lengthen
- def f():
- s.categories = [1, 2, 3, 4]
-
- pytest.raises(ValueError, f)
-
- # shorten
- def f():
- s.categories = [1, 2]
-
- pytest.raises(ValueError, f)
-
- def test_construction_with_ordered(self):
- # GH 9347, 9190
- cat = Categorical([0, 1, 2])
- assert not cat.ordered
- cat = Categorical([0, 1, 2], ordered=False)
- assert not cat.ordered
- cat = Categorical([0, 1, 2], ordered=True)
- assert cat.ordered
-
- def test_ordered_api(self):
- # GH 9347
- cat1 = Categorical(list('acb'), ordered=False)
- tm.assert_index_equal(cat1.categories, Index(['a', 'b', 'c']))
- assert not cat1.ordered
-
- cat2 = Categorical(list('acb'), categories=list('bca'), ordered=False)
- tm.assert_index_equal(cat2.categories, Index(['b', 'c', 'a']))
- assert not cat2.ordered
-
- cat3 = Categorical(list('acb'), ordered=True)
- tm.assert_index_equal(cat3.categories, Index(['a', 'b', 'c']))
- assert cat3.ordered
-
- cat4 = Categorical(list('acb'), categories=list('bca'), ordered=True)
- tm.assert_index_equal(cat4.categories, Index(['b', 'c', 'a']))
- assert cat4.ordered
-
- def test_set_dtype_same(self):
- c = Categorical(['a', 'b', 'c'])
- result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
- tm.assert_categorical_equal(result, c)
-
- def test_set_dtype_new_categories(self):
- c = Categorical(['a', 'b', 'c'])
- result = c._set_dtype(CategoricalDtype(list('abcd')))
- tm.assert_numpy_array_equal(result.codes, c.codes)
- tm.assert_index_equal(result.dtype.categories, Index(list('abcd')))
-
- def test_set_dtype_nans(self):
- c = Categorical(['a', 'b', np.nan])
- result = c._set_dtype(CategoricalDtype(['a', 'c']))
- tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
- dtype='int8'))
-
- def test_set_categories_private(self):
- cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
- cat._set_categories(['a', 'c', 'd', 'e'])
- expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
- tm.assert_categorical_equal(cat, expected)
-
- # fastpath
- cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd'])
- cat._set_categories(['a', 'c', 'd', 'e'], fastpath=True)
- expected = Categorical(['a', 'c', 'd'], categories=list('acde'))
- tm.assert_categorical_equal(cat, expected)
-
- @pytest.mark.parametrize('values, categories, new_categories', [
- # No NaNs, same cats, same order
- (['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
- # No NaNs, same cats, different order
- (['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
- # Same, unsorted
- (['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
- # No NaNs, same cats, different order
- (['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
- # NaNs
- (['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
- (['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
- (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
- (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
- # Introduce NaNs
- (['a', 'b', 'c'], ['a', 'b'], ['a']),
- (['a', 'b', 'c'], ['a', 'b'], ['b']),
- (['b', 'a', 'c'], ['a', 'b'], ['a']),
- (['b', 'a', 'c'], ['a', 'b'], ['a']),
- # No overlap
- (['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
- ])
- @pytest.mark.parametrize('ordered', [True, False])
- def test_set_dtype_many(self, values, categories, new_categories,
- ordered):
- c = Categorical(values, categories)
- expected = Categorical(values, new_categories, ordered)
- result = c._set_dtype(expected.dtype)
- tm.assert_categorical_equal(result, expected)
-
- def test_set_dtype_no_overlap(self):
- c = Categorical(['a', 'b', 'c'], ['d', 'e'])
- result = c._set_dtype(CategoricalDtype(['a', 'b']))
- expected = Categorical([None, None, None], categories=['a', 'b'])
- tm.assert_categorical_equal(result, expected)
-
- def test_set_ordered(self):
-
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- cat2 = cat.as_unordered()
- assert not cat2.ordered
- cat2 = cat.as_ordered()
- assert cat2.ordered
- cat2.as_unordered(inplace=True)
- assert not cat2.ordered
- cat2.as_ordered(inplace=True)
- assert cat2.ordered
-
- assert cat2.set_ordered(True).ordered
- assert not cat2.set_ordered(False).ordered
- cat2.set_ordered(True, inplace=True)
- assert cat2.ordered
- cat2.set_ordered(False, inplace=True)
- assert not cat2.ordered
-
- # removed in 0.19.0
- msg = "can\'t set attribute"
- with tm.assert_raises_regex(AttributeError, msg):
- cat.ordered = True
- with tm.assert_raises_regex(AttributeError, msg):
- cat.ordered = False
-
- def test_set_categories(self):
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- exp_categories = Index(["c", "b", "a"])
- exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
-
- res = cat.set_categories(["c", "b", "a"], inplace=True)
- tm.assert_index_equal(cat.categories, exp_categories)
- tm.assert_numpy_array_equal(cat.__array__(), exp_values)
- assert res is None
-
- res = cat.set_categories(["a", "b", "c"])
- # cat must be the same as before
- tm.assert_index_equal(cat.categories, exp_categories)
- tm.assert_numpy_array_equal(cat.__array__(), exp_values)
- # only res is changed
- exp_categories_back = Index(["a", "b", "c"])
- tm.assert_index_equal(res.categories, exp_categories_back)
- tm.assert_numpy_array_equal(res.__array__(), exp_values)
-
- # not all "old" included in "new" -> all not included ones are now
- # np.nan
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- res = cat.set_categories(["a"])
- tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0],
- dtype=np.int8))
-
- # still not all "old" in "new"
- res = cat.set_categories(["a", "b", "d"])
- tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0],
- dtype=np.int8))
- tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
-
- # all "old" included in "new"
- cat = cat.set_categories(["a", "b", "c", "d"])
- exp_categories = Index(["a", "b", "c", "d"])
- tm.assert_index_equal(cat.categories, exp_categories)
-
- # internals...
- c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
- tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0],
- dtype=np.int8))
- tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
-
- exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(c.get_values(), exp)
-
- # all "pointers" to '4' must be changed from 3 to 0,...
- c = c.set_categories([4, 3, 2, 1])
-
- # positions are changed
- tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3],
- dtype=np.int8))
-
- # categories are now in new order
- tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
-
- # output is the same
- exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(c.get_values(), exp)
- assert c.min() == 4
- assert c.max() == 1
-
- # set_categories should set the ordering if specified
- c2 = c.set_categories([4, 3, 2, 1], ordered=False)
- assert not c2.ordered
-
- tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
-
- # set_categories should pass thru the ordering
- c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
- assert not c2.ordered
-
- tm.assert_numpy_array_equal(c.get_values(), c2.get_values())
-
- def test_rename_categories(self):
- cat = Categorical(["a", "b", "c", "a"])
-
- # inplace=False: the old one must not be changed
- res = cat.rename_categories([1, 2, 3])
- tm.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1],
- dtype=np.int64))
- tm.assert_index_equal(res.categories, Index([1, 2, 3]))
-
- exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
- tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
-
- exp_cat = Index(["a", "b", "c"])
- tm.assert_index_equal(cat.categories, exp_cat)
- res = cat.rename_categories([1, 2, 3], inplace=True)
-
- # and now inplace
- assert res is None
- tm.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1],
- dtype=np.int64))
- tm.assert_index_equal(cat.categories, Index([1, 2, 3]))
-
- # Lengthen
- with pytest.raises(ValueError):
- cat.rename_categories([1, 2, 3, 4])
-
- # Shorten
- with pytest.raises(ValueError):
- cat.rename_categories([1, 2])
-
- def test_rename_categories_series(self):
- # https://github.com/pandas-dev/pandas/issues/17981
- c = Categorical(['a', 'b'])
- xpr = "Treating Series 'new_categories' as a list-like "
- with tm.assert_produces_warning(FutureWarning) as rec:
- result = c.rename_categories(Series([0, 1]))
-
- assert len(rec) == 1
- assert xpr in str(rec[0].message)
- expected = Categorical([0, 1])
- tm.assert_categorical_equal(result, expected)
-
- def test_rename_categories_dict(self):
- # GH 17336
- cat = Categorical(['a', 'b', 'c', 'd'])
- res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1})
- expected = Index([4, 3, 2, 1])
- tm.assert_index_equal(res.categories, expected)
-
- # Test for inplace
- res = cat.rename_categories({'a': 4, 'b': 3, 'c': 2, 'd': 1},
- inplace=True)
- assert res is None
- tm.assert_index_equal(cat.categories, expected)
-
- # Test for dicts of smaller length
- cat = Categorical(['a', 'b', 'c', 'd'])
- res = cat.rename_categories({'a': 1, 'c': 3})
-
- expected = Index([1, 'b', 3, 'd'])
- tm.assert_index_equal(res.categories, expected)
-
- # Test for dicts with bigger length
- cat = Categorical(['a', 'b', 'c', 'd'])
- res = cat.rename_categories({'a': 1, 'b': 2, 'c': 3,
- 'd': 4, 'e': 5, 'f': 6})
- expected = Index([1, 2, 3, 4])
- tm.assert_index_equal(res.categories, expected)
-
- # Test for dicts with no items from old categories
- cat = Categorical(['a', 'b', 'c', 'd'])
- res = cat.rename_categories({'f': 1, 'g': 3})
-
- expected = Index(['a', 'b', 'c', 'd'])
- tm.assert_index_equal(res.categories, expected)
-
- @pytest.mark.parametrize('codes, old, new, expected', [
- ([0, 1], ['a', 'b'], ['a', 'b'], [0, 1]),
- ([0, 1], ['b', 'a'], ['b', 'a'], [0, 1]),
- ([0, 1], ['a', 'b'], ['b', 'a'], [1, 0]),
- ([0, 1], ['b', 'a'], ['a', 'b'], [1, 0]),
- ([0, 1, 0, 1], ['a', 'b'], ['a', 'b', 'c'], [0, 1, 0, 1]),
- ([0, 1, 2, 2], ['a', 'b', 'c'], ['a', 'b'], [0, 1, -1, -1]),
- ([0, 1, -1], ['a', 'b', 'c'], ['a', 'b', 'c'], [0, 1, -1]),
- ([0, 1, -1], ['a', 'b', 'c'], ['b'], [-1, 0, -1]),
- ([0, 1, -1], ['a', 'b', 'c'], ['d'], [-1, -1, -1]),
- ([0, 1, -1], ['a', 'b', 'c'], [], [-1, -1, -1]),
- ([-1, -1], [], ['a', 'b'], [-1, -1]),
- ([1, 0], ['b', 'a'], ['a', 'b'], [0, 1]),
- ])
- def test_recode_to_categories(self, codes, old, new, expected):
- codes = np.asanyarray(codes, dtype=np.int8)
- expected = np.asanyarray(expected, dtype=np.int8)
- old = Index(old)
- new = Index(new)
- result = _recode_for_categories(codes, old, new)
- tm.assert_numpy_array_equal(result, expected)
-
- def test_recode_to_categories_large(self):
- N = 1000
- codes = np.arange(N)
- old = Index(codes)
- expected = np.arange(N - 1, -1, -1, dtype=np.int16)
- new = Index(expected)
- result = _recode_for_categories(codes, old, new)
- tm.assert_numpy_array_equal(result, expected)
-
- @pytest.mark.parametrize('values, categories, new_categories', [
- # No NaNs, same cats, same order
- (['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
- # No NaNs, same cats, different order
- (['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
- # Same, unsorted
- (['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
- # No NaNs, same cats, different order
- (['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
- # NaNs
- (['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
- (['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
- (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
- (['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
- # Introduce NaNs
- (['a', 'b', 'c'], ['a', 'b'], ['a']),
- (['a', 'b', 'c'], ['a', 'b'], ['b']),
- (['b', 'a', 'c'], ['a', 'b'], ['a']),
- (['b', 'a', 'c'], ['a', 'b'], ['a']),
- # No overlap
- (['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
- ])
- @pytest.mark.parametrize('ordered', [True, False])
- def test_set_categories_many(self, values, categories, new_categories,
- ordered):
- c = Categorical(values, categories)
- expected = Categorical(values, new_categories, ordered)
- result = c.set_categories(new_categories, ordered=ordered)
- tm.assert_categorical_equal(result, expected)
-
- def test_reorder_categories(self):
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- old = cat.copy()
- new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
- ordered=True)
-
- # first inplace == False
- res = cat.reorder_categories(["c", "b", "a"])
- # cat must be the same as before
- tm.assert_categorical_equal(cat, old)
- # only res is changed
- tm.assert_categorical_equal(res, new)
-
- # inplace == True
- res = cat.reorder_categories(["c", "b", "a"], inplace=True)
- assert res is None
- tm.assert_categorical_equal(cat, new)
-
- # not all "old" included in "new"
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
-
- def f():
- cat.reorder_categories(["a"])
-
- pytest.raises(ValueError, f)
-
- # still not all "old" in "new"
- def f():
- cat.reorder_categories(["a", "b", "d"])
-
- pytest.raises(ValueError, f)
-
- # all "old" included in "new", but too long
- def f():
- cat.reorder_categories(["a", "b", "c", "d"])
-
- pytest.raises(ValueError, f)
-
- def test_add_categories(self):
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- old = cat.copy()
- new = Categorical(["a", "b", "c", "a"],
- categories=["a", "b", "c", "d"], ordered=True)
-
- # first inplace == False
- res = cat.add_categories("d")
- tm.assert_categorical_equal(cat, old)
- tm.assert_categorical_equal(res, new)
-
- res = cat.add_categories(["d"])
- tm.assert_categorical_equal(cat, old)
- tm.assert_categorical_equal(res, new)
-
- # inplace == True
- res = cat.add_categories("d", inplace=True)
- tm.assert_categorical_equal(cat, new)
- assert res is None
-
- # new is in old categories
- def f():
- cat.add_categories(["d"])
-
- pytest.raises(ValueError, f)
-
- # GH 9927
- cat = Categorical(list("abc"), ordered=True)
- expected = Categorical(
- list("abc"), categories=list("abcde"), ordered=True)
- # test with Series, np.array, index, list
- res = cat.add_categories(Series(["d", "e"]))
- tm.assert_categorical_equal(res, expected)
- res = cat.add_categories(np.array(["d", "e"]))
- tm.assert_categorical_equal(res, expected)
- res = cat.add_categories(Index(["d", "e"]))
- tm.assert_categorical_equal(res, expected)
- res = cat.add_categories(["d", "e"])
- tm.assert_categorical_equal(res, expected)
-
- def test_remove_categories(self):
- cat = Categorical(["a", "b", "c", "a"], ordered=True)
- old = cat.copy()
- new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
- ordered=True)
-
- # first inplace == False
- res = cat.remove_categories("c")
- tm.assert_categorical_equal(cat, old)
- tm.assert_categorical_equal(res, new)
-
- res = cat.remove_categories(["c"])
- tm.assert_categorical_equal(cat, old)
- tm.assert_categorical_equal(res, new)
-
- # inplace == True
- res = cat.remove_categories("c", inplace=True)
- tm.assert_categorical_equal(cat, new)
- assert res is None
-
- # removal is not in categories
- def f():
- cat.remove_categories(["c"])
-
- pytest.raises(ValueError, f)
-
- def test_remove_unused_categories(self):
- c = Categorical(["a", "b", "c", "d", "a"],
- categories=["a", "b", "c", "d", "e"])
- exp_categories_all = Index(["a", "b", "c", "d", "e"])
- exp_categories_dropped = Index(["a", "b", "c", "d"])
-
- tm.assert_index_equal(c.categories, exp_categories_all)
-
- res = c.remove_unused_categories()
- tm.assert_index_equal(res.categories, exp_categories_dropped)
- tm.assert_index_equal(c.categories, exp_categories_all)
-
- res = c.remove_unused_categories(inplace=True)
- tm.assert_index_equal(c.categories, exp_categories_dropped)
- assert res is None
-
- # with NaN values (GH11599)
- c = Categorical(["a", "b", "c", np.nan],
- categories=["a", "b", "c", "d", "e"])
- res = c.remove_unused_categories()
- tm.assert_index_equal(res.categories,
- Index(np.array(["a", "b", "c"])))
- exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
- tm.assert_numpy_array_equal(res.codes, exp_codes)
- tm.assert_index_equal(c.categories, exp_categories_all)
-
- val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
- cat = Categorical(values=val, categories=list('ABCDEFG'))
- out = cat.remove_unused_categories()
- tm.assert_index_equal(out.categories, Index(['B', 'D', 'F']))
- exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
- tm.assert_numpy_array_equal(out.codes, exp_codes)
- assert out.get_values().tolist() == val
-
- alpha = list('abcdefghijklmnopqrstuvwxyz')
- val = np.random.choice(alpha[::2], 10000).astype('object')
- val[np.random.choice(len(val), 100)] = np.nan
-
- cat = Categorical(values=val, categories=alpha)
- out = cat.remove_unused_categories()
- assert out.get_values().tolist() == val.tolist()
-
- def test_nan_handling(self):
-
- # Nans are represented as -1 in codes
- c = Categorical(["a", "b", np.nan, "a"])
- tm.assert_index_equal(c.categories, Index(["a", "b"]))
- tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
- dtype=np.int8))
- c[1] = np.nan
- tm.assert_index_equal(c.categories, Index(["a", "b"]))
- tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
- dtype=np.int8))
-
- # Adding nan to categories should make assigned nan point to the
- # category!
- c = Categorical(["a", "b", np.nan, "a"])
- tm.assert_index_equal(c.categories, Index(["a", "b"]))
- tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
- dtype=np.int8))
-
- def test_isna(self):
- exp = np.array([False, False, True])
- c = Categorical(["a", "b", np.nan])
- res = c.isna()
-
- tm.assert_numpy_array_equal(res, exp)
-
- def test_codes_immutable(self):
-
- # Codes should be read only
- c = Categorical(["a", "b", "c", "a", np.nan])
- exp = np.array([0, 1, 2, 0, -1], dtype='int8')
- tm.assert_numpy_array_equal(c.codes, exp)
-
- # Assignments to codes should raise
- def f():
- c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
-
- pytest.raises(ValueError, f)
-
- # changes in the codes array should raise
- # np 1.6.1 raises RuntimeError rather than ValueError
- codes = c.codes
-
- def f():
- codes[4] = 1
-
- pytest.raises(ValueError, f)
-
- # But even after getting the codes, the original array should still be
- # writeable!
- c[4] = "a"
- exp = np.array([0, 1, 2, 0, 0], dtype='int8')
- tm.assert_numpy_array_equal(c.codes, exp)
- c._codes[4] = 2
- exp = np.array([0, 1, 2, 0, 2], dtype='int8')
- tm.assert_numpy_array_equal(c.codes, exp)
-
- def test_min_max(self):
-
- # unordered cats have no min/max
- cat = Categorical(["a", "b", "c", "d"], ordered=False)
- pytest.raises(TypeError, lambda: cat.min())
- pytest.raises(TypeError, lambda: cat.max())
- cat = Categorical(["a", "b", "c", "d"], ordered=True)
- _min = cat.min()
- _max = cat.max()
- assert _min == "a"
- assert _max == "d"
- cat = Categorical(["a", "b", "c", "d"],
- categories=['d', 'c', 'b', 'a'], ordered=True)
- _min = cat.min()
- _max = cat.max()
- assert _min == "d"
- assert _max == "a"
- cat = Categorical([np.nan, "b", "c", np.nan],
- categories=['d', 'c', 'b', 'a'], ordered=True)
- _min = cat.min()
- _max = cat.max()
- assert np.isnan(_min)
- assert _max == "b"
-
- _min = cat.min(numeric_only=True)
- assert _min == "c"
- _max = cat.max(numeric_only=True)
- assert _max == "b"
-
- cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
- ordered=True)
- _min = cat.min()
- _max = cat.max()
- assert np.isnan(_min)
- assert _max == 1
-
- _min = cat.min(numeric_only=True)
- assert _min == 2
- _max = cat.max(numeric_only=True)
- assert _max == 1
-
- def test_unique(self):
- # categories are reordered based on value when ordered=False
- cat = Categorical(["a", "b"])
- exp = Index(["a", "b"])
- res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
- tm.assert_categorical_equal(res, cat)
-
- cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
- res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
- tm.assert_categorical_equal(res, Categorical(exp))
-
- cat = Categorical(["c", "a", "b", "a", "a"],
- categories=["a", "b", "c"])
- exp = Index(["c", "a", "b"])
- res = cat.unique()
- tm.assert_index_equal(res.categories, exp)
- exp_cat = Categorical(exp, categories=['c', 'a', 'b'])
- tm.assert_categorical_equal(res, exp_cat)
-
- # nan must be removed
- cat = Categorical(["b", np.nan, "b", np.nan, "a"],
- categories=["a", "b", "c"])
- res = cat.unique()
- exp = Index(["b", "a"])
- tm.assert_index_equal(res.categories, exp)
- exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
- tm.assert_categorical_equal(res, exp_cat)
-
- def test_unique_ordered(self):
- # keep categories order when ordered=True
- cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
- res = cat.unique()
- exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
- ordered=True)
- res = cat.unique()
- exp_cat = Categorical(['c', 'b', 'a'], categories=['a', 'b', 'c'],
- ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
- ordered=True)
- res = cat.unique()
- exp_cat = Categorical(['b', 'a'], categories=['a', 'b'], ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
- ordered=True)
- res = cat.unique()
- exp_cat = Categorical(['b', np.nan, 'a'], categories=['a', 'b'],
- ordered=True)
- tm.assert_categorical_equal(res, exp_cat)
-
- def test_unique_index_series(self):
- c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
- # Categorical.unique sorts categories by appearance order
- # if ordered=False
- exp = Categorical([3, 1, 2], categories=[3, 1, 2])
- tm.assert_categorical_equal(c.unique(), exp)
-
- tm.assert_index_equal(Index(c).unique(), Index(exp))
- tm.assert_categorical_equal(Series(c).unique(), exp)
-
- c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
- exp = Categorical([1, 2], categories=[1, 2])
- tm.assert_categorical_equal(c.unique(), exp)
- tm.assert_index_equal(Index(c).unique(), Index(exp))
- tm.assert_categorical_equal(Series(c).unique(), exp)
-
- c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
- # Categorical.unique keeps categories order if ordered=True
- exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
- tm.assert_categorical_equal(c.unique(), exp)
-
- tm.assert_index_equal(Index(c).unique(), Index(exp))
- tm.assert_categorical_equal(Series(c).unique(), exp)
-
- def test_mode(self):
- s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
- ordered=True)
- res = s.mode()
- exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
- s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
- ordered=True)
- res = s.mode()
- exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
- s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
- ordered=True)
- res = s.mode()
- exp = Categorical([5, 4, 3, 2, 1],
- categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
- # NaN should not become the mode!
- s = Categorical([np.nan, np.nan, np.nan, 4, 5],
- categories=[5, 4, 3, 2, 1], ordered=True)
- res = s.mode()
- exp = Categorical([5, 4], categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
- s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
- categories=[5, 4, 3, 2, 1], ordered=True)
- res = s.mode()
- exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
- s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
- ordered=True)
- res = s.mode()
- exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
- tm.assert_categorical_equal(res, exp)
-
- def test_sort_values(self):
-
- # unordered cats are sortable
- cat = Categorical(["a", "b", "b", "a"], ordered=False)
- cat.sort_values()
-
- cat = Categorical(["a", "c", "b", "d"], ordered=True)
-
- # sort_values
- res = cat.sort_values()
- exp = np.array(["a", "b", "c", "d"], dtype=object)
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, cat.categories)
-
- cat = Categorical(["a", "c", "b", "d"],
- categories=["a", "b", "c", "d"], ordered=True)
- res = cat.sort_values()
- exp = np.array(["a", "b", "c", "d"], dtype=object)
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, cat.categories)
-
- res = cat.sort_values(ascending=False)
- exp = np.array(["d", "c", "b", "a"], dtype=object)
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, cat.categories)
-
- # sort (inplace order)
- cat1 = cat.copy()
- cat1.sort_values(inplace=True)
- exp = np.array(["a", "b", "c", "d"], dtype=object)
- tm.assert_numpy_array_equal(cat1.__array__(), exp)
- tm.assert_index_equal(res.categories, cat.categories)
-
- # reverse
- cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
- res = cat.sort_values(ascending=False)
- exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
- exp_categories = Index(["a", "b", "c", "d"])
- tm.assert_numpy_array_equal(res.__array__(), exp_val)
- tm.assert_index_equal(res.categories, exp_categories)
-
- def test_sort_values_na_position(self):
- # see gh-12882
- cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True)
- exp_categories = Index([2, 5])
-
- exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
- res = cat.sort_values() # default arguments
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, exp_categories)
-
- exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0])
- res = cat.sort_values(ascending=True, na_position='first')
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, exp_categories)
-
- exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0])
- res = cat.sort_values(ascending=False, na_position='first')
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, exp_categories)
-
- exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan])
- res = cat.sort_values(ascending=True, na_position='last')
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, exp_categories)
-
- exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan])
- res = cat.sort_values(ascending=False, na_position='last')
- tm.assert_numpy_array_equal(res.__array__(), exp)
- tm.assert_index_equal(res.categories, exp_categories)
-
- cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
- res = cat.sort_values(ascending=False, na_position='last')
- exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
- exp_categories = Index(["a", "b", "c", "d"])
- tm.assert_numpy_array_equal(res.__array__(), exp_val)
- tm.assert_index_equal(res.categories, exp_categories)
-
- cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
- res = cat.sort_values(ascending=False, na_position='first')
- exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
- exp_categories = Index(["a", "b", "c", "d"])
- tm.assert_numpy_array_equal(res.__array__(), exp_val)
- tm.assert_index_equal(res.categories, exp_categories)
-
- def test_slicing_directly(self):
- cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
- sliced = cat[3]
- assert sliced == "d"
- sliced = cat[3:5]
- expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
- tm.assert_numpy_array_equal(sliced._codes, expected._codes)
- tm.assert_index_equal(sliced.categories, expected.categories)
-
- def test_set_item_nan(self):
- cat = Categorical([1, 2, 3])
- cat[1] = np.nan
-
- exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
- tm.assert_categorical_equal(cat, exp)
-
- def test_shift(self):
- # GH 9416
- cat = Categorical(['a', 'b', 'c', 'd', 'a'])
-
- # shift forward
- sp1 = cat.shift(1)
- xp1 = Categorical([np.nan, 'a', 'b', 'c', 'd'])
- tm.assert_categorical_equal(sp1, xp1)
- tm.assert_categorical_equal(cat[:-1], sp1[1:])
-
- # shift back
- sn2 = cat.shift(-2)
- xp2 = Categorical(['c', 'd', 'a', np.nan, np.nan],
- categories=['a', 'b', 'c', 'd'])
- tm.assert_categorical_equal(sn2, xp2)
- tm.assert_categorical_equal(cat[2:], sn2[:-2])
-
- # shift by zero
- tm.assert_categorical_equal(cat, cat.shift(0))
-
- def test_nbytes(self):
- cat = Categorical([1, 2, 3])
- exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
- assert cat.nbytes == exp
-
- def test_memory_usage(self):
- cat = Categorical([1, 2, 3])
-
- # .categories is an index, so we include the hashtable
- assert 0 < cat.nbytes <= cat.memory_usage()
- assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
-
- cat = Categorical(['foo', 'foo', 'bar'])
- assert cat.memory_usage(deep=True) > cat.nbytes
-
- if not PYPY:
- # sys.getsizeof will call the .memory_usage with
- # deep=True, and add on some GC overhead
- diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
- assert abs(diff) < 100
-
- def test_searchsorted(self):
- # https://github.com/pandas-dev/pandas/issues/8420
- # https://github.com/pandas-dev/pandas/issues/14522
-
- c1 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
- categories=['cheese', 'milk', 'apple', 'bread'],
- ordered=True)
- s1 = Series(c1)
- c2 = Categorical(['cheese', 'milk', 'apple', 'bread', 'bread'],
- categories=['cheese', 'milk', 'apple', 'bread'],
- ordered=False)
- s2 = Series(c2)
-
- # Searching for single item argument, side='left' (default)
- res_cat = c1.searchsorted('apple')
- res_ser = s1.searchsorted('apple')
- exp = np.array([2], dtype=np.intp)
- tm.assert_numpy_array_equal(res_cat, exp)
- tm.assert_numpy_array_equal(res_ser, exp)
-
- # Searching for single item array, side='left' (default)
- res_cat = c1.searchsorted(['bread'])
- res_ser = s1.searchsorted(['bread'])
- exp = np.array([3], dtype=np.intp)
- tm.assert_numpy_array_equal(res_cat, exp)
- tm.assert_numpy_array_equal(res_ser, exp)
-
- # Searching for several items array, side='right'
- res_cat = c1.searchsorted(['apple', 'bread'], side='right')
- res_ser = s1.searchsorted(['apple', 'bread'], side='right')
- exp = np.array([3, 5], dtype=np.intp)
- tm.assert_numpy_array_equal(res_cat, exp)
- tm.assert_numpy_array_equal(res_ser, exp)
-
- # Searching for a single value that is not from the Categorical
- pytest.raises(ValueError, lambda: c1.searchsorted('cucumber'))
- pytest.raises(ValueError, lambda: s1.searchsorted('cucumber'))
-
- # Searching for multiple values one of each is not from the Categorical
- pytest.raises(ValueError,
- lambda: c1.searchsorted(['bread', 'cucumber']))
- pytest.raises(ValueError,
- lambda: s1.searchsorted(['bread', 'cucumber']))
-
- # searchsorted call for unordered Categorical
- pytest.raises(ValueError, lambda: c2.searchsorted('apple'))
- pytest.raises(ValueError, lambda: s2.searchsorted('apple'))
-
- with tm.assert_produces_warning(FutureWarning):
- res = c1.searchsorted(v=['bread'])
- exp = np.array([3], dtype=np.intp)
- tm.assert_numpy_array_equal(res, exp)
-
- def test_deprecated_labels(self):
- # TODO: labels is deprecated and should be removed in 0.18 or 2017,
- # whatever is earlier
- cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
- exp = cat.codes
- with tm.assert_produces_warning(FutureWarning):
- res = cat.labels
- tm.assert_numpy_array_equal(res, exp)
-
- def test_datetime_categorical_comparison(self):
- dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
- tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
- np.array([False, True, True]))
- tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
- np.array([False, True, True]))
-
- def test_reflected_comparison_with_scalars(self):
- # GH8658
- cat = Categorical([1, 2, 3], ordered=True)
- tm.assert_numpy_array_equal(cat > cat[0],
- np.array([False, True, True]))
- tm.assert_numpy_array_equal(cat[0] < cat,
- np.array([False, True, True]))
-
- def test_comparison_with_unknown_scalars(self):
- # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
- # and following comparisons with scalars not in categories should raise
- # for unequal comps, but not for equal/not equal
- cat = Categorical([1, 2, 3], ordered=True)
-
- pytest.raises(TypeError, lambda: cat < 4)
- pytest.raises(TypeError, lambda: cat > 4)
- pytest.raises(TypeError, lambda: 4 < cat)
- pytest.raises(TypeError, lambda: 4 > cat)
-
- tm.assert_numpy_array_equal(cat == 4,
- np.array([False, False, False]))
- tm.assert_numpy_array_equal(cat != 4,
- np.array([True, True, True]))
-
- def test_map(self):
- c = Categorical(list('ABABC'), categories=list('CBA'), ordered=True)
- result = c.map(lambda x: x.lower())
- exp = Categorical(list('ababc'), categories=list('cba'), ordered=True)
- tm.assert_categorical_equal(result, exp)
-
- c = Categorical(list('ABABC'), categories=list('ABC'), ordered=False)
- result = c.map(lambda x: x.lower())
- exp = Categorical(list('ababc'), categories=list('abc'), ordered=False)
- tm.assert_categorical_equal(result, exp)
-
- result = c.map(lambda x: 1)
- # GH 12766: Return an index not an array
- tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
-
- def test_validate_inplace(self):
- cat = Categorical(['A', 'B', 'B', 'C', 'A'])
- invalid_values = [1, "True", [1, 2, 3], 5.0]
-
- for value in invalid_values:
- with pytest.raises(ValueError):
- cat.set_ordered(value=True, inplace=value)
-
- with pytest.raises(ValueError):
- cat.as_ordered(inplace=value)
-
- with pytest.raises(ValueError):
- cat.as_unordered(inplace=value)
-
- with pytest.raises(ValueError):
- cat.set_categories(['X', 'Y', 'Z'], rename=True, inplace=value)
-
- with pytest.raises(ValueError):
- cat.rename_categories(['X', 'Y', 'Z'], inplace=value)
-
- with pytest.raises(ValueError):
- cat.reorder_categories(
- ['X', 'Y', 'Z'], ordered=True, inplace=value)
-
- with pytest.raises(ValueError):
- cat.add_categories(
- new_categories=['D', 'E', 'F'], inplace=value)
-
- with pytest.raises(ValueError):
- cat.remove_categories(removals=['D', 'E', 'F'], inplace=value)
-
- with pytest.raises(ValueError):
- cat.remove_unused_categories(inplace=value)
-
- with pytest.raises(ValueError):
- cat.sort_values(inplace=value)
-
- @pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
- def test_imaginary(self):
- values = [1, 2, 3 + 1j]
- c1 = Categorical(values)
- tm.assert_index_equal(c1.categories, Index(values))
- tm.assert_numpy_array_equal(np.array(c1), np.array(values))
-
-
-class TestCategoricalAsBlock(object):
-
- def setup_method(self, method):
- self.factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
-
- df = DataFrame({'value': np.random.randint(0, 10000, 100)})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
- cat_labels = Categorical(labels, labels)
-
- df = df.sort_values(by=['value'], ascending=True)
- df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
- right=False, labels=cat_labels)
- self.cat = df
-
- def test_dtypes(self):
-
- # GH8143
- index = ['cat', 'obj', 'num']
- cat = Categorical(['a', 'b', 'c'])
- obj = Series(['a', 'b', 'c'])
- num = Series([1, 2, 3])
- df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
-
- result = df.dtypes == 'object'
- expected = Series([False, True, False], index=index)
- tm.assert_series_equal(result, expected)
-
- result = df.dtypes == 'int64'
- expected = Series([False, False, True], index=index)
- tm.assert_series_equal(result, expected)
-
- result = df.dtypes == 'category'
- expected = Series([True, False, False], index=index)
- tm.assert_series_equal(result, expected)
-
- def test_codes_dtypes(self):
-
- # GH 8453
- result = Categorical(['foo', 'bar', 'baz'])
- assert result.codes.dtype == 'int8'
-
- result = Categorical(['foo%05d' % i for i in range(400)])
- assert result.codes.dtype == 'int16'
-
- result = Categorical(['foo%05d' % i for i in range(40000)])
- assert result.codes.dtype == 'int32'
-
- # adding cats
- result = Categorical(['foo', 'bar', 'baz'])
- assert result.codes.dtype == 'int8'
- result = result.add_categories(['foo%05d' % i for i in range(400)])
- assert result.codes.dtype == 'int16'
-
- # removing cats
- result = result.remove_categories(['foo%05d' % i for i in range(300)])
- assert result.codes.dtype == 'int8'
-
- def test_basic(self):
-
- # test basic creation / coercion of categoricals
- s = Series(self.factor, name='A')
- assert s.dtype == 'category'
- assert len(s) == len(self.factor)
- str(s.values)
- str(s)
-
- # in a frame
- df = DataFrame({'A': self.factor})
- result = df['A']
- tm.assert_series_equal(result, s)
- result = df.iloc[:, 0]
- tm.assert_series_equal(result, s)
- assert len(df) == len(self.factor)
- str(df.values)
- str(df)
-
- df = DataFrame({'A': s})
- result = df['A']
- tm.assert_series_equal(result, s)
- assert len(df) == len(self.factor)
- str(df.values)
- str(df)
-
- # multiples
- df = DataFrame({'A': s, 'B': s, 'C': 1})
- result1 = df['A']
- result2 = df['B']
- tm.assert_series_equal(result1, s)
- tm.assert_series_equal(result2, s, check_names=False)
- assert result2.name == 'B'
- assert len(df) == len(self.factor)
- str(df.values)
- str(df)
-
- # GH8623
- x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
- [1, 'John P. Doe']],
- columns=['person_id', 'person_name'])
- x['person_name'] = Categorical(x.person_name
- ) # doing this breaks transform
-
- expected = x.iloc[0].person_name
- result = x.person_name.iloc[0]
- assert result == expected
-
- result = x.person_name[0]
- assert result == expected
-
- result = x.person_name.loc[0]
- assert result == expected
-
- def test_creation_astype(self):
- l = ["a", "b", "c", "a"]
- s = Series(l)
- exp = Series(Categorical(l))
- res = s.astype('category')
- tm.assert_series_equal(res, exp)
-
- l = [1, 2, 3, 1]
- s = Series(l)
- exp = Series(Categorical(l))
- res = s.astype('category')
- tm.assert_series_equal(res, exp)
-
- df = DataFrame({"cats": [1, 2, 3, 4, 5, 6],
- "vals": [1, 2, 3, 4, 5, 6]})
- cats = Categorical([1, 2, 3, 4, 5, 6])
- exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
- df["cats"] = df["cats"].astype("category")
- tm.assert_frame_equal(exp_df, df)
-
- df = DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
- "vals": [1, 2, 3, 4, 5, 6]})
- cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
- exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
- df["cats"] = df["cats"].astype("category")
- tm.assert_frame_equal(exp_df, df)
-
- # with keywords
- l = ["a", "b", "c", "a"]
- s = Series(l)
- exp = Series(Categorical(l, ordered=True))
- res = s.astype(CategoricalDtype(None, ordered=True))
- tm.assert_series_equal(res, exp)
-
- exp = Series(Categorical(l, categories=list('abcdef'), ordered=True))
- res = s.astype(CategoricalDtype(list('abcdef'), ordered=True))
- tm.assert_series_equal(res, exp)
-
- @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']])
- def test_empty_astype(self, columns):
- # GH 18004
- msg = '> 1 ndim Categorical are not supported at this time'
- with tm.assert_raises_regex(NotImplementedError, msg):
- DataFrame(columns=columns).astype('category')
-
- def test_construction_series(self):
-
- l = [1, 2, 3, 1]
- exp = Series(l).astype('category')
- res = Series(l, dtype='category')
- tm.assert_series_equal(res, exp)
-
- l = ["a", "b", "c", "a"]
- exp = Series(l).astype('category')
- res = Series(l, dtype='category')
- tm.assert_series_equal(res, exp)
-
- # insert into frame with different index
- # GH 8076
- index = date_range('20000101', periods=3)
- expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
- categories=['a', 'b', 'c']))
- expected.index = index
-
- expected = DataFrame({'x': expected})
- df = DataFrame(
- {'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
- tm.assert_frame_equal(df, expected)
-
- def test_construction_frame(self):
-
- # GH8626
-
- # dict creation
- df = DataFrame({'A': list('abc')}, dtype='category')
- expected = Series(list('abc'), dtype='category', name='A')
- tm.assert_series_equal(df['A'], expected)
-
- # to_frame
- s = Series(list('abc'), dtype='category')
- result = s.to_frame()
- expected = Series(list('abc'), dtype='category', name=0)
- tm.assert_series_equal(result[0], expected)
- result = s.to_frame(name='foo')
- expected = Series(list('abc'), dtype='category', name='foo')
- tm.assert_series_equal(result['foo'], expected)
-
- # list-like creation
- df = DataFrame(list('abc'), dtype='category')
- expected = Series(list('abc'), dtype='category', name=0)
- tm.assert_series_equal(df[0], expected)
-
- # ndim != 1
- df = DataFrame([Categorical(list('abc'))])
- expected = DataFrame({0: Series(list('abc'), dtype='category')})
- tm.assert_frame_equal(df, expected)
-
- df = DataFrame([Categorical(list('abc')), Categorical(list('abd'))])
- expected = DataFrame({0: Series(list('abc'), dtype='category'),
- 1: Series(list('abd'), dtype='category')},
- columns=[0, 1])
- tm.assert_frame_equal(df, expected)
-
- # mixed
- df = DataFrame([Categorical(list('abc')), list('def')])
- expected = DataFrame({0: Series(list('abc'), dtype='category'),
- 1: list('def')}, columns=[0, 1])
- tm.assert_frame_equal(df, expected)
-
- # invalid (shape)
- pytest.raises(ValueError,
- lambda: DataFrame([Categorical(list('abc')),
- Categorical(list('abdefg'))]))
-
- # ndim > 1
- pytest.raises(NotImplementedError,
- lambda: Categorical(np.array([list('abcd')])))
-
- def test_reshaping(self):
-
- with catch_warnings(record=True):
- p = tm.makePanel()
- p['str'] = 'foo'
- df = p.to_frame()
-
- df['category'] = df['str'].astype('category')
- result = df['category'].unstack()
-
- c = Categorical(['foo'] * len(p.major_axis))
- expected = DataFrame({'A': c.copy(),
- 'B': c.copy(),
- 'C': c.copy(),
- 'D': c.copy()},
- columns=Index(list('ABCD'), name='minor'),
- index=p.major_axis.set_names('major'))
- tm.assert_frame_equal(result, expected)
-
- def test_reindex(self):
-
- index = date_range('20000101', periods=3)
-
- # reindexing to an invalid Categorical
- s = Series(['a', 'b', 'c'], dtype='category')
- result = s.reindex(index)
- expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
- categories=['a', 'b', 'c']))
- expected.index = index
- tm.assert_series_equal(result, expected)
-
- # partial reindexing
- expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
- 'c']))
- expected.index = [1, 2]
- result = s.reindex([1, 2])
- tm.assert_series_equal(result, expected)
-
- expected = Series(Categorical(
- values=['c', np.nan], categories=['a', 'b', 'c']))
- expected.index = [2, 3]
- result = s.reindex([2, 3])
- tm.assert_series_equal(result, expected)
-
- def test_sideeffects_free(self):
- # Passing a categorical to a Series and then changing values in either
- # the series or the categorical should not change the values in the
- # other one, IF you specify copy!
- cat = Categorical(["a", "b", "c", "a"])
- s = Series(cat, copy=True)
- assert s.cat is not cat
- s.cat.categories = [1, 2, 3]
- exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
- exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
- tm.assert_numpy_array_equal(s.__array__(), exp_s)
- tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
-
- # setting
- s[0] = 2
- exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(s.__array__(), exp_s2)
- tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
-
- # however, copy is False by default
- # so this WILL change values
- cat = Categorical(["a", "b", "c", "a"])
- s = Series(cat)
- assert s.values is cat
- s.cat.categories = [1, 2, 3]
- exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(s.__array__(), exp_s)
- tm.assert_numpy_array_equal(cat.__array__(), exp_s)
-
- s[0] = 2
- exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(s.__array__(), exp_s2)
- tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
-
- def test_nan_handling(self):
-
- # NaNs are represented as -1 in labels
- s = Series(Categorical(["a", "b", np.nan, "a"]))
- tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
- tm.assert_numpy_array_equal(s.values.codes,
- np.array([0, 1, -1, 0], dtype=np.int8))
-
- def test_cat_accessor(self):
- s = Series(Categorical(["a", "b", np.nan, "a"]))
- tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
- assert not s.cat.ordered, False
-
- exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
- s.cat.set_categories(["b", "a"], inplace=True)
- tm.assert_categorical_equal(s.values, exp)
-
- res = s.cat.set_categories(["b", "a"])
- tm.assert_categorical_equal(res.values, exp)
-
- s[:] = "a"
- s = s.cat.remove_unused_categories()
- tm.assert_index_equal(s.cat.categories, Index(["a"]))
-
- def test_sequence_like(self):
-
- # GH 7839
- # make sure can iterate
- df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
- "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
- df['grade'] = Categorical(df['raw_grade'])
-
- # basic sequencing testing
- result = list(df.grade.values)
- expected = np.array(df.grade.values).tolist()
- tm.assert_almost_equal(result, expected)
-
- # iteration
- for t in df.itertuples(index=False):
- str(t)
-
- for row, s in df.iterrows():
- str(s)
-
- for c, col in df.iteritems():
- str(s)
-
- def test_series_delegations(self):
-
- # invalid accessor
- pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat)
- tm.assert_raises_regex(
- AttributeError,
- r"Can only use .cat accessor with a 'category' dtype",
- lambda: Series([1, 2, 3]).cat)
- pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
- pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat)
- pytest.raises(AttributeError,
- lambda: Series([Timestamp('20130101')]).cat)
-
- # Series should delegate calls to '.categories', '.codes', '.ordered'
- # and the methods '.set_categories()' 'drop_unused_categories()' to the
- # categorical
- s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
- exp_categories = Index(["a", "b", "c"])
- tm.assert_index_equal(s.cat.categories, exp_categories)
- s.cat.categories = [1, 2, 3]
- exp_categories = Index([1, 2, 3])
- tm.assert_index_equal(s.cat.categories, exp_categories)
-
- exp_codes = Series([0, 1, 2, 0], dtype='int8')
- tm.assert_series_equal(s.cat.codes, exp_codes)
-
- assert s.cat.ordered
- s = s.cat.as_unordered()
- assert not s.cat.ordered
- s.cat.as_ordered(inplace=True)
- assert s.cat.ordered
-
- # reorder
- s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
- exp_categories = Index(["c", "b", "a"])
- exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
- s = s.cat.set_categories(["c", "b", "a"])
- tm.assert_index_equal(s.cat.categories, exp_categories)
- tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
- tm.assert_numpy_array_equal(s.__array__(), exp_values)
-
- # remove unused categories
- s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
- ]))
- exp_categories = Index(["a", "b"])
- exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
- s = s.cat.remove_unused_categories()
- tm.assert_index_equal(s.cat.categories, exp_categories)
- tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
- tm.assert_numpy_array_equal(s.__array__(), exp_values)
-
- # This method is likely to be confused, so test that it raises an error
- # on wrong inputs:
- def f():
- s.set_categories([4, 3, 2, 1])
-
- pytest.raises(Exception, f)
- # right: s.cat.set_categories([4,3,2,1])
-
- def test_series_functions_no_warnings(self):
- df = DataFrame({'value': np.random.randint(0, 100, 20)})
- labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
- with tm.assert_produces_warning(False):
- df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
- labels=labels)
-
- def test_assignment_to_dataframe(self):
- # assignment
- df = DataFrame({'value': np.array(
- np.random.randint(0, 10000, 100), dtype='int32')})
- labels = Categorical(["{0} - {1}".format(i, i + 499)
- for i in range(0, 10000, 500)])
-
- df = df.sort_values(by=['value'], ascending=True)
- s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
- d = s.values
- df['D'] = d
- str(df)
-
- result = df.dtypes
- expected = Series(
- [np.dtype('int32'), CategoricalDtype(categories=labels,
- ordered=False)],
- index=['value', 'D'])
- tm.assert_series_equal(result, expected)
-
- df['E'] = s
- str(df)
-
- result = df.dtypes
- expected = Series([np.dtype('int32'),
- CategoricalDtype(categories=labels, ordered=False),
- CategoricalDtype(categories=labels, ordered=False)],
- index=['value', 'D', 'E'])
- tm.assert_series_equal(result, expected)
-
- result1 = df['D']
- result2 = df['E']
- tm.assert_categorical_equal(result1._data._block.values, d)
-
- # sorting
- s.name = 'E'
- tm.assert_series_equal(result2.sort_index(), s.sort_index())
-
- cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
- df = DataFrame(Series(cat))
-
- def test_describe(self):
-
- # Categoricals should not show up together with numerical columns
- result = self.cat.describe()
- assert len(result.columns) == 1
-
- # In a frame, describe() for the cat should be the same as for string
- # arrays (count, unique, top, freq)
-
- cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
- ordered=True)
- s = Series(cat)
- result = s.describe()
- expected = Series([4, 2, "b", 3],
- index=['count', 'unique', 'top', 'freq'])
- tm.assert_series_equal(result, expected)
-
- cat = Series(Categorical(["a", "b", "c", "c"]))
- df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
- res = df3.describe()
- tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
-
- def test_repr(self):
- a = Series(Categorical([1, 2, 3, 4]))
- exp = u("0 1\n1 2\n2 3\n3 4\n" +
- "dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
-
- assert exp == a.__unicode__()
-
- a = Series(Categorical(["a", "b"] * 25))
- exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
- "Length: 50, dtype: category\nCategories (2, object): [a, b]")
- with option_context("display.max_rows", 5):
- assert exp == repr(a)
-
- levs = list("abcdefghijklmnopqrstuvwxyz")
- a = Series(Categorical(["a", "b"], categories=levs, ordered=True))
- exp = u("0 a\n1 b\n" + "dtype: category\n"
- "Categories (26, object): [a < b < c < d ... w < x < y < z]")
- assert exp == a.__unicode__()
-
- def test_categorical_repr(self):
- c = Categorical([1, 2, 3])
- exp = """[1, 2, 3]
-Categories (3, int64): [1, 2, 3]"""
-
- assert repr(c) == exp
-
- c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
- exp = """[1, 2, 3, 1, 2, 3]
-Categories (3, int64): [1, 2, 3]"""
-
- assert repr(c) == exp
-
- c = Categorical([1, 2, 3, 4, 5] * 10)
- exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
-Length: 50
-Categories (5, int64): [1, 2, 3, 4, 5]"""
-
- assert repr(c) == exp
-
- c = Categorical(np.arange(20))
- exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
-Length: 20
-Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
-
- assert repr(c) == exp
-
- def test_categorical_repr_ordered(self):
- c = Categorical([1, 2, 3], ordered=True)
- exp = """[1, 2, 3]
-Categories (3, int64): [1 < 2 < 3]"""
-
- assert repr(c) == exp
-
- c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
- exp = """[1, 2, 3, 1, 2, 3]
-Categories (3, int64): [1 < 2 < 3]"""
-
- assert repr(c) == exp
-
- c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
- exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
-Length: 50
-Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
-
- assert repr(c) == exp
-
- c = Categorical(np.arange(20), ordered=True)
- exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
-Length: 20
-Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
-
- assert repr(c) == exp
-
- def test_categorical_repr_datetime(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- c = Categorical(idx)
-
- # TODO(wesm): exceeding 80 characters in the console is not good
- # behavior
- exp = (
- "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
- "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
- "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
- "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
- " 2011-01-01 12:00:00, "
- "2011-01-01 13:00:00]""")
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = (
- "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
- "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
- "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
- "2011-01-01 13:00:00]\n"
- "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
- "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
- " 2011-01-01 12:00:00, "
- "2011-01-01 13:00:00]")
-
- assert repr(c) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- c = Categorical(idx)
- exp = (
- "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
- "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
- "2011-01-01 13:00:00-05:00]\n"
- "Categories (5, datetime64[ns, US/Eastern]): "
- "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
- " "
- "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
- " "
- "2011-01-01 13:00:00-05:00]")
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = (
- "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
- "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
- "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
- "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
- "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
- "Categories (5, datetime64[ns, US/Eastern]): "
- "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
- " "
- "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
- " "
- "2011-01-01 13:00:00-05:00]")
-
- assert repr(c) == exp
-
- def test_categorical_repr_datetime_ordered(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- c = Categorical(idx, ordered=True)
- exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
-
- assert repr(c) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- c = Categorical(idx, ordered=True)
- exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
-Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
-
- assert repr(c) == exp
-
- def test_categorical_repr_period(self):
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- c = Categorical(idx)
- exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
-Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
-Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
-
- assert repr(c) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- c = Categorical(idx)
- exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
-
- assert repr(c) == exp
-
- def test_categorical_repr_period_ordered(self):
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- c = Categorical(idx, ordered=True)
- exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
-Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
-Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
-
- assert repr(c) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- c = Categorical(idx, ordered=True)
- exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
-
- assert repr(c) == exp
-
- def test_categorical_repr_timedelta(self):
- idx = timedelta_range('1 days', periods=5)
- c = Categorical(idx)
- exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa
-
- assert repr(c) == exp
-
- idx = timedelta_range('1 hours', periods=20)
- c = Categorical(idx)
- exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
-Length: 20
-Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
- 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx)
- exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
-Length: 40
-Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
- 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
- 18 days 01:00:00, 19 days 01:00:00]""" # noqa
-
- assert repr(c) == exp
-
- def test_categorical_repr_timedelta_ordered(self):
- idx = timedelta_range('1 days', periods=5)
- c = Categorical(idx, ordered=True)
- exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
-
- assert repr(c) == exp
-
- idx = timedelta_range('1 hours', periods=20)
- c = Categorical(idx, ordered=True)
- exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
-Length: 20
-Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
- 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
-
- assert repr(c) == exp
-
- c = Categorical(idx.append(idx), categories=idx, ordered=True)
- exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
-Length: 40
-Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
- 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
- 18 days 01:00:00 < 19 days 01:00:00]""" # noqa
-
- assert repr(c) == exp
-
- def test_categorical_series_repr(self):
- s = Series(Categorical([1, 2, 3]))
- exp = """0 1
-1 2
-2 3
-dtype: category
-Categories (3, int64): [1, 2, 3]"""
-
- assert repr(s) == exp
-
- s = Series(Categorical(np.arange(10)))
- exp = """0 0
-1 1
-2 2
-3 3
-4 4
-5 5
-6 6
-7 7
-8 8
-9 9
-dtype: category
-Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_ordered(self):
- s = Series(Categorical([1, 2, 3], ordered=True))
- exp = """0 1
-1 2
-2 3
-dtype: category
-Categories (3, int64): [1 < 2 < 3]"""
-
- assert repr(s) == exp
-
- s = Series(Categorical(np.arange(10), ordered=True))
- exp = """0 0
-1 1
-2 2
-3 3
-4 4
-5 5
-6 6
-7 7
-8 8
-9 9
-dtype: category
-Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_datetime(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- s = Series(Categorical(idx))
- exp = """0 2011-01-01 09:00:00
-1 2011-01-01 10:00:00
-2 2011-01-01 11:00:00
-3 2011-01-01 12:00:00
-4 2011-01-01 13:00:00
-dtype: category
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
- 2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
-
- assert repr(s) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- s = Series(Categorical(idx))
- exp = """0 2011-01-01 09:00:00-05:00
-1 2011-01-01 10:00:00-05:00
-2 2011-01-01 11:00:00-05:00
-3 2011-01-01 12:00:00-05:00
-4 2011-01-01 13:00:00-05:00
-dtype: category
-Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
- 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
- 2011-01-01 13:00:00-05:00]""" # noqa
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_datetime_ordered(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- s = Series(Categorical(idx, ordered=True))
- exp = """0 2011-01-01 09:00:00
-1 2011-01-01 10:00:00
-2 2011-01-01 11:00:00
-3 2011-01-01 12:00:00
-4 2011-01-01 13:00:00
-dtype: category
-Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
- 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
-
- assert repr(s) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- s = Series(Categorical(idx, ordered=True))
- exp = """0 2011-01-01 09:00:00-05:00
-1 2011-01-01 10:00:00-05:00
-2 2011-01-01 11:00:00-05:00
-3 2011-01-01 12:00:00-05:00
-4 2011-01-01 13:00:00-05:00
-dtype: category
-Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
- 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
- 2011-01-01 13:00:00-05:00]""" # noqa
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_period(self):
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- s = Series(Categorical(idx))
- exp = """0 2011-01-01 09:00
-1 2011-01-01 10:00
-2 2011-01-01 11:00
-3 2011-01-01 12:00
-4 2011-01-01 13:00
-dtype: category
-Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
- 2011-01-01 13:00]""" # noqa
-
- assert repr(s) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- s = Series(Categorical(idx))
- exp = """0 2011-01
-1 2011-02
-2 2011-03
-3 2011-04
-4 2011-05
-dtype: category
-Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_period_ordered(self):
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- s = Series(Categorical(idx, ordered=True))
- exp = """0 2011-01-01 09:00
-1 2011-01-01 10:00
-2 2011-01-01 11:00
-3 2011-01-01 12:00
-4 2011-01-01 13:00
-dtype: category
-Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
- 2011-01-01 13:00]""" # noqa
-
- assert repr(s) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- s = Series(Categorical(idx, ordered=True))
- exp = """0 2011-01
-1 2011-02
-2 2011-03
-3 2011-04
-4 2011-05
-dtype: category
-Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_timedelta(self):
- idx = timedelta_range('1 days', periods=5)
- s = Series(Categorical(idx))
- exp = """0 1 days
-1 2 days
-2 3 days
-3 4 days
-4 5 days
-dtype: category
-Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
-
- assert repr(s) == exp
-
- idx = timedelta_range('1 hours', periods=10)
- s = Series(Categorical(idx))
- exp = """0 0 days 01:00:00
-1 1 days 01:00:00
-2 2 days 01:00:00
-3 3 days 01:00:00
-4 4 days 01:00:00
-5 5 days 01:00:00
-6 6 days 01:00:00
-7 7 days 01:00:00
-8 8 days 01:00:00
-9 9 days 01:00:00
-dtype: category
-Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
- 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
- 8 days 01:00:00, 9 days 01:00:00]""" # noqa
-
- assert repr(s) == exp
-
- def test_categorical_series_repr_timedelta_ordered(self):
- idx = timedelta_range('1 days', periods=5)
- s = Series(Categorical(idx, ordered=True))
- exp = """0 1 days
-1 2 days
-2 3 days
-3 4 days
-4 5 days
-dtype: category
-Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
-
- assert repr(s) == exp
-
- idx = timedelta_range('1 hours', periods=10)
- s = Series(Categorical(idx, ordered=True))
- exp = """0 0 days 01:00:00
-1 1 days 01:00:00
-2 2 days 01:00:00
-3 3 days 01:00:00
-4 4 days 01:00:00
-5 5 days 01:00:00
-6 6 days 01:00:00
-7 7 days 01:00:00
-8 8 days 01:00:00
-9 9 days 01:00:00
-dtype: category
-Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
- 3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
- 8 days 01:00:00 < 9 days 01:00:00]""" # noqa
-
- assert repr(s) == exp
-
- def test_categorical_index_repr(self):
- idx = CategoricalIndex(Categorical([1, 2, 3]))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
- assert repr(idx) == exp
-
- i = CategoricalIndex(Categorical(np.arange(10)))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- def test_categorical_index_repr_ordered(self):
- i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
- exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
- assert repr(i) == exp
-
- i = CategoricalIndex(Categorical(np.arange(10), ordered=True))
- exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
- assert repr(i) == exp
-
- def test_categorical_index_repr_datetime(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
- '2011-01-01 11:00:00', '2011-01-01 12:00:00',
- '2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
- '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
- '2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- def test_categorical_index_repr_datetime_ordered(self):
- idx = date_range('2011-01-01 09:00', freq='H', periods=5)
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
- '2011-01-01 11:00:00', '2011-01-01 12:00:00',
- '2011-01-01 13:00:00'],
- categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- idx = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
- '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
- '2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- i = CategoricalIndex(Categorical(idx.append(idx), ordered=True))
- exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
- '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
- '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
- '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
- '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
- categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- def test_categorical_index_repr_period(self):
- # test all length
- idx = period_range('2011-01-01 09:00', freq='H', periods=1)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- idx = period_range('2011-01-01 09:00', freq='H', periods=2)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- idx = period_range('2011-01-01 09:00', freq='H', periods=3)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
- '2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- i = CategoricalIndex(Categorical(idx.append(idx)))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
- '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
- '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
- '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- def test_categorical_index_repr_period_ordered(self):
- idx = period_range('2011-01-01 09:00', freq='H', periods=5)
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
- '2011-01-01 12:00', '2011-01-01 13:00'],
- categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- idx = period_range('2011-01', freq='M', periods=5)
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
- assert repr(i) == exp
-
- def test_categorical_index_repr_timedelta(self):
- idx = timedelta_range('1 days', periods=5)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
- assert repr(i) == exp
-
- idx = timedelta_range('1 hours', periods=10)
- i = CategoricalIndex(Categorical(idx))
- exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
- '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
- '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
- '9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- def test_categorical_index_repr_timedelta_ordered(self):
- idx = timedelta_range('1 days', periods=5)
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
- assert repr(i) == exp
-
- idx = timedelta_range('1 hours', periods=10)
- i = CategoricalIndex(Categorical(idx, ordered=True))
- exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
- '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
- '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
- '9 days 01:00:00'],
- categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
-
- assert repr(i) == exp
-
- def test_categorical_frame(self):
- # normal DataFrame
- dt = date_range('2011-01-01 09:00', freq='H', periods=5,
- tz='US/Eastern')
- p = period_range('2011-01', freq='M', periods=5)
- df = DataFrame({'dt': dt, 'p': p})
- exp = """ dt p
-0 2011-01-01 09:00:00-05:00 2011-01
-1 2011-01-01 10:00:00-05:00 2011-02
-2 2011-01-01 11:00:00-05:00 2011-03
-3 2011-01-01 12:00:00-05:00 2011-04
-4 2011-01-01 13:00:00-05:00 2011-05"""
-
- df = DataFrame({'dt': Categorical(dt), 'p': Categorical(p)})
- assert repr(df) == exp
-
- def test_info(self):
-
- # make sure it works
- n = 2500
- df = DataFrame({'int64': np.random.randint(100, size=n)})
- df['category'] = Series(np.array(list('abcdefghij')).take(
- np.random.randint(0, 10, size=n))).astype('category')
- df.isna()
- buf = compat.StringIO()
- df.info(buf=buf)
-
- df2 = df[df['category'] == 'd']
- buf = compat.StringIO()
- df2.info(buf=buf)
-
- def test_groupby_sort(self):
-
- # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
- # This should result in a properly sorted Series so that the plot
- # has a sorted x axis
- # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
-
- res = self.cat.groupby(['value_group'])['value_group'].count()
- exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
- exp.index = CategoricalIndex(exp.index, name=exp.index.name)
- tm.assert_series_equal(res, exp)
-
- def test_min_max(self):
- # unordered cats have no min/max
- cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
- pytest.raises(TypeError, lambda: cat.min())
- pytest.raises(TypeError, lambda: cat.max())
-
- cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
- _min = cat.min()
- _max = cat.max()
- assert _min == "a"
- assert _max == "d"
-
- cat = Series(Categorical(["a", "b", "c", "d"], categories=[
- 'd', 'c', 'b', 'a'], ordered=True))
- _min = cat.min()
- _max = cat.max()
- assert _min == "d"
- assert _max == "a"
-
- cat = Series(Categorical(
- [np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
- ], ordered=True))
- _min = cat.min()
- _max = cat.max()
- assert np.isnan(_min)
- assert _max == "b"
-
- cat = Series(Categorical(
- [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
- _min = cat.min()
- _max = cat.max()
- assert np.isnan(_min)
- assert _max == 1
-
- def test_mode(self):
- s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
- categories=[5, 4, 3, 2, 1], ordered=True))
- res = s.mode()
- exp = Series(Categorical([5], categories=[
- 5, 4, 3, 2, 1], ordered=True))
- tm.assert_series_equal(res, exp)
- s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
- categories=[5, 4, 3, 2, 1], ordered=True))
- res = s.mode()
- exp = Series(Categorical([5, 1], categories=[
- 5, 4, 3, 2, 1], ordered=True))
- tm.assert_series_equal(res, exp)
- s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
- ordered=True))
- res = s.mode()
- exp = Series(Categorical([5, 4, 3, 2, 1], categories=[5, 4, 3, 2, 1],
- ordered=True))
- tm.assert_series_equal(res, exp)
-
- def test_value_counts(self):
- # GH 12835
- cats = Categorical(list('abcccb'), categories=list('cabd'))
- s = Series(cats, name='xxx')
- res = s.value_counts(sort=False)
-
- exp_index = CategoricalIndex(list('cabd'), categories=cats.categories)
- exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
- tm.assert_series_equal(res, exp)
-
- res = s.value_counts(sort=True)
-
- exp_index = CategoricalIndex(list('cbad'), categories=cats.categories)
- exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
- tm.assert_series_equal(res, exp)
-
- # check object dtype handles the Series.name as the same
- # (tested in test_base.py)
- s = Series(["a", "b", "c", "c", "c", "b"], name='xxx')
- res = s.value_counts()
- exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
- tm.assert_series_equal(res, exp)
-
- def test_value_counts_with_nan(self):
- # see gh-9443
-
- # sanity check
- s = Series(["a", "b", "a"], dtype="category")
- exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
-
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- # same Series via two different constructions --> same behaviour
- series = [
- Series(["a", "b", None, "a", None, None], dtype="category"),
- Series(Categorical(["a", "b", None, "a", None, None],
- categories=["a", "b"]))
- ]
-
- for s in series:
- # None is a NaN value, so we exclude its count here
- exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
- res = s.value_counts(dropna=True)
- tm.assert_series_equal(res, exp)
-
- # we don't exclude the count of None and sort by counts
- exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
- res = s.value_counts(dropna=False)
- tm.assert_series_equal(res, exp)
-
- # When we aren't sorting by counts, and np.nan isn't a
- # category, it should be last.
- exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
- res = s.value_counts(dropna=False, sort=False)
- tm.assert_series_equal(res, exp)
-
- def test_groupby(self):
-
- cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"],
- categories=["a", "b", "c", "d"], ordered=True)
- data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
-
- exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True)
- expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index)
- result = data.groupby("b").mean()
- tm.assert_frame_equal(result, expected)
-
- raw_cat1 = Categorical(["a", "a", "b", "b"],
- categories=["a", "b", "z"], ordered=True)
- raw_cat2 = Categorical(["c", "d", "c", "d"],
- categories=["c", "d", "y"], ordered=True)
- df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
-
- # single grouper
- gb = df.groupby("A")
- exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True)
- expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # multiple groupers
- gb = df.groupby(['A', 'B'])
- exp_index = pd.MultiIndex.from_product(
- [Categorical(["a", "b", "z"], ordered=True),
- Categorical(["c", "d", "y"], ordered=True)],
- names=['A', 'B'])
- expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan,
- np.nan, np.nan, np.nan]},
- index=exp_index)
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # multiple groupers with a non-cat
- df = df.copy()
- df['C'] = ['foo', 'bar'] * 2
- gb = df.groupby(['A', 'B', 'C'])
- exp_index = pd.MultiIndex.from_product(
- [Categorical(["a", "b", "z"], ordered=True),
- Categorical(["c", "d", "y"], ordered=True),
- ['foo', 'bar']],
- names=['A', 'B', 'C'])
- expected = DataFrame({'values': Series(
- np.nan, index=exp_index)}).sort_index()
- expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
- result = gb.sum()
- tm.assert_frame_equal(result, expected)
-
- # GH 8623
- x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
- [1, 'John P. Doe']],
- columns=['person_id', 'person_name'])
- x['person_name'] = Categorical(x.person_name)
-
- g = x.groupby(['person_id'])
- result = g.transform(lambda x: x)
- tm.assert_frame_equal(result, x[['person_name']])
-
- result = x.drop_duplicates('person_name')
- expected = x.iloc[[0, 1]]
- tm.assert_frame_equal(result, expected)
-
- def f(x):
- return x.drop_duplicates('person_name').iloc[0]
-
- result = g.apply(f)
- expected = x.iloc[[0, 1]].copy()
- expected.index = Index([1, 2], name='person_id')
- expected['person_name'] = expected['person_name'].astype('object')
- tm.assert_frame_equal(result, expected)
-
- # GH 9921
- # Monotonic
- df = DataFrame({"a": [5, 15, 25]})
- c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
-
- result = df.a.groupby(c).transform(sum)
- tm.assert_series_equal(result, df['a'])
-
- tm.assert_series_equal(
- df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
- tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
- tm.assert_frame_equal(
- df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
-
- # Filter
- tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
- tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
-
- # Non-monotonic
- df = DataFrame({"a": [5, 15, 25, -5]})
- c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
-
- result = df.a.groupby(c).transform(sum)
- tm.assert_series_equal(result, df['a'])
-
- tm.assert_series_equal(
- df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
- tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
- tm.assert_frame_equal(
- df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
-
- # GH 9603
- df = DataFrame({'a': [1, 0, 0, 0]})
- c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd')))
- result = df.groupby(c).apply(len)
-
- exp_index = CategoricalIndex(
- c.values.categories, ordered=c.values.ordered)
- expected = Series([1, 0, 0, 0], index=exp_index)
- expected.index.name = 'a'
- tm.assert_series_equal(result, expected)
-
- def test_pivot_table(self):
-
- raw_cat1 = Categorical(["a", "a", "b", "b"],
- categories=["a", "b", "z"], ordered=True)
- raw_cat2 = Categorical(["c", "d", "c", "d"],
- categories=["c", "d", "y"], ordered=True)
- df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
- result = pd.pivot_table(df, values='values', index=['A', 'B'])
-
- exp_index = pd.MultiIndex.from_product(
- [Categorical(["a", "b", "z"], ordered=True),
- Categorical(["c", "d", "y"], ordered=True)],
- names=['A', 'B'])
- expected = DataFrame(
- {'values': [1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan]},
- index=exp_index)
- tm.assert_frame_equal(result, expected)
-
- def test_count(self):
-
- s = Series(Categorical([np.nan, 1, 2, np.nan],
- categories=[5, 4, 3, 2, 1], ordered=True))
- result = s.count()
- assert result == 2
-
- def test_sort_values(self):
-
- c = Categorical(["a", "b", "b", "a"], ordered=False)
- cat = Series(c.copy())
-
- # sort in the categories order
- expected = Series(
- Categorical(["a", "a", "b", "b"],
- ordered=False), index=[0, 3, 1, 2])
- result = cat.sort_values()
- tm.assert_series_equal(result, expected)
-
- cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
- res = cat.sort_values()
- exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
- tm.assert_numpy_array_equal(res.__array__(), exp)
-
- cat = Series(Categorical(["a", "c", "b", "d"], categories=[
- "a", "b", "c", "d"], ordered=True))
- res = cat.sort_values()
- exp = np.array(["a", "b", "c", "d"], dtype=np.object_)
- tm.assert_numpy_array_equal(res.__array__(), exp)
-
- res = cat.sort_values(ascending=False)
- exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
- tm.assert_numpy_array_equal(res.__array__(), exp)
-
- raw_cat1 = Categorical(["a", "b", "c", "d"],
- categories=["a", "b", "c", "d"], ordered=False)
- raw_cat2 = Categorical(["a", "b", "c", "d"],
- categories=["d", "c", "b", "a"], ordered=True)
- s = ["a", "b", "c", "d"]
- df = DataFrame({"unsort": raw_cat1,
- "sort": raw_cat2,
- "string": s,
- "values": [1, 2, 3, 4]})
-
- # Cats must be sorted in a dataframe
- res = df.sort_values(by=["string"], ascending=False)
- exp = np.array(["d", "c", "b", "a"], dtype=np.object_)
- tm.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
- assert res["sort"].dtype == "category"
-
- res = df.sort_values(by=["sort"], ascending=False)
- exp = df.sort_values(by=["string"], ascending=True)
- tm.assert_series_equal(res["values"], exp["values"])
- assert res["sort"].dtype == "category"
- assert res["unsort"].dtype == "category"
-
- # unordered cat, but we allow this
- df.sort_values(by=["unsort"], ascending=False)
-
- # multi-columns sort
- # GH 7848
- df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
- "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
- df["grade"] = Categorical(df["raw_grade"], ordered=True)
- df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
-
- # sorts 'grade' according to the order of the categories
- result = df.sort_values(by=['grade'])
- expected = df.iloc[[1, 2, 5, 0, 3, 4]]
- tm.assert_frame_equal(result, expected)
-
- # multi
- result = df.sort_values(by=['grade', 'id'])
- expected = df.iloc[[2, 1, 5, 4, 3, 0]]
- tm.assert_frame_equal(result, expected)
-
- def test_slicing(self):
- cat = Series(Categorical([1, 2, 3, 4]))
- reversed = cat[::-1]
- exp = np.array([4, 3, 2, 1], dtype=np.int64)
- tm.assert_numpy_array_equal(reversed.__array__(), exp)
-
- df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
- df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
-
- expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
- result = df.iloc[10]
- tm.assert_series_equal(result, expected)
-
- expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
- index=np.arange(10, 20).astype('int64'))
- expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
- result = df.iloc[10:20]
- tm.assert_frame_equal(result, expected)
-
- expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
- result = df.loc[8]
- tm.assert_series_equal(result, expected)
-
- def test_slicing_and_getting_ops(self):
-
- # systematically test the slicing operations:
- # for all slicing ops:
- # - returning a dataframe
- # - returning a column
- # - returning a row
- # - returning a single value
-
- cats = Categorical(
- ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
- idx = Index(["h", "i", "j", "k", "l", "m", "n"])
- values = [1, 2, 3, 4, 5, 6, 7]
- df = DataFrame({"cats": cats, "values": values}, index=idx)
-
- # the expected values
- cats2 = Categorical(["b", "c"], categories=["a", "b", "c"])
- idx2 = Index(["j", "k"])
- values2 = [3, 4]
-
- # 2:4,: | "j":"k",:
- exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2)
-
- # :,"cats" | :,0
- exp_col = Series(cats, index=idx, name='cats')
-
- # "j",: | 2,:
- exp_row = Series(["b", 3], index=["cats", "values"], dtype="object",
- name="j")
-
- # "j","cats | 2,0
- exp_val = "b"
-
- # iloc
- # frame
- res_df = df.iloc[2:4, :]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"])
-
- # row
- res_row = df.iloc[2, :]
- tm.assert_series_equal(res_row, exp_row)
- assert isinstance(res_row["cats"], compat.string_types)
-
- # col
- res_col = df.iloc[:, 0]
- tm.assert_series_equal(res_col, exp_col)
- assert is_categorical_dtype(res_col)
-
- # single value
- res_val = df.iloc[2, 0]
- assert res_val == exp_val
-
- # loc
- # frame
- res_df = df.loc["j":"k", :]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"])
-
- # row
- res_row = df.loc["j", :]
- tm.assert_series_equal(res_row, exp_row)
- assert isinstance(res_row["cats"], compat.string_types)
-
- # col
- res_col = df.loc[:, "cats"]
- tm.assert_series_equal(res_col, exp_col)
- assert is_categorical_dtype(res_col)
-
- # single value
- res_val = df.loc["j", "cats"]
- assert res_val == exp_val
-
- # ix
- # frame
- # res_df = df.loc["j":"k",[0,1]] # doesn't work?
- res_df = df.loc["j":"k", :]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"])
-
- # row
- res_row = df.loc["j", :]
- tm.assert_series_equal(res_row, exp_row)
- assert isinstance(res_row["cats"], compat.string_types)
-
- # col
- res_col = df.loc[:, "cats"]
- tm.assert_series_equal(res_col, exp_col)
- assert is_categorical_dtype(res_col)
-
- # single value
- res_val = df.loc["j", df.columns[0]]
- assert res_val == exp_val
-
- # iat
- res_val = df.iat[2, 0]
- assert res_val == exp_val
-
- # at
- res_val = df.at["j", "cats"]
- assert res_val == exp_val
-
- # fancy indexing
- exp_fancy = df.iloc[[2]]
-
- res_fancy = df[df["cats"] == "b"]
- tm.assert_frame_equal(res_fancy, exp_fancy)
- res_fancy = df[df["values"] == 3]
- tm.assert_frame_equal(res_fancy, exp_fancy)
-
- # get_value
- res_val = df.at["j", "cats"]
- assert res_val == exp_val
-
- # i : int, slice, or sequence of integers
- res_row = df.iloc[2]
- tm.assert_series_equal(res_row, exp_row)
- assert isinstance(res_row["cats"], compat.string_types)
-
- res_df = df.iloc[slice(2, 4)]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"])
-
- res_df = df.iloc[[2, 3]]
- tm.assert_frame_equal(res_df, exp_df)
- assert is_categorical_dtype(res_df["cats"])
-
- res_col = df.iloc[:, 0]
- tm.assert_series_equal(res_col, exp_col)
- assert is_categorical_dtype(res_col)
-
- res_df = df.iloc[:, slice(0, 2)]
- tm.assert_frame_equal(res_df, df)
- assert is_categorical_dtype(res_df["cats"])
-
- res_df = df.iloc[:, [0, 1]]
- tm.assert_frame_equal(res_df, df)
- assert is_categorical_dtype(res_df["cats"])
-
- def test_slicing_doc_examples(self):
-
- # GH 7918
- cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
- categories=["a", "b", "c"])
- idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
- values = [1, 2, 2, 2, 3, 4, 5]
- df = DataFrame({"cats": cats, "values": values}, index=idx)
-
- result = df.iloc[2:4, :]
- expected = DataFrame(
- {"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
- "values": [2, 2]}, index=['j', 'k'])
- tm.assert_frame_equal(result, expected)
-
- result = df.iloc[2:4, :].dtypes
- expected = Series(['category', 'int64'], ['cats', 'values'])
- tm.assert_series_equal(result, expected)
-
- result = df.loc["h":"j", "cats"]
- expected = Series(Categorical(['a', 'b', 'b'],
- categories=['a', 'b', 'c']),
- index=['h', 'i', 'j'], name='cats')
- tm.assert_series_equal(result, expected)
-
- result = df.loc["h":"j", df.columns[0:1]]
- expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
- categories=['a', 'b', 'c'])},
- index=['h', 'i', 'j'])
- tm.assert_frame_equal(result, expected)
-
- def test_assigning_ops(self):
- # systematically test the assigning operations:
- # for all slicing ops:
- # for value in categories and value not in categories:
-
- # - assign a single value -> exp_single_cats_value
-
- # - assign a complete row (mixed values) -> exp_single_row
-
- # assign multiple rows (mixed values) (-> array) -> exp_multi_row
-
- # assign a part of a column with dtype == categorical ->
- # exp_parts_cats_col
-
- # assign a part of a column with dtype != categorical ->
- # exp_parts_cats_col
-
- cats = Categorical(["a", "a", "a", "a", "a", "a", "a"],
- categories=["a", "b"])
- idx = Index(["h", "i", "j", "k", "l", "m", "n"])
- values = [1, 1, 1, 1, 1, 1, 1]
- orig = DataFrame({"cats": cats, "values": values}, index=idx)
-
- # the expected values
- # changed single row
- cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"],
- categories=["a", "b"])
- idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
- values1 = [1, 1, 2, 1, 1, 1, 1]
- exp_single_row = DataFrame({"cats": cats1,
- "values": values1}, index=idx1)
-
- # changed multiple rows
- cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"],
- categories=["a", "b"])
- idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
- values2 = [1, 1, 2, 2, 1, 1, 1]
- exp_multi_row = DataFrame({"cats": cats2,
- "values": values2}, index=idx2)
-
- # changed part of the cats column
- cats3 = Categorical(
- ["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
- idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
- values3 = [1, 1, 1, 1, 1, 1, 1]
- exp_parts_cats_col = DataFrame({"cats": cats3,
- "values": values3}, index=idx3)
-
- # changed single value in cats col
- cats4 = Categorical(
- ["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
- idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
- values4 = [1, 1, 1, 1, 1, 1, 1]
- exp_single_cats_value = DataFrame({"cats": cats4,
- "values": values4}, index=idx4)
-
- # iloc
- # ###############
- # - assign a single value -> exp_single_cats_value
- df = orig.copy()
- df.iloc[2, 0] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- df = orig.copy()
- df.iloc[df.index == "j", 0] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- # - assign a single value not in the current categories set
- def f():
- df = orig.copy()
- df.iloc[2, 0] = "c"
-
- pytest.raises(ValueError, f)
-
- # - assign a complete row (mixed values) -> exp_single_row
- df = orig.copy()
- df.iloc[2, :] = ["b", 2]
- tm.assert_frame_equal(df, exp_single_row)
-
- # - assign a complete row (mixed values) not in categories set
- def f():
- df = orig.copy()
- df.iloc[2, :] = ["c", 2]
-
- pytest.raises(ValueError, f)
-
- # - assign multiple rows (mixed values) -> exp_multi_row
- df = orig.copy()
- df.iloc[2:4, :] = [["b", 2], ["b", 2]]
- tm.assert_frame_equal(df, exp_multi_row)
-
- def f():
- df = orig.copy()
- df.iloc[2:4, :] = [["c", 2], ["c", 2]]
-
- pytest.raises(ValueError, f)
-
- # assign a part of a column with dtype == categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- # different categories -> not sure if this should fail or pass
- df = orig.copy()
- df.iloc[2:4, 0] = Categorical(list('bb'), categories=list('abc'))
-
- with pytest.raises(ValueError):
- # different values
- df = orig.copy()
- df.iloc[2:4, 0] = Categorical(list('cc'), categories=list('abc'))
-
- # assign a part of a column with dtype != categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.iloc[2:4, 0] = ["b", "b"]
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- df.iloc[2:4, 0] = ["c", "c"]
-
- # loc
- # ##############
- # - assign a single value -> exp_single_cats_value
- df = orig.copy()
- df.loc["j", "cats"] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- df = orig.copy()
- df.loc[df.index == "j", "cats"] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- # - assign a single value not in the current categories set
- def f():
- df = orig.copy()
- df.loc["j", "cats"] = "c"
-
- pytest.raises(ValueError, f)
-
- # - assign a complete row (mixed values) -> exp_single_row
- df = orig.copy()
- df.loc["j", :] = ["b", 2]
- tm.assert_frame_equal(df, exp_single_row)
-
- # - assign a complete row (mixed values) not in categories set
- def f():
- df = orig.copy()
- df.loc["j", :] = ["c", 2]
-
- pytest.raises(ValueError, f)
-
- # - assign multiple rows (mixed values) -> exp_multi_row
- df = orig.copy()
- df.loc["j":"k", :] = [["b", 2], ["b", 2]]
- tm.assert_frame_equal(df, exp_multi_row)
-
- def f():
- df = orig.copy()
- df.loc["j":"k", :] = [["c", 2], ["c", 2]]
-
- pytest.raises(ValueError, f)
-
- # assign a part of a column with dtype == categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.loc["j":"k", "cats"] = Categorical(
- ["b", "b"], categories=["a", "b"])
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- # different categories -> not sure if this should fail or pass
- df = orig.copy()
- df.loc["j":"k", "cats"] = Categorical(
- ["b", "b"], categories=["a", "b", "c"])
-
- with pytest.raises(ValueError):
- # different values
- df = orig.copy()
- df.loc["j":"k", "cats"] = Categorical(
- ["c", "c"], categories=["a", "b", "c"])
-
- # assign a part of a column with dtype != categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.loc["j":"k", "cats"] = ["b", "b"]
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- df.loc["j":"k", "cats"] = ["c", "c"]
-
- # loc
- # ##############
- # - assign a single value -> exp_single_cats_value
- df = orig.copy()
- df.loc["j", df.columns[0]] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- df = orig.copy()
- df.loc[df.index == "j", df.columns[0]] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- # - assign a single value not in the current categories set
- def f():
- df = orig.copy()
- df.loc["j", df.columns[0]] = "c"
-
- pytest.raises(ValueError, f)
-
- # - assign a complete row (mixed values) -> exp_single_row
- df = orig.copy()
- df.loc["j", :] = ["b", 2]
- tm.assert_frame_equal(df, exp_single_row)
-
- # - assign a complete row (mixed values) not in categories set
- def f():
- df = orig.copy()
- df.loc["j", :] = ["c", 2]
-
- pytest.raises(ValueError, f)
-
- # - assign multiple rows (mixed values) -> exp_multi_row
- df = orig.copy()
- df.loc["j":"k", :] = [["b", 2], ["b", 2]]
- tm.assert_frame_equal(df, exp_multi_row)
-
- def f():
- df = orig.copy()
- df.loc["j":"k", :] = [["c", 2], ["c", 2]]
-
- pytest.raises(ValueError, f)
-
- # assign a part of a column with dtype == categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.loc["j":"k", df.columns[0]] = Categorical(
- ["b", "b"], categories=["a", "b"])
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- # different categories -> not sure if this should fail or pass
- df = orig.copy()
- df.loc["j":"k", df.columns[0]] = Categorical(
- ["b", "b"], categories=["a", "b", "c"])
-
- with pytest.raises(ValueError):
- # different values
- df = orig.copy()
- df.loc["j":"k", df.columns[0]] = Categorical(
- ["c", "c"], categories=["a", "b", "c"])
-
- # assign a part of a column with dtype != categorical ->
- # exp_parts_cats_col
- df = orig.copy()
- df.loc["j":"k", df.columns[0]] = ["b", "b"]
- tm.assert_frame_equal(df, exp_parts_cats_col)
-
- with pytest.raises(ValueError):
- df.loc["j":"k", df.columns[0]] = ["c", "c"]
-
- # iat
- df = orig.copy()
- df.iat[2, 0] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- # - assign a single value not in the current categories set
- def f():
- df = orig.copy()
- df.iat[2, 0] = "c"
-
- pytest.raises(ValueError, f)
-
- # at
- # - assign a single value -> exp_single_cats_value
- df = orig.copy()
- df.at["j", "cats"] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- # - assign a single value not in the current categories set
- def f():
- df = orig.copy()
- df.at["j", "cats"] = "c"
-
- pytest.raises(ValueError, f)
-
- # fancy indexing
- catsf = Categorical(["a", "a", "c", "c", "a", "a", "a"],
- categories=["a", "b", "c"])
- idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
- valuesf = [1, 1, 3, 3, 1, 1, 1]
- df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
-
- exp_fancy = exp_multi_row.copy()
- exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
-
- df[df["cats"] == "c"] = ["b", 2]
- # category c is kept in .categories
- tm.assert_frame_equal(df, exp_fancy)
-
- # set_value
- df = orig.copy()
- df.at["j", "cats"] = "b"
- tm.assert_frame_equal(df, exp_single_cats_value)
-
- def f():
- df = orig.copy()
- df.at["j", "cats"] = "c"
-
- pytest.raises(ValueError, f)
-
- # Assigning a Category to parts of a int/... column uses the values of
- # the Catgorical
- df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
- exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
- df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
- df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
- tm.assert_frame_equal(df, exp)
-
- # Series
- orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
- s = orig.copy()
- s[:] = "a"
- exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
- tm.assert_series_equal(s, exp)
-
- s = orig.copy()
- s[1] = "a"
- exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
- tm.assert_series_equal(s, exp)
-
- s = orig.copy()
- s[s.index > 0] = "a"
- exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
- tm.assert_series_equal(s, exp)
-
- s = orig.copy()
- s[[False, True]] = "a"
- exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
- tm.assert_series_equal(s, exp)
-
- s = orig.copy()
- s.index = ["x", "y"]
- s["y"] = "a"
- exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
- index=["x", "y"])
- tm.assert_series_equal(s, exp)
-
- # ensure that one can set something to np.nan
- s = Series(Categorical([1, 2, 3]))
- exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
- s[1] = np.nan
- tm.assert_series_equal(s, exp)
-
- def test_comparisons(self):
- tests_data = [(list("abc"), list("cba"), list("bbb")),
- ([1, 2, 3], [3, 2, 1], [2, 2, 2])]
- for data, reverse, base in tests_data:
- cat_rev = Series(
- Categorical(data, categories=reverse, ordered=True))
- cat_rev_base = Series(
- Categorical(base, categories=reverse, ordered=True))
- cat = Series(Categorical(data, ordered=True))
- cat_base = Series(
- Categorical(base, categories=cat.cat.categories, ordered=True))
- s = Series(base)
- a = np.array(base)
-
- # comparisons need to take categories ordering into account
- res_rev = cat_rev > cat_rev_base
- exp_rev = Series([True, False, False])
- tm.assert_series_equal(res_rev, exp_rev)
-
- res_rev = cat_rev < cat_rev_base
- exp_rev = Series([False, False, True])
- tm.assert_series_equal(res_rev, exp_rev)
-
- res = cat > cat_base
- exp = Series([False, False, True])
- tm.assert_series_equal(res, exp)
-
- scalar = base[1]
- res = cat > scalar
- exp = Series([False, False, True])
- exp2 = cat.values > scalar
- tm.assert_series_equal(res, exp)
- tm.assert_numpy_array_equal(res.values, exp2)
- res_rev = cat_rev > scalar
- exp_rev = Series([True, False, False])
- exp_rev2 = cat_rev.values > scalar
- tm.assert_series_equal(res_rev, exp_rev)
- tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
-
- # Only categories with same categories can be compared
- def f():
- cat > cat_rev
-
- pytest.raises(TypeError, f)
-
- # categorical cannot be compared to Series or numpy array, and also
- # not the other way around
- pytest.raises(TypeError, lambda: cat > s)
- pytest.raises(TypeError, lambda: cat_rev > s)
- pytest.raises(TypeError, lambda: cat > a)
- pytest.raises(TypeError, lambda: cat_rev > a)
-
- pytest.raises(TypeError, lambda: s < cat)
- pytest.raises(TypeError, lambda: s < cat_rev)
-
- pytest.raises(TypeError, lambda: a < cat)
- pytest.raises(TypeError, lambda: a < cat_rev)
-
- # unequal comparison should raise for unordered cats
- cat = Series(Categorical(list("abc")))
-
- def f():
- cat > "b"
-
- pytest.raises(TypeError, f)
- cat = Series(Categorical(list("abc"), ordered=False))
-
- def f():
- cat > "b"
-
- pytest.raises(TypeError, f)
-
- # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
- # and following comparisons with scalars not in categories should raise
- # for unequal comps, but not for equal/not equal
- cat = Series(Categorical(list("abc"), ordered=True))
-
- pytest.raises(TypeError, lambda: cat < "d")
- pytest.raises(TypeError, lambda: cat > "d")
- pytest.raises(TypeError, lambda: "d" < cat)
- pytest.raises(TypeError, lambda: "d" > cat)
-
- tm.assert_series_equal(cat == "d", Series([False, False, False]))
- tm.assert_series_equal(cat != "d", Series([True, True, True]))
-
- # And test NaN handling...
- cat = Series(Categorical(["a", "b", "c", np.nan]))
- exp = Series([True, True, True, False])
- res = (cat == cat)
- tm.assert_series_equal(res, exp)
-
- def test_cat_equality(self):
-
- # GH 8938
- # allow equality comparisons
- a = Series(list('abc'), dtype="category")
- b = Series(list('abc'), dtype="object")
- c = Series(['a', 'b', 'cc'], dtype="object")
- d = Series(list('acb'), dtype="object")
- e = Categorical(list('abc'))
- f = Categorical(list('acb'))
-
- # vs scalar
- assert not (a == 'a').all()
- assert ((a != 'a') == ~(a == 'a')).all()
-
- assert not ('a' == a).all()
- assert (a == 'a')[0]
- assert ('a' == a)[0]
- assert not ('a' != a)[0]
-
- # vs list-like
- assert (a == a).all()
- assert not (a != a).all()
-
- assert (a == list(a)).all()
- assert (a == b).all()
- assert (b == a).all()
- assert ((~(a == b)) == (a != b)).all()
- assert ((~(b == a)) == (b != a)).all()
-
- assert not (a == c).all()
- assert not (c == a).all()
- assert not (a == d).all()
- assert not (d == a).all()
-
- # vs a cat-like
- assert (a == e).all()
- assert (e == a).all()
- assert not (a == f).all()
- assert not (f == a).all()
-
- assert ((~(a == e) == (a != e)).all())
- assert ((~(e == a) == (e != a)).all())
- assert ((~(a == f) == (a != f)).all())
- assert ((~(f == a) == (f != a)).all())
-
- # non-equality is not comparable
- pytest.raises(TypeError, lambda: a < b)
- pytest.raises(TypeError, lambda: b < a)
- pytest.raises(TypeError, lambda: a > b)
- pytest.raises(TypeError, lambda: b > a)
-
- @pytest.mark.parametrize('ctor', [
- lambda *args, **kwargs: Categorical(*args, **kwargs),
- lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
- ])
- def test_unordered_different_order_equal(self, ctor):
- # https://github.com/pandas-dev/pandas/issues/16014
- c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
- c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
- assert (c1 == c2).all()
-
- c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
- c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
- assert (c1 != c2).all()
-
- c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
- c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
- assert (c1 != c2).all()
-
- c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
- c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
- result = c1 == c2
- tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
-
- def test_unordered_different_categories_raises(self):
- c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
- c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
- with tm.assert_raises_regex(TypeError,
- "Categoricals can only be compared"):
- c1 == c2
-
- def test_compare_different_lengths(self):
- c1 = Categorical([], categories=['a', 'b'])
- c2 = Categorical([], categories=['a'])
- msg = "Categories are different lengths"
- with tm.assert_raises_regex(TypeError, msg):
- c1 == c2
-
- def test_concat_append(self):
- cat = Categorical(["a", "b"], categories=["a", "b"])
- vals = [1, 2]
- df = DataFrame({"cats": cat, "vals": vals})
- cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
- vals2 = [1, 2, 1, 2]
- exp = DataFrame({"cats": cat2, "vals": vals2},
- index=Index([0, 1, 0, 1]))
-
- tm.assert_frame_equal(pd.concat([df, df]), exp)
- tm.assert_frame_equal(df.append(df), exp)
-
- # GH 13524 can concat different categories
- cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
- vals3 = [1, 2]
- df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
-
- res = pd.concat([df, df_different_categories], ignore_index=True)
- exp = DataFrame({"cats": list('abab'), "vals": [1, 2, 1, 2]})
- tm.assert_frame_equal(res, exp)
-
- res = df.append(df_different_categories, ignore_index=True)
- tm.assert_frame_equal(res, exp)
-
- def test_concat_append_gh7864(self):
- # GH 7864
- # make sure ordering is preserverd
- df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list('abbaae')})
- df["grade"] = Categorical(df["raw_grade"])
- df['grade'].cat.set_categories(['e', 'a', 'b'])
-
- df1 = df[0:3]
- df2 = df[3:]
-
- tm.assert_index_equal(df['grade'].cat.categories,
- df1['grade'].cat.categories)
- tm.assert_index_equal(df['grade'].cat.categories,
- df2['grade'].cat.categories)
-
- dfx = pd.concat([df1, df2])
- tm.assert_index_equal(df['grade'].cat.categories,
- dfx['grade'].cat.categories)
-
- dfa = df1.append(df2)
- tm.assert_index_equal(df['grade'].cat.categories,
- dfa['grade'].cat.categories)
-
- def test_concat_preserve(self):
-
- # GH 8641 series concat not preserving category dtype
- # GH 13524 can concat different categories
- s = Series(list('abc'), dtype='category')
- s2 = Series(list('abd'), dtype='category')
-
- exp = Series(list('abcabd'))
- res = pd.concat([s, s2], ignore_index=True)
- tm.assert_series_equal(res, exp)
-
- exp = Series(list('abcabc'), dtype='category')
- res = pd.concat([s, s], ignore_index=True)
- tm.assert_series_equal(res, exp)
-
- exp = Series(list('abcabc'), index=[0, 1, 2, 0, 1, 2],
- dtype='category')
- res = pd.concat([s, s])
- tm.assert_series_equal(res, exp)
-
- a = Series(np.arange(6, dtype='int64'))
- b = Series(list('aabbca'))
-
- df2 = DataFrame({'A': a,
- 'B': b.astype(CategoricalDtype(list('cab')))})
- res = pd.concat([df2, df2])
- exp = DataFrame(
- {'A': pd.concat([a, a]),
- 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab')))})
- tm.assert_frame_equal(res, exp)
-
- def test_categorical_index_preserver(self):
-
- a = Series(np.arange(6, dtype='int64'))
- b = Series(list('aabbca'))
-
- df2 = DataFrame({'A': a,
- 'B': b.astype(CategoricalDtype(list('cab')))
- }).set_index('B')
- result = pd.concat([df2, df2])
- expected = DataFrame(
- {'A': pd.concat([a, a]),
- 'B': pd.concat([b, b]).astype(CategoricalDtype(list('cab')))
- }).set_index('B')
- tm.assert_frame_equal(result, expected)
-
- # wrong catgories
- df3 = DataFrame({'A': a, 'B': Categorical(b, categories=list('abe'))
- }).set_index('B')
- pytest.raises(TypeError, lambda: pd.concat([df2, df3]))
-
- def test_merge(self):
- # GH 9426
-
- right = DataFrame({'c': {0: 'a',
- 1: 'b',
- 2: 'c',
- 3: 'd',
- 4: 'e'},
- 'd': {0: 'null',
- 1: 'null',
- 2: 'null',
- 3: 'null',
- 4: 'null'}})
- left = DataFrame({'a': {0: 'f',
- 1: 'f',
- 2: 'f',
- 3: 'f',
- 4: 'f'},
- 'b': {0: 'g',
- 1: 'g',
- 2: 'g',
- 3: 'g',
- 4: 'g'}})
- df = pd.merge(left, right, how='left', left_on='b', right_on='c')
-
- # object-object
- expected = df.copy()
-
- # object-cat
- # note that we propagate the category
- # because we don't have any matching rows
- cright = right.copy()
- cright['d'] = cright['d'].astype('category')
- result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
- expected['d'] = expected['d'].astype(CategoricalDtype(['null']))
- tm.assert_frame_equal(result, expected)
-
- # cat-object
- cleft = left.copy()
- cleft['b'] = cleft['b'].astype('category')
- result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
- tm.assert_frame_equal(result, expected)
-
- # cat-cat
- cright = right.copy()
- cright['d'] = cright['d'].astype('category')
- cleft = left.copy()
- cleft['b'] = cleft['b'].astype('category')
- result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
- tm.assert_frame_equal(result, expected)
-
- def test_repeat(self):
- # GH10183
- cat = Categorical(["a", "b"], categories=["a", "b"])
- exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
- res = cat.repeat(2)
- tm.assert_categorical_equal(res, exp)
-
- def test_numpy_repeat(self):
- cat = Categorical(["a", "b"], categories=["a", "b"])
- exp = Categorical(["a", "a", "b", "b"], categories=["a", "b"])
- tm.assert_categorical_equal(np.repeat(cat, 2), exp)
-
- msg = "the 'axis' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.repeat, cat, 2, axis=1)
-
- def test_reshape(self):
- cat = Categorical([], categories=["a", "b"])
- tm.assert_produces_warning(FutureWarning, cat.reshape, 0)
-
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical([], categories=["a", "b"])
- tm.assert_categorical_equal(cat.reshape(0), cat)
-
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical([], categories=["a", "b"])
- tm.assert_categorical_equal(cat.reshape((5, -1)), cat)
-
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical(["a", "b"], categories=["a", "b"])
- tm.assert_categorical_equal(cat.reshape(cat.shape), cat)
-
- with tm.assert_produces_warning(FutureWarning):
- cat = Categorical(["a", "b"], categories=["a", "b"])
- tm.assert_categorical_equal(cat.reshape(cat.size), cat)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- msg = "can only specify one unknown dimension"
- cat = Categorical(["a", "b"], categories=["a", "b"])
- tm.assert_raises_regex(ValueError, msg, cat.reshape, (-2, -1))
-
- def test_numpy_reshape(self):
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- cat = Categorical(["a", "b"], categories=["a", "b"])
- tm.assert_categorical_equal(np.reshape(cat, cat.shape), cat)
-
- with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
- msg = "the 'order' parameter is not supported"
- tm.assert_raises_regex(ValueError, msg, np.reshape,
- cat, cat.shape, order='F')
-
- def test_astype_to_other(self):
-
- s = self.cat['value_group']
- expected = s
- tm.assert_series_equal(s.astype('category'), expected)
- tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
- pytest.raises(ValueError, lambda: s.astype('float64'))
-
- cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
- exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
- tm.assert_series_equal(cat.astype('str'), exp)
- s2 = Series(Categorical(['1', '2', '3', '4']))
- exp2 = Series([1, 2, 3, 4]).astype(int)
- tm.assert_series_equal(s2.astype('int'), exp2)
-
- # object don't sort correctly, so just compare that we have the same
- # values
- def cmp(a, b):
- tm.assert_almost_equal(
- np.sort(np.unique(a)), np.sort(np.unique(b)))
-
- expected = Series(np.array(s.values), name='value_group')
- cmp(s.astype('object'), expected)
- cmp(s.astype(np.object_), expected)
-
- # array conversion
- tm.assert_almost_equal(np.array(s), np.array(s.values))
-
- # valid conversion
- for valid in [lambda x: x.astype('category'),
- lambda x: x.astype(CategoricalDtype()),
- lambda x: x.astype('object').astype('category'),
- lambda x: x.astype('object').astype(
- CategoricalDtype())
- ]:
-
- result = valid(s)
- # compare series values
- # internal .categories can't be compared because it is sorted
- tm.assert_series_equal(result, s, check_categorical=False)
-
- # invalid conversion (these are NOT a dtype)
- for invalid in [lambda x: x.astype(Categorical),
- lambda x: x.astype('object').astype(Categorical)]:
- pytest.raises(TypeError, lambda: invalid(s))
-
- def test_astype_categorical(self):
-
- cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
- tm.assert_categorical_equal(cat, cat.astype('category'))
- tm.assert_almost_equal(np.array(cat), cat.astype('object'))
-
- pytest.raises(ValueError, lambda: cat.astype(float))
-
- def test_to_records(self):
-
- # GH8626
-
- # dict creation
- df = DataFrame({'A': list('abc')}, dtype='category')
- expected = Series(list('abc'), dtype='category', name='A')
- tm.assert_series_equal(df['A'], expected)
-
- # list-like creation
- df = DataFrame(list('abc'), dtype='category')
- expected = Series(list('abc'), dtype='category', name=0)
- tm.assert_series_equal(df[0], expected)
-
- # to record array
- # this coerces
- result = df.to_records()
- expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
- dtype=[('index', '=i8'), ('0', 'O')])
- tm.assert_almost_equal(result, expected)
-
- def test_numeric_like_ops(self):
-
- # numeric ops should not succeed
- for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
- pytest.raises(TypeError,
- lambda: getattr(self.cat, op)(self.cat))
-
- # reduction ops should not succeed (unless specifically defined, e.g.
- # min/max)
- s = self.cat['value_group']
- for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
- pytest.raises(TypeError,
- lambda: getattr(s, op)(numeric_only=False))
-
- # mad technically works because it takes always the numeric data
-
- # numpy ops
- s = Series(Categorical([1, 2, 3, 4]))
- pytest.raises(TypeError, lambda: np.sum(s))
-
- # numeric ops on a Series
- for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
- pytest.raises(TypeError, lambda: getattr(s, op)(2))
-
- # invalid ufunc
- pytest.raises(TypeError, lambda: np.log(s))
-
- def test_cat_tab_completition(self):
- # test the tab completion display
- ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
- 'add_categories', 'remove_categories',
- 'rename_categories', 'reorder_categories',
- 'remove_unused_categories', 'as_ordered', 'as_unordered']
-
- def get_dir(s):
- results = [r for r in s.cat.__dir__() if not r.startswith('_')]
- return list(sorted(set(results)))
-
- s = Series(list('aabbcde')).astype('category')
- results = get_dir(s)
- tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
-
- def test_cat_accessor_api(self):
- # GH 9322
- from pandas.core.categorical import CategoricalAccessor
- assert Series.cat is CategoricalAccessor
- s = Series(list('aabbcde')).astype('category')
- assert isinstance(s.cat, CategoricalAccessor)
-
- invalid = Series([1])
- with tm.assert_raises_regex(AttributeError,
- "only use .cat accessor"):
- invalid.cat
- assert not hasattr(invalid, 'cat')
-
- def test_cat_accessor_no_new_attributes(self):
- # https://github.com/pandas-dev/pandas/issues/10673
- c = Series(list('aabbcde')).astype('category')
- with tm.assert_raises_regex(AttributeError,
- "You cannot add any new attribute"):
- c.cat.xlabel = "a"
-
- def test_str_accessor_api_for_categorical(self):
- # https://github.com/pandas-dev/pandas/issues/10661
- from pandas.core.strings import StringMethods
- s = Series(list('aabb'))
- s = s + " " + s
- c = s.astype('category')
- assert isinstance(c.str, StringMethods)
-
- # str functions, which need special arguments
- special_func_defs = [
- ('cat', (list("zyxw"),), {"sep": ","}),
- ('center', (10,), {}),
- ('contains', ("a",), {}),
- ('count', ("a",), {}),
- ('decode', ("UTF-8",), {}),
- ('encode', ("UTF-8",), {}),
- ('endswith', ("a",), {}),
- ('extract', ("([a-z]*) ",), {"expand": False}),
- ('extract', ("([a-z]*) ",), {"expand": True}),
- ('extractall', ("([a-z]*) ",), {}),
- ('find', ("a",), {}),
- ('findall', ("a",), {}),
- ('index', (" ",), {}),
- ('ljust', (10,), {}),
- ('match', ("a"), {}), # deprecated...
- ('normalize', ("NFC",), {}),
- ('pad', (10,), {}),
- ('partition', (" ",), {"expand": False}), # not default
- ('partition', (" ",), {"expand": True}), # default
- ('repeat', (3,), {}),
- ('replace', ("a", "z"), {}),
- ('rfind', ("a",), {}),
- ('rindex', (" ",), {}),
- ('rjust', (10,), {}),
- ('rpartition', (" ",), {"expand": False}), # not default
- ('rpartition', (" ",), {"expand": True}), # default
- ('slice', (0, 1), {}),
- ('slice_replace', (0, 1, "z"), {}),
- ('split', (" ",), {"expand": False}), # default
- ('split', (" ",), {"expand": True}), # not default
- ('startswith', ("a",), {}),
- ('wrap', (2,), {}),
- ('zfill', (10,), {})
- ]
- _special_func_names = [f[0] for f in special_func_defs]
-
- # * get, join: they need a individual elements of type lists, but
- # we can't make a categorical with lists as individual categories.
- # -> `s.str.split(" ").astype("category")` will error!
- # * `translate` has different interfaces for py2 vs. py3
- _ignore_names = ["get", "join", "translate"]
-
- str_func_names = [f for f in dir(s.str) if not (
- f.startswith("_") or
- f in _special_func_names or
- f in _ignore_names)]
-
- func_defs = [(f, (), {}) for f in str_func_names]
- func_defs.extend(special_func_defs)
-
- for func, args, kwargs in func_defs:
- res = getattr(c.str, func)(*args, **kwargs)
- exp = getattr(s.str, func)(*args, **kwargs)
-
- if isinstance(res, DataFrame):
- tm.assert_frame_equal(res, exp)
- else:
- tm.assert_series_equal(res, exp)
-
- invalid = Series([1, 2, 3]).astype('category')
- with tm.assert_raises_regex(AttributeError,
- "Can only use .str "
- "accessor with string"):
- invalid.str
- assert not hasattr(invalid, 'str')
-
- def test_dt_accessor_api_for_categorical(self):
- # https://github.com/pandas-dev/pandas/issues/10661
- from pandas.core.indexes.accessors import Properties
-
- s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
- c_dr = s_dr.astype("category")
-
- s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
- c_pr = s_pr.astype("category")
-
- s_tdr = Series(timedelta_range('1 days', '10 days'))
- c_tdr = s_tdr.astype("category")
-
- # only testing field (like .day)
- # and bool (is_month_start)
- get_ops = lambda x: x._datetimelike_ops
-
- test_data = [
- ("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
- ("Period", get_ops(PeriodIndex), s_pr, c_pr),
- ("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr)]
-
- assert isinstance(c_dr.dt, Properties)
-
- special_func_defs = [
- ('strftime', ("%Y-%m-%d",), {}),
- ('tz_convert', ("EST",), {}),
- ('round', ("D",), {}),
- ('floor', ("D",), {}),
- ('ceil', ("D",), {}),
- ('asfreq', ("D",), {}),
- # ('tz_localize', ("UTC",), {}),
- ]
- _special_func_names = [f[0] for f in special_func_defs]
-
- # the series is already localized
- _ignore_names = ['tz_localize', 'components']
-
- for name, attr_names, s, c in test_data:
- func_names = [f
- for f in dir(s.dt)
- if not (f.startswith("_") or f in attr_names or f in
- _special_func_names or f in _ignore_names)]
-
- func_defs = [(f, (), {}) for f in func_names]
- for f_def in special_func_defs:
- if f_def[0] in dir(s.dt):
- func_defs.append(f_def)
-
- for func, args, kwargs in func_defs:
- res = getattr(c.dt, func)(*args, **kwargs)
- exp = getattr(s.dt, func)(*args, **kwargs)
-
- if isinstance(res, DataFrame):
- tm.assert_frame_equal(res, exp)
- elif isinstance(res, Series):
- tm.assert_series_equal(res, exp)
- else:
- tm.assert_almost_equal(res, exp)
-
- for attr in attr_names:
- try:
- res = getattr(c.dt, attr)
- exp = getattr(s.dt, attr)
- except Exception as e:
- print(name, attr)
- raise e
-
- if isinstance(res, DataFrame):
- tm.assert_frame_equal(res, exp)
- elif isinstance(res, Series):
- tm.assert_series_equal(res, exp)
- else:
- tm.assert_almost_equal(res, exp)
-
- invalid = Series([1, 2, 3]).astype('category')
- with tm.assert_raises_regex(
- AttributeError, "Can only use .dt accessor with datetimelike"):
- invalid.dt
- assert not hasattr(invalid, 'str')
-
- def test_concat_categorical(self):
- # See GH 10177
- df1 = DataFrame(np.arange(18, dtype='int64').reshape(6, 3),
- columns=["a", "b", "c"])
-
- df2 = DataFrame(np.arange(14, dtype='int64').reshape(7, 2),
- columns=["a", "c"])
-
- cat_values = ["one", "one", "two", "one", "two", "two", "one"]
- df2['h'] = Series(Categorical(cat_values))
-
- res = pd.concat((df1, df2), axis=0, ignore_index=True)
- exp = DataFrame({'a': [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
- 'b': [1, 4, 7, 10, 13, 16, np.nan, np.nan, np.nan,
- np.nan, np.nan, np.nan, np.nan],
- 'c': [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
- 'h': [None] * 6 + cat_values})
- tm.assert_frame_equal(res, exp)
-
-
-class TestCategoricalSubclassing(object):
-
- def test_constructor(self):
- sc = tm.SubclassedCategorical(['a', 'b', 'c'])
- assert isinstance(sc, tm.SubclassedCategorical)
- tm.assert_categorical_equal(sc, Categorical(['a', 'b', 'c']))
-
- def test_from_codes(self):
- sc = tm.SubclassedCategorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
- assert isinstance(sc, tm.SubclassedCategorical)
- exp = Categorical.from_codes([1, 0, 2], ['a', 'b', 'c'])
- tm.assert_categorical_equal(sc, exp)
-
- def test_map(self):
- sc = tm.SubclassedCategorical(['a', 'b', 'c'])
- res = sc.map(lambda x: x.upper())
- assert isinstance(res, tm.SubclassedCategorical)
- exp = Categorical(['A', 'B', 'C'])
- tm.assert_categorical_equal(res, exp)
diff --git a/setup.py b/setup.py
index e6480cfedaee0..19818b07162cf 100755
--- a/setup.py
+++ b/setup.py
@@ -747,6 +747,7 @@ def pxd(name):
'pandas.tests',
'pandas.tests.api',
'pandas.tests.dtypes',
+ 'pandas.tests.categorical',
'pandas.tests.computation',
'pandas.tests.sparse',
'pandas.tests.frame',
| - [X] closes #18497
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Speaking to the methodology here, I created separate files mirroring what was provided in the other test packages. Within each file, there is one class for standard categorical tests and one for "block" tests, owing back to how this file is currently structured.
Because there were setup functions for only a very limited number of test cases, rather than creating those as separate classes within each module I lumped all of them into one ``test_generic.py`` file. | https://api.github.com/repos/pandas-dev/pandas/pulls/18508 | 2017-11-26T23:54:12Z | 2017-12-08T00:25:37Z | 2017-12-08T00:25:36Z | 2017-12-12T15:41:30Z |
Add nrows parameter to pandas.read_excel() | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index c3a0e3599a0f9..32b548e5f32f1 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -135,7 +135,7 @@ Other Enhancements
- Improved wording of ``ValueError`` raised in :func:`read_csv` when the ``usecols`` argument cannot match all columns. (:issue:`17301`)
- :func:`DataFrame.corrwith` now silently drops non-numeric columns when passed a Series. Before, an exception was raised (:issue:`18570`).
- :class:`IntervalIndex` now supports time zone aware ``Interval`` objects (:issue:`18537`, :issue:`18538`)
-
+- :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`)
.. _whatsnew_0220.api_breaking:
@@ -188,6 +188,7 @@ Other API Changes
- :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`)
- The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`)
- Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`)
+- Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:pr:`16672`)
.. _whatsnew_0220.deprecations:
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 882130bedcbf0..a1dcd52b61270 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -70,6 +70,7 @@
* None -> All sheets as a dictionary of DataFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
+
.. deprecated:: 0.21.0
Use `sheet_name` instead
@@ -77,24 +78,29 @@
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
-skiprows : list-like
- Rows to skip at the beginning (0-indexed)
-skip_footer : int, default 0
- Rows at the end to skip (0-indexed)
+names : array-like, default None
+ List of column names to use. If file contains no header row,
+ then you should explicitly pass header=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
-names : array-like, default None
- List of column names to use. If file contains no header row,
- then you should explicitly pass header=None
-converters : dict, default None
- Dict of functions for converting values in certain columns. Keys can
- either be integers or column labels, values are functions that take one
- input argument, the Excel cell content, and return the transformed
- content.
+parse_cols : int or list, default None
+
+ .. deprecated:: 0.21.0
+ Pass in `usecols` instead.
+
+usecols : int or list, default None
+ * If None then parse all columns,
+ * If int then indicates last column to be parsed
+ * If list of ints then indicates list of column numbers to be parsed
+ * If string then indicates comma separated list of Excel column letters and
+ column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
+ both sides.
+squeeze : boolean, default False
+ If the parsed data only contains one column then return a Series
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
@@ -103,6 +109,14 @@
.. versionadded:: 0.20.0
+engine: string, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Acceptable values are None or xlrd
+converters : dict, default None
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels, values are functions that take one
+ input argument, the Excel cell content, and return the transformed
+ content.
true_values : list, default None
Values to consider as True
@@ -113,36 +127,29 @@
.. versionadded:: 0.19.0
-parse_cols : int or list, default None
- .. deprecated:: 0.21.0
- Pass in `usecols` instead.
+skiprows : list-like
+ Rows to skip at the beginning (0-indexed)
+nrows : int, default None
+ Number of rows to parse
+
+ .. versionadded:: 0.22.0
-usecols : int or list, default None
- * If None then parse all columns,
- * If int then indicates last column to be parsed
- * If list of ints then indicates list of column numbers to be parsed
- * If string then indicates comma separated list of Excel column letters and
- column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
- both sides.
-squeeze : boolean, default False
- If the parsed data only contains one column then return a Series
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70) + """'.
-thousands : str, default None
- Thousands separator for parsing string columns to numeric. Note that
- this parameter is only necessary for columns stored as TEXT in Excel,
- any numeric columns will automatically be parsed, regardless of display
- format.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
-engine: string, default None
- If io is not a buffer or path, this must be set to identify io.
- Acceptable values are None or xlrd
+thousands : str, default None
+ Thousands separator for parsing string columns to numeric. Note that
+ this parameter is only necessary for columns stored as TEXT in Excel,
+ any numeric columns will automatically be parsed, regardless of display
+ format.
+skip_footer : int, default 0
+ Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
@@ -193,12 +200,27 @@ def get_writer(engine_name):
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
-def read_excel(io, sheet_name=0, header=0, skiprows=None, skip_footer=0,
- index_col=None, names=None, usecols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None,
- convert_float=True, converters=None, dtype=None,
- true_values=None, false_values=None, engine=None,
- squeeze=False, **kwds):
+def read_excel(io,
+ sheet_name=0,
+ header=0,
+ names=None,
+ index_col=None,
+ usecols=None,
+ squeeze=False,
+ dtype=None,
+ engine=None,
+ converters=None,
+ true_values=None,
+ false_values=None,
+ skiprows=None,
+ nrows=None,
+ na_values=None,
+ parse_dates=False,
+ date_parser=None,
+ thousands=None,
+ skip_footer=0,
+ convert_float=True,
+ **kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special meaning
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
@@ -213,12 +235,25 @@ def read_excel(io, sheet_name=0, header=0, skiprows=None, skip_footer=0,
io = ExcelFile(io, engine=engine)
return io._parse_excel(
- sheetname=sheet_name, header=header, skiprows=skiprows, names=names,
- index_col=index_col, usecols=usecols, parse_dates=parse_dates,
- date_parser=date_parser, na_values=na_values, thousands=thousands,
- convert_float=convert_float, skip_footer=skip_footer,
- converters=converters, dtype=dtype, true_values=true_values,
- false_values=false_values, squeeze=squeeze, **kwds)
+ sheetname=sheet_name,
+ header=header,
+ names=names,
+ index_col=index_col,
+ usecols=usecols,
+ squeeze=squeeze,
+ dtype=dtype,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ thousands=thousands,
+ skip_footer=skip_footer,
+ convert_float=convert_float,
+ **kwds)
class ExcelFile(object):
@@ -282,11 +317,25 @@ def __init__(self, io, **kwds):
def __fspath__(self):
return self._io
- def parse(self, sheet_name=0, header=0, skiprows=None, skip_footer=0,
- names=None, index_col=None, usecols=None, parse_dates=False,
- date_parser=None, na_values=None, thousands=None,
- convert_float=True, converters=None, true_values=None,
- false_values=None, squeeze=False, **kwds):
+ def parse(self,
+ sheet_name=0,
+ header=0,
+ names=None,
+ index_col=None,
+ usecols=None,
+ squeeze=False,
+ converters=None,
+ true_values=None,
+ false_values=None,
+ skiprows=None,
+ nrows=None,
+ na_values=None,
+ parse_dates=False,
+ date_parser=None,
+ thousands=None,
+ skip_footer=0,
+ convert_float=True,
+ **kwds):
"""
Parse specified sheet(s) into a DataFrame
@@ -294,19 +343,23 @@ def parse(self, sheet_name=0, header=0, skiprows=None, skip_footer=0,
docstring for more info on accepted parameters
"""
- return self._parse_excel(sheetname=sheet_name, header=header,
- skiprows=skiprows, names=names,
+ return self._parse_excel(sheetname=sheet_name,
+ header=header,
+ names=names,
index_col=index_col,
usecols=usecols,
+ squeeze=squeeze,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
parse_dates=parse_dates,
- date_parser=date_parser, na_values=na_values,
+ date_parser=date_parser,
thousands=thousands,
skip_footer=skip_footer,
convert_float=convert_float,
- converters=converters,
- true_values=true_values,
- false_values=false_values,
- squeeze=squeeze,
**kwds)
def _should_parse(self, i, usecols):
@@ -342,12 +395,26 @@ def _excel2num(x):
else:
return i in usecols
- def _parse_excel(self, sheetname=0, header=0, skiprows=None, names=None,
- skip_footer=0, index_col=None, usecols=None,
- parse_dates=False, date_parser=None, na_values=None,
- thousands=None, convert_float=True, true_values=None,
- false_values=None, verbose=False, dtype=None,
- squeeze=False, **kwds):
+ def _parse_excel(self,
+ sheetname=0,
+ header=0,
+ names=None,
+ index_col=None,
+ usecols=None,
+ squeeze=False,
+ dtype=None,
+ true_values=None,
+ false_values=None,
+ skiprows=None,
+ nrows=None,
+ na_values=None,
+ verbose=False,
+ parse_dates=False,
+ date_parser=None,
+ thousands=None,
+ skip_footer=0,
+ convert_float=True,
+ **kwds):
skipfooter = kwds.pop('skipfooter', None)
if skipfooter is not None:
@@ -509,21 +576,24 @@ def _parse_cell(cell_contents, cell_typ):
# GH 12292 : error when read one empty column from excel file
try:
- parser = TextParser(data, header=header, index_col=index_col,
+ parser = TextParser(data,
+ header=header,
+ index_col=index_col,
has_index_names=has_index_names,
- na_values=na_values,
- thousands=thousands,
- parse_dates=parse_dates,
- date_parser=date_parser,
+ squeeze=squeeze,
+ dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ thousands=thousands,
skipfooter=skip_footer,
- squeeze=squeeze,
- dtype=dtype,
**kwds)
- output[asheetname] = parser.read()
+ output[asheetname] = parser.read(nrows=nrows)
if names is not None:
output[asheetname].columns = names
if not squeeze or isinstance(output[asheetname], DataFrame):
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 83b1d8ec1a070..a04d77de08950 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -440,7 +440,7 @@ def _read(filepath_or_buffer, kwds):
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
- nrows = _validate_integer('nrows', kwds.get('nrows', None))
+ nrows = kwds.get('nrows', None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
@@ -1062,6 +1062,8 @@ def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
+ nrows = _validate_integer('nrows', nrows)
+
if nrows is not None:
if self.options.get('skipfooter'):
raise ValueError('skipfooter not supported for iteration')
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py
index 96117b3c21a9b..3fd55bcad677a 100644
--- a/pandas/tests/io/test_excel.py
+++ b/pandas/tests/io/test_excel.py
@@ -1017,6 +1017,33 @@ def test_read_excel_skiprows_list(self):
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
+ def test_read_excel_nrows(self):
+ # GH 16645
+ num_rows_to_pull = 5
+ actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ nrows=num_rows_to_pull)
+ expected = pd.read_excel(os.path.join(self.dirpath,
+ 'test1' + self.ext))
+ expected = expected[:num_rows_to_pull]
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_nrows_greater_than_nrows_in_file(self):
+ # GH 16645
+ expected = pd.read_excel(os.path.join(self.dirpath,
+ 'test1' + self.ext))
+ num_records_in_file = len(expected)
+ num_rows_to_pull = num_records_in_file + 10
+ actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ nrows=num_rows_to_pull)
+ tm.assert_frame_equal(actual, expected)
+
+ def test_read_excel_nrows_non_integer_parameter(self):
+ # GH 16645
+ msg = "'nrows' must be an integer >=0"
+ with tm.assert_raises_regex(ValueError, msg):
+ pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
+ nrows='5')
+
def test_read_excel_squeeze(self):
# GH 12157
f = os.path.join(self.dirpath, 'test_squeeze' + self.ext)
| - [x] closes #16645
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
I was working on this a few months back, but got busy. Apologies. I'm free until January so thought I'd pick this back up.
### Changes
* Added code for `nrows` parameter (with tests)
* Changed order of parameters to match `pandas.read_csv()` signature (across entire `pandas/io/excel.py` file)
* Increased readability by having one parameter per line
* Updated docstring to match new order (+ new entry for `nrows` param)
I think that covered all the outstanding issues from the [previous (closed) PR](https://github.com/pandas-dev/pandas/pull/16672). Please let me know if you need anything changed or modified. | https://api.github.com/repos/pandas-dev/pandas/pulls/18507 | 2017-11-26T18:32:18Z | 2017-12-09T15:12:35Z | 2017-12-09T15:12:35Z | 2018-01-20T04:25:12Z |
TST: move gbq back to 3.5 build and remove from BUILD_TEST | diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip
index 0d9e44cf39fa4..c9565f2173070 100644
--- a/ci/requirements-3.5.pip
+++ b/ci/requirements-3.5.pip
@@ -1 +1,2 @@
xarray==0.9.1
+pandas_gbq
diff --git a/ci/requirements-3.6_BUILD_TEST.pip b/ci/requirements-3.6_BUILD_TEST.pip
index a0fc77c40bc00..f4617133cad5b 100644
--- a/ci/requirements-3.6_BUILD_TEST.pip
+++ b/ci/requirements-3.6_BUILD_TEST.pip
@@ -1,7 +1,6 @@
xarray
geopandas
seaborn
-pandas_gbq
pandas_datareader
statsmodels
scikit-learn
| https://api.github.com/repos/pandas-dev/pandas/pulls/18506 | 2017-11-26T16:22:57Z | 2017-11-26T21:46:51Z | 2017-11-26T21:46:51Z | 2017-12-11T20:22:28Z | |
fix missing arg in timestamp asvs | diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py
index b8ef309e6a464..fc5e6dc8c06d6 100644
--- a/asv_bench/benchmarks/timestamp.py
+++ b/asv_bench/benchmarks/timestamp.py
@@ -13,55 +13,55 @@ class TimestampProperties(object):
def setup(self, tz):
self.ts = Timestamp('2017-08-25 08:16:14', tzinfo=tz)
- def time_tz(self):
+ def time_tz(self, tz):
self.ts.tz
- def time_offset(self):
+ def time_offset(self, tz):
self.ts.offset
- def time_dayofweek(self):
+ def time_dayofweek(self, tz):
self.ts.dayofweek
- def time_weekday_name(self):
+ def time_weekday_name(self, tz):
self.ts.weekday_name
- def time_dayofyear(self):
+ def time_dayofyear(self, tz):
self.ts.dayofyear
- def time_week(self):
+ def time_week(self, tz):
self.ts.week
- def time_quarter(self):
+ def time_quarter(self, tz):
self.ts.quarter
- def time_days_in_month(self):
+ def time_days_in_month(self, tz):
self.ts.days_in_month
- def time_freqstr(self):
+ def time_freqstr(self, tz):
self.ts.freqstr
- def time_is_month_start(self):
+ def time_is_month_start(self, tz):
self.ts.is_month_start
- def time_is_month_end(self):
+ def time_is_month_end(self, tz):
self.ts.is_month_end
- def time_is_quarter_start(self):
+ def time_is_quarter_start(self, tz):
self.ts.is_quarter_start
- def time_is_quarter_end(self):
+ def time_is_quarter_end(self, tz):
self.ts.is_quarter_end
- def time_is_year_start(self):
+ def time_is_year_start(self, tz):
self.ts.is_quarter_end
- def time_is_year_end(self):
+ def time_is_year_end(self, tz):
self.ts.is_quarter_end
- def time_is_leap_year(self):
+ def time_is_leap_year(self, tz):
self.ts.is_quarter_end
- def time_microsecond(self):
+ def time_microsecond(self, tz):
self.ts.microsecond
@@ -74,13 +74,13 @@ class TimestampOps(object):
def setup(self, tz):
self.ts = Timestamp('2017-08-25 08:16:14', tz=tz)
- def time_replace_tz(self):
+ def time_replace_tz(self, tz):
self.ts.replace(tzinfo=pytz.timezone('US/Eastern'))
- def time_replace_None(self):
+ def time_replace_None(self, tz):
self.ts.replace(tzinfo=None)
- def time_to_pydatetime(self):
+ def time_to_pydatetime(self, tz):
self.ts.to_pydatetime()
| Pretty good bet that I broke this a little while ago... | https://api.github.com/repos/pandas-dev/pandas/pulls/18503 | 2017-11-26T05:06:32Z | 2017-11-26T15:08:20Z | 2017-11-26T15:08:20Z | 2017-11-27T12:26:21Z |
CLN: ASV eval benchmark | diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py
index 6f33590ee9e33..fd18b3f21cf45 100644
--- a/asv_bench/benchmarks/eval.py
+++ b/asv_bench/benchmarks/eval.py
@@ -1,4 +1,4 @@
-from .pandas_vb_common import *
+import numpy as np
import pandas as pd
try:
import pandas.core.computation.expressions as expr
@@ -7,64 +7,61 @@
class Eval(object):
+
goal_time = 0.2
params = [['numexpr', 'python'], [1, 'all']]
param_names = ['engine', 'threads']
def setup(self, engine, threads):
- self.df = DataFrame(np.random.randn(20000, 100))
- self.df2 = DataFrame(np.random.randn(20000, 100))
- self.df3 = DataFrame(np.random.randn(20000, 100))
- self.df4 = DataFrame(np.random.randn(20000, 100))
+ np.random.seed(1234)
+ self.df = pd.DataFrame(np.random.randn(20000, 100))
+ self.df2 = pd.DataFrame(np.random.randn(20000, 100))
+ self.df3 = pd.DataFrame(np.random.randn(20000, 100))
+ self.df4 = pd.DataFrame(np.random.randn(20000, 100))
if threads == 1:
expr.set_numexpr_threads(1)
def time_add(self, engine, threads):
- df, df2, df3, df4 = self.df, self.df2, self.df3, self.df4
- pd.eval('df + df2 + df3 + df4', engine=engine)
+ pd.eval('self.df + self.df2 + self.df3 + self.df4', engine=engine)
def time_and(self, engine, threads):
- df, df2, df3, df4 = self.df, self.df2, self.df3, self.df4
- pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine=engine)
+ pd.eval('(self.df > 0) & (self.df2 > 0) & '
+ '(self.df3 > 0) & (self.df4 > 0)', engine=engine)
def time_chained_cmp(self, engine, threads):
- df, df2, df3, df4 = self.df, self.df2, self.df3, self.df4
- pd.eval('df < df2 < df3 < df4', engine=engine)
+ pd.eval('self.df < self.df2 < self.df3 < self.df4', engine=engine)
def time_mult(self, engine, threads):
- df, df2, df3, df4 = self.df, self.df2, self.df3, self.df4
- pd.eval('df * df2 * df3 * df4', engine=engine)
+ pd.eval('self.df * self.df2 * self.df3 * self.df4', engine=engine)
def teardown(self, engine, threads):
expr.set_numexpr_threads()
class Query(object):
+
goal_time = 0.2
def setup(self):
- self.N = 1000000
- self.halfway = ((self.N // 2) - 1)
- self.index = date_range('20010101', periods=self.N, freq='T')
- self.s = Series(self.index)
+ np.random.seed(1234)
+ self.N = 10**6
+ self.halfway = (self.N // 2) - 1
+ self.index = pd.date_range('20010101', periods=self.N, freq='T')
+ self.s = pd.Series(self.index)
self.ts = self.s.iloc[self.halfway]
- self.df = DataFrame({'a': np.random.randn(self.N), }, index=self.index)
- self.df2 = DataFrame({'dates': self.s.values,})
-
- self.df3 = DataFrame({'a': np.random.randn(self.N),})
- self.min_val = self.df3['a'].min()
- self.max_val = self.df3['a'].max()
+ self.df = pd.DataFrame({'a': np.random.randn(self.N), 'dates': self.s},
+ index=self.index)
+ self.data = np.random.randn(self.N)
+ self.min_val = self.data.min()
+ self.max_val = self.data.max()
def time_query_datetime_index(self):
- ts = self.ts
- self.df.query('index < @ts')
+ self.df.query('index < @self.ts')
- def time_query_datetime_series(self):
- ts = self.ts
- self.df2.query('dates < @ts')
+ def time_query_datetime_column(self):
+ self.df.query('dates < @self.ts')
def time_query_with_boolean_selection(self):
- min_val, max_val = self.min_val, self.max_val
- self.df.query('(a >= @min_val) & (a <= @max_val)')
+ self.df.query('(a >= @self.min_val) & (a <= @self.max_val)')
| - Added `np.random.seed(1234)` in setup classes where random data is created xref #8144
- Ran flake8 and replaced star imports
- Removed some initialization of variables within the `time_*` methods
```
asv run -b ^eval
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 14.29%] ··· Running eval.Eval.time_add 41.8±0.7ms;...
[ 28.57%] ··· Running eval.Eval.time_and 55.5±0.4ms;...
[ 42.86%] ··· Running eval.Eval.time_chained_cmp 48.7±1ms;...
[ 57.14%] ··· Running eval.Eval.time_mult 38.5±1ms;...
[ 71.43%] ··· Running eval.Query.time_query_datetime_column 19.1ms
[ 85.71%] ··· Running eval.Query.time_query_datetime_index 51.9ms
[100.00%] ··· Running eval.Query.time_query_with_boolean_selection 66.9ms
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18500 | 2017-11-26T03:32:09Z | 2017-11-26T15:00:32Z | 2017-11-26T15:00:32Z | 2017-11-27T17:31:13Z |
CLN: ASV frame_ctor benchmark | diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py
index 2ee5f5da7a84a..5fad7b682c2ed 100644
--- a/asv_bench/benchmarks/frame_ctor.py
+++ b/asv_bench/benchmarks/frame_ctor.py
@@ -1,32 +1,33 @@
-from .pandas_vb_common import *
+import numpy as np
+import pandas.util.testing as tm
+from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
- from pandas.tseries.offsets import *
+ from pandas.tseries import offsets
except:
from pandas.core.datetools import *
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# Creation from nested dict
class FromDicts(object):
+
goal_time = 0.2
def setup(self):
- (N, K) = (5000, 50)
+ np.random.seed(1234)
+ N, K = 5000, 50
self.index = tm.makeStringIndex(N)
self.columns = tm.makeStringIndex(K)
- self.frame = DataFrame(np.random.randn(N, K), index=self.index, columns=self.columns)
- try:
- self.data = self.frame.to_dict()
- except:
- self.data = self.frame.toDict()
+ self.frame = DataFrame(np.random.randn(N, K),
+ index=self.index,
+ columns=self.columns)
+ self.data = self.frame.to_dict()
self.some_dict = list(self.data.values())[0]
- self.dict_list = [dict(zip(self.columns, row)) for row in self.frame.values]
-
+ self.dict_list = self.frame.to_dict(orient='records')
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
-
def time_frame_ctor_list_of_dict(self):
DataFrame(self.dict_list)
@@ -38,38 +39,21 @@ def time_series_ctor_from_dict(self):
def time_frame_ctor_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
- DataFrame(self.data)
+ DataFrame(self.data2)
# from a mi-series
-class frame_from_series(object):
+class FromSeries(object):
goal_time = 0.2
def setup(self):
- self.mi = MultiIndex.from_tuples([(x, y) for x in range(100) for y in range(100)])
- self.s = Series(randn(10000), index=self.mi)
+ self.mi = MultiIndex.from_product([range(100), range(100)])
+ self.s = Series(np.random.randn(10000), index=self.mi)
def time_frame_from_mi_series(self):
DataFrame(self.s)
-
-#----------------------------------------------------------------------
-# get_numeric_data
-
-class frame_get_numeric_data(object):
- goal_time = 0.2
-
- def setup(self):
- self.df = DataFrame(randn(10000, 25))
- self.df['foo'] = 'bar'
- self.df['bar'] = 'baz'
- self.df = self.df.consolidate()
-
- def time_frame_get_numeric_data(self):
- self.df._get_numeric_data()
-
-
# ----------------------------------------------------------------------
# From dict with DatetimeIndex with all offsets
@@ -84,13 +68,15 @@ def get_period_count(start_date, off):
if (ten_offsets_in_days == 0):
return 1000
else:
- return min((9 * ((Timestamp.max - start_date).days // ten_offsets_in_days)), 1000)
+ periods = 9 * (Timestamp.max - start_date).days // ten_offsets_in_days
+ return min(periods, 1000)
def get_index_for_offset(off):
start_date = Timestamp('1/1/1900')
- return date_range(start_date, periods=min(1000, get_period_count(
- start_date, off)), freq=off)
+ return date_range(start_date,
+ periods=get_period_count(start_date, off),
+ freq=off)
all_offsets = offsets.__all__
@@ -100,7 +86,7 @@ def get_index_for_offset(off):
all_offsets.extend([off + '_1', off + '_2'])
-class FrameConstructorDTIndexFromOffsets(object):
+class FromDictwithTimestampOffsets(object):
params = [all_offsets, [1, 2]]
param_names = ['offset', 'n_steps']
@@ -108,13 +94,15 @@ class FrameConstructorDTIndexFromOffsets(object):
offset_kwargs = {'WeekOfMonth': {'weekday': 1, 'week': 1},
'LastWeekOfMonth': {'weekday': 1, 'week': 1},
'FY5253': {'startingMonth': 1, 'weekday': 1},
- 'FY5253Quarter': {'qtr_with_extra_week': 1, 'startingMonth': 1, 'weekday': 1}}
+ 'FY5253Quarter': {'qtr_with_extra_week': 1,
+ 'startingMonth': 1,
+ 'weekday': 1}}
offset_extra_cases = {'FY5253': {'variation': ['nearest', 'last']},
'FY5253Quarter': {'variation': ['nearest', 'last']}}
def setup(self, offset, n_steps):
-
+ np.random.seed(1234)
extra = False
if offset.endswith("_", None, -1):
extra = int(offset[-1])
@@ -127,12 +115,12 @@ def setup(self, offset, n_steps):
if extra:
extras = self.offset_extra_cases[offset]
for extra_arg in extras:
- kwargs[extra_arg] = extras[extra_arg][extra -1]
+ kwargs[extra_arg] = extras[extra_arg][extra - 1]
offset = getattr(offsets, offset)
self.idx = get_index_for_offset(offset(n_steps, **kwargs))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
- self.d = dict(self.df.items())
+ self.d = self.df.to_dict()
def time_frame_ctor(self, offset, n_steps):
DataFrame(self.d)
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index af72ca1e9a6ab..53ee4d8019938 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -1,6 +1,20 @@
from .pandas_vb_common import *
import string
+#----------------------------------------------------------------------
+# get_numeric_data
+
+class frame_get_numeric_data(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.df = DataFrame(np.random.randn(10000, 25))
+ self.df['foo'] = 'bar'
+ self.df['bar'] = 'baz'
+ self.df = self.df.consolidate()
+
+ def time_frame_get_numeric_data(self):
+ self.df._get_numeric_data()
#----------------------------------------------------------------------
# lookup
| - Added `np.random.seed(1234)` in setup classes where random data is created (xref #8144)
- Ran flake8 and replaced star imports (but `from pandas.core.datetools import *` might need to be kept for compat?)
- `time_frame_ctor_nested_dict_int64` was using `self.data` instead of `self.data2`
- Moved the class `frame_get_numeric_data` to `frame_methods.py`
```
asv run -b ^frame_ctor
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 16.67%] ··· Running frame_ctor.FromDicts.time_frame_ctor_list_of_dict 113±0.3ms
[ 33.33%] ··· Running frame_ctor.FromDicts.time_frame_ctor_nested_dict 92.7±0.3ms
[ 50.00%] ··· Running frame_ctor.FromDicts.time_frame_ctor_nested_dict_int64 243±0.5ms
[ 66.67%] ··· Running frame_ctor.FromDicts.time_series_ctor_from_dict 6.17±0.01ms
[ 83.33%] ··· Running frame_ctor.FromDictwithTimestampOffsets.time_frame_ctor 2/76 failed
[100.00%] ··· Running frame_ctor.FromSeries.time_frame_from_mi_series 243±0.7μs
```
The supposed offsets benchmark failures do not show up when running asv dev?
```
asv dev -b ^frame_ctor
· Discovering benchmarks
· Running 6 total benchmarks (1 commits * 1 environments * 6 benchmarks)
[ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python
[ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python
[ 16.67%] ··· Running frame_ctor.FromDicts.time_frame_ctor_list_of_dict 153ms
[ 33.33%] ··· Running frame_ctor.FromDicts.time_frame_ctor_nested_dict 147ms
[ 50.00%] ··· Running frame_ctor.FromDicts.time_frame_ctor_nested_dict_int64 217ms
[ 66.67%] ··· Running frame_ctor.FromDicts.time_series_ctor_from_dict 6.09ms
[ 83.33%] ··· Running frame_ctor.FromDictwithTimestampOffsets.time_frame_ctor ok
[ 83.33%] ····
==================== ======== ========
-- n_steps
-------------------- -----------------
offset 1 2
==================== ======== ========
Day 96.5ms 96.7ms
BusinessDay 96.7ms 96.5ms
BDay 99.8ms 96.8ms
CustomBusinessDay 98.8ms 97.5ms
CDay 96.7ms 95.8ms
CBMonthEnd 96.8ms 97.7ms
CBMonthBegin 97.0ms 98.0ms
MonthBegin 96.4ms 97.6ms
BMonthBegin 98.1ms 96.4ms
MonthEnd 95.8ms 96.6ms
BMonthEnd 96.7ms 96.9ms
SemiMonthEnd 96.2ms 97.3ms
SemiMonthBegin 96.3ms 98.1ms
BusinessHour 96.5ms 97.1ms
CustomBusinessHour 97.2ms 97.0ms
YearBegin 33.4ms 17.5ms
BYearBegin 32.3ms 17.6ms
YearEnd 32.2ms 17.3ms
BYearEnd 36.2ms 17.9ms
QuarterBegin 98.0ms 64.9ms
BQuarterBegin 99.5ms 70.6ms
QuarterEnd 101ms 67.9ms
BQuarterEnd 98.3ms 69.0ms
LastWeekOfMonth 97.1ms 102ms
Week 97.5ms 102ms
WeekOfMonth 104ms 102ms
Easter 35.4ms 18.0ms
Hour 102ms 96.3ms
Minute 95.8ms 100ms
Second 95.8ms 97.2ms
Milli 98.7ms 101ms
Micro 94.7ms 93.5ms
Nano 71.8ms 72.3ms
DateOffset 95.9ms 96.1ms
FY5253_1 36.9ms 18.1ms
FY5253_2 35.8ms 18.2ms
FY5253Quarter_1 98.7ms 66.2ms
FY5253Quarter_2 96.3ms 65.4ms
==================== ======== ========
[100.00%] ··· Running frame_ctor.FromSeries.time_frame_from_mi_series 282μs
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/18499 | 2017-11-26T03:16:41Z | 2017-11-26T15:01:54Z | 2017-11-26T15:01:54Z | 2017-11-27T03:21:45Z |
Construction of Series from dict containing NaN as key | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 4a27bf54de695..411583404a32d 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -68,6 +68,7 @@ Other API Changes
- :func:`Series.astype` and :func:`Index.astype` with an incompatible dtype will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`18231`)
- ``Series`` construction with an ``object`` dtyped tz-aware datetime and ``dtype=object`` specified, will now return an ``object`` dtyped ``Series``, previously this would infer the datetime dtype (:issue:`18231`)
+- A :class:`Series` of ``dtype=category`` constructed from an empty ``dict`` will now have categories of ``dtype=object`` rather than ``dtype=float64``, consistently with the case in which an empty list is passed (:issue:`18515`)
- ``NaT`` division with :class:`datetime.timedelta` will now return ``NaN`` instead of raising (:issue:`17876`)
- All-NaN levels in a ``MultiIndex`` are now assigned ``float`` rather than ``object`` dtype, promoting consistency with ``Index`` (:issue:`17929`).
- :class:`Timestamp` will no longer silently ignore unused or invalid ``tz`` or ``tzinfo`` keyword arguments (:issue:`17690`)
@@ -208,5 +209,6 @@ Other
- Improved error message when attempting to use a Python keyword as an identifier in a numexpr query (:issue:`18221`)
- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`)
+- Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
- Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`)
-
diff --git a/pandas/core/base.py b/pandas/core/base.py
index ae92b62ce1d11..72acd0052202b 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -874,9 +874,8 @@ def _map_values(self, mapper, na_action=None):
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
- from pandas import Series, Index
- index = Index(mapper, tupleize_cols=False)
- mapper = Series(mapper, index=index)
+ from pandas import Series
+ mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 10f9022e2666b..2bf3afe47d007 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2822,27 +2822,6 @@ def get_indexer_for(self, target, **kwargs):
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
- _index_shared_docs['_get_values_from_dict'] = """
- Return the values of the input dictionary in the order the keys are
- in the index. np.nan is returned for index values not in the
- dictionary.
-
- Parameters
- ----------
- data : dict
- The dictionary from which to extract the values
-
- Returns
- -------
- np.array
-
- """
-
- @Appender(_index_shared_docs['_get_values_from_dict'])
- def _get_values_from_dict(self, data):
- return lib.fast_multiget(data, self.values,
- default=np.nan)
-
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 5643d886a4fec..f2bf528d7cc6b 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -698,14 +698,6 @@ def __rsub__(self, other):
def _add_delta(self, other):
return NotImplemented
- @Appender(_index_shared_docs['_get_values_from_dict'])
- def _get_values_from_dict(self, data):
- if len(data):
- return np.array([data.get(i, np.nan)
- for i in self.asobject.values])
-
- return np.array([np.nan])
-
def _add_delta_td(self, other):
# add a delta of a timedeltalike
# return the i8 result view
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 196c881f97526..ee6263a9f0aad 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -1457,17 +1457,6 @@ def get_value_maybe_box(self, series, key):
key, tz=self.tz)
return _maybe_box(self, values, series, key)
- @Appender(_index_shared_docs['_get_values_from_dict'])
- def _get_values_from_dict(self, data):
- if len(data):
- # coerce back to datetime objects for lookup
- data = com._dict_compat(data)
- return lib.fast_multiget(data,
- self.asobject.values,
- default=np.nan)
-
- return np.array([np.nan])
-
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
diff --git a/pandas/core/series.py b/pandas/core/series.py
index bff7c21ad69b1..5d0e6907a6595 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -42,7 +42,6 @@
_default_index,
_asarray_tuplesafe,
_values_from_object,
- _try_sort,
_maybe_match_name,
SettingWithCopyError,
_maybe_box_datetimelike,
@@ -198,18 +197,9 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
- if index is None:
- if isinstance(data, OrderedDict):
- index = Index(data)
- else:
- index = Index(_try_sort(data))
-
- try:
- data = index._get_values_from_dict(data)
- except TypeError:
- data = ([data.get(i, np.nan) for i in index]
- if data else np.nan)
-
+ data, index = self._init_dict(data, index, dtype)
+ dtype = None
+ copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
@@ -257,6 +247,45 @@ def __init__(self, data=None, index=None, dtype=None, name=None,
self.name = name
self._set_axis(0, index, fastpath=True)
+ def _init_dict(self, data, index=None, dtype=None):
+ """
+ Derive the "_data" and "index" attributes of a new Series from a
+ dictionary input.
+
+ Parameters
+ ----------
+ data : dict or dict-like
+ Data used to populate the new Series
+ index : Index or index-like, default None
+ index for the new Series: if None, use dict keys
+ dtype : dtype, default None
+ dtype for the new Series: if None, infer from data
+
+ Returns
+ -------
+ _data : BlockManager for the new Series
+ index : index for the new Series
+ """
+ # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
+ # raises KeyError), so we iterate the entire dict, and align
+ if data:
+ keys, values = zip(*compat.iteritems(data))
+ else:
+ keys, values = [], []
+
+ # Input is now list-like, so rely on "standard" construction:
+ s = Series(values, index=keys, dtype=dtype)
+
+ # Now we just make sure the order is respected, if any
+ if index is not None:
+ s = s.reindex(index, copy=False)
+ elif not isinstance(data, OrderedDict):
+ try:
+ s = s.sort_index()
+ except TypeError:
+ pass
+ return s._data, s.index
+
@classmethod
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,
fastpath=False):
diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py
index fe21ba569ae99..cafe6a34720be 100644
--- a/pandas/tests/series/test_apply.py
+++ b/pandas/tests/series/test_apply.py
@@ -422,6 +422,7 @@ def test_map_dict_with_tuple_keys(self):
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
+ # GH 18496
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py
index 71ac00975af03..6cf60e818c845 100644
--- a/pandas/tests/series/test_combine_concat.py
+++ b/pandas/tests/series/test_combine_concat.py
@@ -181,7 +181,8 @@ def test_concat_empty_series_dtypes(self):
# categorical
assert pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype == 'category'
- assert pd.concat([Series(dtype='category'),
+ # GH 18515
+ assert pd.concat([Series(np.array([]), dtype='category'),
Series(dtype='float64')]).dtype == 'float64'
assert pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype == 'object'
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index ccc04da3299fe..a57385a9cf690 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -4,6 +4,7 @@
import pytest
from datetime import datetime, timedelta
+from collections import OrderedDict
from numpy import nan
import numpy as np
@@ -79,17 +80,42 @@ def test_constructor(self):
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
pytest.raises(NotImplementedError, Series, m)
- def test_constructor_empty(self):
+ @pytest.mark.parametrize('input_class', [list, dict, OrderedDict])
+ def test_constructor_empty(self, input_class):
empty = Series()
- empty2 = Series([])
+ empty2 = Series(input_class())
- # the are Index() and RangeIndex() which don't compare type equal
+ # these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
assert_series_equal(empty, empty2, check_index_type=False)
- empty = Series(index=lrange(10))
- empty2 = Series(np.nan, index=lrange(10))
- assert_series_equal(empty, empty2)
+ # With explicit dtype:
+ empty = Series(dtype='float64')
+ empty2 = Series(input_class(), dtype='float64')
+ assert_series_equal(empty, empty2, check_index_type=False)
+
+ # GH 18515 : with dtype=category:
+ empty = Series(dtype='category')
+ empty2 = Series(input_class(), dtype='category')
+ assert_series_equal(empty, empty2, check_index_type=False)
+
+ if input_class is not list:
+ # With index:
+ empty = Series(index=lrange(10))
+ empty2 = Series(input_class(), index=lrange(10))
+ assert_series_equal(empty, empty2)
+
+ # With index and dtype float64:
+ empty = Series(np.nan, index=lrange(10))
+ empty2 = Series(input_class(), index=lrange(10), dtype='float64')
+ assert_series_equal(empty, empty2)
+
+ @pytest.mark.parametrize('input_arg', [np.nan, float('nan')])
+ def test_constructor_nan(self, input_arg):
+ empty = Series(dtype='float64', index=lrange(10))
+ empty2 = Series(input_arg, index=lrange(10))
+
+ assert_series_equal(empty, empty2, check_index_type=False)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
@@ -625,6 +651,21 @@ def test_constructor_dict(self):
expected.iloc[1] = 1
assert_series_equal(result, expected)
+ @pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
+ def test_constructor_dict_nan_key(self, value):
+ # GH 18480
+ d = {1: 'a', value: 'b', float('nan'): 'c', 4: 'd'}
+ result = Series(d).sort_values()
+ expected = Series(['a', 'b', 'c', 'd'], index=[1, value, np.nan, 4])
+ assert_series_equal(result, expected)
+
+ # MultiIndex:
+ d = {(1, 1): 'a', (2, np.nan): 'b', (3, value): 'c'}
+ result = Series(d).sort_values()
+ expected = Series(['a', 'b', 'c'],
+ index=Index([(1, 1), (2, np.nan), (3, value)]))
+ assert_series_equal(result, expected)
+
def test_constructor_dict_datetime64_index(self):
# GH 9456
@@ -658,8 +699,6 @@ def test_constructor_tuple_of_tuples(self):
s = Series(data)
assert tuple(s) == data
- @pytest.mark.xfail(reason='GH 18480 (Series initialization from dict with '
- 'NaN keys')
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3,
(None, 5): 6}
| - [x] closes #18480
closes #18515
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
This is also a prerequisite for fixing #18455 (which is a prerequisite for fixing #18460 ). The workaround to #18485 is annoying, but it will be easy to remove once that bug is fixed.
Improved description of seaborn | diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index f7d1edff15cfb..8ed647c2a19bc 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -53,6 +53,18 @@ the latest web technologies. Its goal is to provide elegant, concise constructio
graphics in the style of Protovis/D3, while delivering high-performance interactivity over
large data to thin clients.
+`seaborn <https://seaborn.pydata.org>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Seaborn is a Python visualization library based on `matplotlib
+<http://matplotlib.org>`__. It provides a high-level, dataset-oriented
+interface for creating attractive statistical graphics. The plotting functions
+in seaborn understand pandas objects and leverage pandas grouping operations
+internally to support concise specification of complex visualizations. Seaborn
+also goes beyond matplotlib and pandas with the option to perform statistical
+estimation while plotting, aggregating across observations and visualizing the
+fit of statistical models to emphasize patterns in a dataset.
+
`yhat/ggplot <https://github.com/yhat/ggplot>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -64,15 +76,6 @@ but a faithful implementation for python users has long been missing. Although s
(as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been
progressing quickly in that direction.
-`Seaborn <https://github.com/mwaskom/seaborn>`__
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Although pandas has quite a bit of "just plot it" functionality built-in, visualization and
-in particular statistical graphics is a vast field with a long tradition and lots of ground
-to cover. The `Seaborn <https://github.com/mwaskom/seaborn>`__ project builds on top of pandas
-and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to
-more advanced types of plots then those offered by pandas.
-
`Vincent <https://github.com/wrobstory/vincent>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| The pandas docs send a surprising amount of traffic towards seaborn, but
I've never thought that the existing description communicates the
goals/advantages of seaborn all that well. Please consider this revised
copy.
I also swapped the ordering of seaborn/py-ggplot, because it looks like
that package has stalled out a bit, but I'm happy to revert the
reordering if it bothers people. | https://api.github.com/repos/pandas-dev/pandas/pulls/18495 | 2017-11-26T01:24:22Z | 2017-11-26T15:13:25Z | 2017-11-26T15:13:25Z | 2017-12-11T20:22:34Z |
parametrize offsets tests | diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py
new file mode 100644
index 0000000000000..ea826e8270ace
--- /dev/null
+++ b/asv_bench/benchmarks/offset.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+from datetime import datetime
+
+import numpy as np
+
+import pandas as pd
+from pandas import date_range
+
+try:
+ import pandas.tseries.holiday
+except ImportError:
+ pass
+
+hcal = pd.tseries.holiday.USFederalHolidayCalendar()
+
+
+class ApplyIndex(object):
+ goal_time = 0.2
+
+ params = [pd.offsets.YearEnd(), pd.offsets.YearBegin(),
+ pd.offsets.BYearEnd(), pd.offsets.BYearBegin(),
+ pd.offsets.QuarterEnd(), pd.offsets.QuarterBegin(),
+ pd.offsets.BQuarterEnd(), pd.offsets.BQuarterBegin(),
+ pd.offsets.MonthEnd(), pd.offsets.MonthBegin(),
+ pd.offsets.BMonthEnd(), pd.offsets.BMonthBegin()]
+
+ def setup(self, param):
+ self.offset = param
+
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ self.ser = pd.Series(self.rng)
+
+ def time_apply_index(self, param):
+ self.rng + self.offset
+
+ def time_apply_series(self, param):
+ self.ser + self.offset
+
+
+class DatetimeIndexArithmetic(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ self.day_offset = pd.offsets.Day()
+ self.relativedelta_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.busday_offset = pd.offsets.BusinessDay()
+
+ def time_add_offset_delta(self):
+ self.rng + self.day_offset
+
+ def time_add_offset_fast(self):
+ self.rng + self.relativedelta_offset
+
+ def time_add_offset_slow(self):
+ self.rng + self.busday_offset
+
+
+class SeriesArithmetic(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ rng = date_range(start='20140101', freq='T', periods=self.N)
+ self.ser = pd.Series(rng)
+ self.day_offset = pd.offsets.Day()
+ self.relativedelta_offset = pd.offsets.DateOffset(months=2, days=2)
+ self.busday_offset = pd.offsets.BusinessDay()
+
+ def time_add_offset_delta(self):
+ self.ser + self.day_offset
+
+ def time_add_offset_fast(self):
+ self.ser + self.relativedelta_offset
+
+ def time_add_offset_slow(self):
+ self.ser + self.busday_offset
+
+
+class YearBegin(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.year = pd.offsets.YearBegin()
+
+ def time_timeseries_year_apply(self):
+ self.year.apply(self.date)
+
+ def time_timeseries_year_incr(self):
+ self.date + self.year
+
+
+class Day(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.day = pd.offsets.Day()
+
+ def time_timeseries_day_apply(self):
+ self.day.apply(self.date)
+
+ def time_timeseries_day_incr(self):
+ self.date + self.day
+
+
+class CBDay(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.dt64 = np.datetime64('2011-01-01 09:00Z')
+ self.cday = pd.offsets.CustomBusinessDay()
+
+ def time_custom_bday_decr(self):
+ self.date - self.cday
+
+ def time_custom_bday_incr(self):
+ self.date + self.cday
+
+ def time_custom_bday_apply(self):
+ self.cday.apply(self.date)
+
+ def time_custom_bday_apply_dt64(self):
+ self.cday.apply(self.dt64)
+
+
+class CBDayHolidays(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.cdayh = pd.offsets.CustomBusinessDay(calendar=hcal)
+
+ def time_custom_bday_cal_incr(self):
+ self.date + 1 * self.cdayh
+
+ def time_custom_bday_cal_decr(self):
+ self.date - 1 * self.cdayh
+
+ def time_custom_bday_cal_incr_n(self):
+ self.date + 10 * self.cdayh
+
+ def time_custom_bday_cal_incr_neg_n(self):
+ self.date - 10 * self.cdayh
+
+
+class CBMonthBegin(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.cmb = pd.offsets.CustomBusinessMonthBegin(calendar=hcal)
+
+ def time_custom_bmonthbegin_decr_n(self):
+ self.date - (10 * self.cmb)
+
+ def time_custom_bmonthbegin_incr_n(self):
+ self.date + (10 * self.cmb)
+
+
+class CBMonthEnd(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.date = datetime(2011, 1, 1)
+ self.cme = pd.offsets.CustomBusinessMonthEnd(calendar=hcal)
+
+ def time_custom_bmonthend_incr(self):
+ self.date + self.cme
+
+ def time_custom_bmonthend_incr_n(self):
+ self.date + (10 * self.cme)
+
+ def time_custom_bmonthend_decr_n(self):
+ self.date - (10 * self.cme)
+
+
+class SemiMonthOffset(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000
+ self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
+ # date is not on an offset which will be slowest case
+ self.date = datetime(2011, 1, 2)
+ self.semi_month_end = pd.offsets.SemiMonthEnd()
+ self.semi_month_begin = pd.offsets.SemiMonthBegin()
+
+ def time_end_apply(self):
+ self.semi_month_end.apply(self.date)
+
+ def time_end_incr(self):
+ self.date + self.semi_month_end
+
+ def time_end_incr_n(self):
+ self.date + 10 * self.semi_month_end
+
+ def time_end_decr(self):
+ self.date - self.semi_month_end
+
+ def time_end_decr_n(self):
+ self.date - 10 * self.semi_month_end
+
+ def time_end_apply_index(self):
+ self.semi_month_end.apply_index(self.rng)
+
+ def time_end_incr_rng(self):
+ self.rng + self.semi_month_end
+
+ def time_end_decr_rng(self):
+ self.rng - self.semi_month_end
+
+ def time_begin_apply(self):
+ self.semi_month_begin.apply(self.date)
+
+ def time_begin_incr(self):
+ self.date + self.semi_month_begin
+
+ def time_begin_incr_n(self):
+ self.date + 10 * self.semi_month_begin
+
+ def time_begin_decr(self):
+ self.date - self.semi_month_begin
+
+ def time_begin_decr_n(self):
+ self.date - 10 * self.semi_month_begin
+
+ def time_begin_apply_index(self):
+ self.semi_month_begin.apply_index(self.rng)
+
+ def time_begin_incr_rng(self):
+ self.rng + self.semi_month_begin
+
+ def time_begin_decr_rng(self):
+ self.rng - self.semi_month_begin
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index 2ca2416f58b57..b3996739e33f7 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -2,13 +2,11 @@
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
-from .pandas_vb_common import *
+
import pandas as pd
+from pandas import to_datetime, date_range, Series, DataFrame, period_range
+
import datetime as dt
-try:
- import pandas.tseries.holiday
-except ImportError:
- pass
from pandas.tseries.frequencies import infer_freq
import numpy as np
@@ -22,32 +20,38 @@ class DatetimeIndex(object):
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
- self.delta_offset = pd.offsets.Day()
- self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
- self.slow_offset = pd.offsets.BusinessDay()
- self.rng2 = date_range(start='1/1/2000 9:30', periods=10000, freq='S', tz='US/Eastern')
+ self.rng2 = date_range(start='1/1/2000 9:30', periods=10000,
+ freq='S', tz='US/Eastern')
- self.index_repeated = date_range(start='1/1/2000', periods=1000, freq='T').repeat(10)
+ self.index_repeated = date_range(start='1/1/2000',
+ periods=1000, freq='T').repeat(10)
self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H')
self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3)
- self.rng4 = date_range(start='1/1/2000', periods=1000, freq='H', tz='US/Eastern')
- self.df2 = DataFrame(np.random.randn(len(self.rng4), 2), index=self.rng4)
+ self.rng4 = date_range(start='1/1/2000', periods=1000,
+ freq='H', tz='US/Eastern')
+ self.df2 = DataFrame(np.random.randn(len(self.rng4), 2),
+ index=self.rng4)
N = 100000
self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5)
self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N,
tz='Asia/Tokyo').repeat(5)
- self.rng5 = date_range(start='1/1/2000', end='3/1/2000', tz='US/Eastern')
+ self.rng5 = date_range(start='1/1/2000',
+ end='3/1/2000', tz='US/Eastern')
- self.dst_rng = date_range(start='10/29/2000 1:00:00', end='10/29/2000 1:59:59', freq='S')
- self.index = date_range(start='10/29/2000', end='10/29/2000 00:59:59', freq='S')
+ self.dst_rng = date_range(start='10/29/2000 1:00:00',
+ end='10/29/2000 1:59:59', freq='S')
+ self.index = date_range(start='10/29/2000',
+ end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(self.dst_rng)
self.index = self.index.append(self.dst_rng)
- self.index = self.index.append(date_range(start='10/29/2000 2:00:00', end='10/29/2000 3:00:00', freq='S'))
+ self.index = self.index.append(date_range(start='10/29/2000 2:00:00',
+ end='10/29/2000 3:00:00',
+ freq='S'))
self.N = 10000
self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B')
@@ -62,15 +66,6 @@ def setup(self):
def time_add_timedelta(self):
(self.rng + dt.timedelta(minutes=2))
- def time_add_offset_delta(self):
- (self.rng + self.delta_offset)
-
- def time_add_offset_fast(self):
- (self.rng + self.fast_offset)
-
- def time_add_offset_slow(self):
- (self.rng + self.slow_offset)
-
def time_normalize(self):
self.rng2.normalize()
@@ -116,6 +111,7 @@ def time_to_date(self):
def time_to_pydatetime(self):
self.rng.to_pydatetime()
+
class TimeDatetimeConverter(object):
goal_time = 0.2
@@ -156,7 +152,7 @@ def time_iter_periodindex_preexit(self):
self.iter_n(self.idx2, self.M)
-#----------------------------------------------------------------------
+# ----------------------------------------------------------------------
# Resampling
class ResampleDataFrame(object):
@@ -195,7 +191,8 @@ def setup(self):
self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T')
self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2)
- self.rng3 = date_range(start='2000-01-01 00:00:00', end='2000-01-01 10:00:00', freq='555000U')
+ self.rng3 = date_range(start='2000-01-01 00:00:00',
+ end='2000-01-01 10:00:00', freq='555000U')
self.int_ts = Series(5, self.rng3, dtype='int64')
self.dt_ts = self.int_ts.astype('datetime64[ns]')
@@ -223,7 +220,8 @@ def setup(self):
self.N = 10000
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
self.ts = Series(np.random.randn(self.N), index=self.rng)
- self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
+ self.dates = date_range(start='1/1/1990',
+ periods=(self.N * 10), freq='5s')
self.ts2 = self.ts.copy()
self.ts2[250:5000] = np.nan
self.ts3 = self.ts.copy()
@@ -261,7 +259,8 @@ def setup(self):
self.N = 10000
self.M = 100
self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s')
- self.dates = date_range(start='1/1/1990', periods=(self.N * 10), freq='5s')
+ self.dates = date_range(start='1/1/1990',
+ periods=(self.N * 10), freq='5s')
self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng)
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
@@ -306,8 +305,10 @@ def setup(self):
self.lindex = np.random.permutation(self.N)[:(self.N // 2)]
self.rindex = np.random.permutation(self.N)[:(self.N // 2)]
- self.left = Series(self.ts2.values.take(self.lindex), index=self.ts2.index.take(self.lindex))
- self.right = Series(self.ts2.values.take(self.rindex), index=self.ts2.index.take(self.rindex))
+ self.left = Series(self.ts2.values.take(self.lindex),
+ index=self.ts2.index.take(self.lindex))
+ self.right = Series(self.ts2.values.take(self.rindex),
+ index=self.ts2.index.take(self.rindex))
self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S')
self.ts3 = Series(1, index=self.rng3)
@@ -329,26 +330,6 @@ def time_large_lookup_value(self):
self.ts3.index._cleanup()
-class SeriesArithmetic(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 100000
- self.s = Series(date_range(start='20140101', freq='T', periods=self.N))
- self.delta_offset = pd.offsets.Day()
- self.fast_offset = pd.offsets.DateOffset(months=2, days=2)
- self.slow_offset = pd.offsets.BusinessDay()
-
- def time_add_offset_delta(self):
- (self.s + self.delta_offset)
-
- def time_add_offset_fast(self):
- (self.s + self.fast_offset)
-
- def time_add_offset_slow(self):
- (self.s + self.slow_offset)
-
-
class ToDatetime(object):
goal_time = 0.2
@@ -425,136 +406,6 @@ def time_cache_false_with_dup_string_tzoffset_dates(self):
to_datetime(self.dup_string_with_tz, cache=False)
-class Offsets(object):
- goal_time = 0.2
-
- def setup(self):
- self.date = dt.datetime(2011, 1, 1)
- self.dt64 = np.datetime64('2011-01-01 09:00Z')
- self.hcal = pd.tseries.holiday.USFederalHolidayCalendar()
- self.day = pd.offsets.Day()
- self.year = pd.offsets.YearBegin()
- self.cday = pd.offsets.CustomBusinessDay()
- self.cmb = pd.offsets.CustomBusinessMonthBegin(calendar=self.hcal)
- self.cme = pd.offsets.CustomBusinessMonthEnd(calendar=self.hcal)
- self.cdayh = pd.offsets.CustomBusinessDay(calendar=self.hcal)
-
- def time_timeseries_day_apply(self):
- self.day.apply(self.date)
-
- def time_timeseries_day_incr(self):
- (self.date + self.day)
-
- def time_timeseries_year_apply(self):
- self.year.apply(self.date)
-
- def time_timeseries_year_incr(self):
- (self.date + self.year)
-
- # custom business offsets
-
- def time_custom_bday_decr(self):
- (self.date - self.cday)
-
- def time_custom_bday_incr(self):
- (self.date + self.cday)
-
- def time_custom_bday_apply(self):
- self.cday.apply(self.date)
-
- def time_custom_bday_apply_dt64(self):
- self.cday.apply(self.dt64)
-
- def time_custom_bday_cal_incr(self):
- self.date + 1 * self.cdayh
-
- def time_custom_bday_cal_decr(self):
- self.date - 1 * self.cdayh
-
- def time_custom_bday_cal_incr_n(self):
- self.date + 10 * self.cdayh
-
- def time_custom_bday_cal_incr_neg_n(self):
- self.date - 10 * self.cdayh
-
- # Increment custom business month
-
- def time_custom_bmonthend_incr(self):
- (self.date + self.cme)
-
- def time_custom_bmonthend_incr_n(self):
- (self.date + (10 * self.cme))
-
- def time_custom_bmonthend_decr_n(self):
- (self.date - (10 * self.cme))
-
- def time_custom_bmonthbegin_decr_n(self):
- (self.date - (10 * self.cmb))
-
- def time_custom_bmonthbegin_incr_n(self):
- (self.date + (10 * self.cmb))
-
-
-class SemiMonthOffset(object):
- goal_time = 0.2
-
- def setup(self):
- self.N = 100000
- self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
- # date is not on an offset which will be slowest case
- self.date = dt.datetime(2011, 1, 2)
- self.semi_month_end = pd.offsets.SemiMonthEnd()
- self.semi_month_begin = pd.offsets.SemiMonthBegin()
-
- def time_end_apply(self):
- self.semi_month_end.apply(self.date)
-
- def time_end_incr(self):
- self.date + self.semi_month_end
-
- def time_end_incr_n(self):
- self.date + 10 * self.semi_month_end
-
- def time_end_decr(self):
- self.date - self.semi_month_end
-
- def time_end_decr_n(self):
- self.date - 10 * self.semi_month_end
-
- def time_end_apply_index(self):
- self.semi_month_end.apply_index(self.rng)
-
- def time_end_incr_rng(self):
- self.rng + self.semi_month_end
-
- def time_end_decr_rng(self):
- self.rng - self.semi_month_end
-
- def time_begin_apply(self):
- self.semi_month_begin.apply(self.date)
-
- def time_begin_incr(self):
- self.date + self.semi_month_begin
-
- def time_begin_incr_n(self):
- self.date + 10 * self.semi_month_begin
-
- def time_begin_decr(self):
- self.date - self.semi_month_begin
-
- def time_begin_decr_n(self):
- self.date - 10 * self.semi_month_begin
-
- def time_begin_apply_index(self):
- self.semi_month_begin.apply_index(self.rng)
-
- def time_begin_incr_rng(self):
- self.rng + self.semi_month_begin
-
- def time_begin_decr_rng(self):
- self.rng - self.semi_month_begin
-
-
class DatetimeAccessor(object):
def setup(self):
self.N = 100000
diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py
new file mode 100644
index 0000000000000..45f12c6931fd9
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_fiscal.py
@@ -0,0 +1,606 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for Fiscal Year and Fiscal Quarter offset classes
+"""
+from datetime import datetime
+
+from dateutil.relativedelta import relativedelta
+import pytest
+
+import pandas.util.testing as tm
+
+from pandas.tseries.frequencies import get_offset, _INVALID_FREQ_ERROR
+from pandas.tseries.offsets import FY5253Quarter, FY5253
+from pandas._libs.tslibs.offsets import WeekDay
+
+from .common import assert_offset_equal, assert_onOffset
+from .test_offsets import Base
+
+
+def makeFY5253LastOfMonthQuarter(*args, **kwds):
+ return FY5253Quarter(*args, variation="last", **kwds)
+
+
+def makeFY5253NearestEndMonthQuarter(*args, **kwds):
+ return FY5253Quarter(*args, variation="nearest", **kwds)
+
+
+def makeFY5253NearestEndMonth(*args, **kwds):
+ return FY5253(*args, variation="nearest", **kwds)
+
+
+def makeFY5253LastOfMonth(*args, **kwds):
+ return FY5253(*args, variation="last", **kwds)
+
+
+def test_get_offset_name():
+ assert (makeFY5253LastOfMonthQuarter(
+ weekday=1, startingMonth=3,
+ qtr_with_extra_week=4).freqstr == "REQ-L-MAR-TUE-4")
+ assert (makeFY5253NearestEndMonthQuarter(
+ weekday=1, startingMonth=3,
+ qtr_with_extra_week=3).freqstr == "REQ-N-MAR-TUE-3")
+
+
+def test_get_offset():
+ with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
+ get_offset('gibberish')
+ with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
+ get_offset('QS-JAN-B')
+
+ pairs = [
+ ("RE-N-DEC-MON",
+ makeFY5253NearestEndMonth(weekday=0, startingMonth=12)),
+ ("RE-L-DEC-TUE",
+ makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
+ ("REQ-L-MAR-TUE-4",
+ makeFY5253LastOfMonthQuarter(weekday=1,
+ startingMonth=3,
+ qtr_with_extra_week=4)),
+ ("REQ-L-DEC-MON-3",
+ makeFY5253LastOfMonthQuarter(weekday=0,
+ startingMonth=12,
+ qtr_with_extra_week=3)),
+ ("REQ-N-DEC-MON-3",
+ makeFY5253NearestEndMonthQuarter(weekday=0,
+ startingMonth=12,
+ qtr_with_extra_week=3))]
+
+ for name, expected in pairs:
+ offset = get_offset(name)
+ assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
+ (name, expected, offset))
+
+
+class TestFY5253LastOfMonth(Base):
+ offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8,
+ weekday=WeekDay.SAT)
+ offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9,
+ weekday=WeekDay.SAT)
+
+ on_offset_cases = [
+ # From Wikipedia (see:
+ # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
+ (offset_lom_sat_aug, datetime(2006, 8, 26), True),
+ (offset_lom_sat_aug, datetime(2007, 8, 25), True),
+ (offset_lom_sat_aug, datetime(2008, 8, 30), True),
+ (offset_lom_sat_aug, datetime(2009, 8, 29), True),
+ (offset_lom_sat_aug, datetime(2010, 8, 28), True),
+ (offset_lom_sat_aug, datetime(2011, 8, 27), True),
+ (offset_lom_sat_aug, datetime(2012, 8, 25), True),
+ (offset_lom_sat_aug, datetime(2013, 8, 31), True),
+ (offset_lom_sat_aug, datetime(2014, 8, 30), True),
+ (offset_lom_sat_aug, datetime(2015, 8, 29), True),
+ (offset_lom_sat_aug, datetime(2016, 8, 27), True),
+ (offset_lom_sat_aug, datetime(2017, 8, 26), True),
+ (offset_lom_sat_aug, datetime(2018, 8, 25), True),
+ (offset_lom_sat_aug, datetime(2019, 8, 31), True),
+
+ (offset_lom_sat_aug, datetime(2006, 8, 27), False),
+ (offset_lom_sat_aug, datetime(2007, 8, 28), False),
+ (offset_lom_sat_aug, datetime(2008, 8, 31), False),
+ (offset_lom_sat_aug, datetime(2009, 8, 30), False),
+ (offset_lom_sat_aug, datetime(2010, 8, 29), False),
+ (offset_lom_sat_aug, datetime(2011, 8, 28), False),
+
+ (offset_lom_sat_aug, datetime(2006, 8, 25), False),
+ (offset_lom_sat_aug, datetime(2007, 8, 24), False),
+ (offset_lom_sat_aug, datetime(2008, 8, 29), False),
+ (offset_lom_sat_aug, datetime(2009, 8, 28), False),
+ (offset_lom_sat_aug, datetime(2010, 8, 27), False),
+ (offset_lom_sat_aug, datetime(2011, 8, 26), False),
+ (offset_lom_sat_aug, datetime(2019, 8, 30), False),
+
+ # From GMCR (see for example:
+ # http://yahoo.brand.edgar-online.com/Default.aspx?
+ # companyid=3184&formtypeID=7)
+ (offset_lom_sat_sep, datetime(2010, 9, 25), True),
+ (offset_lom_sat_sep, datetime(2011, 9, 24), True),
+ (offset_lom_sat_sep, datetime(2012, 9, 29), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+ def test_apply(self):
+ offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8,
+ weekday=WeekDay.SAT)
+ offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8,
+ weekday=WeekDay.SAT)
+
+ date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
+ datetime(2008, 8, 30), datetime(2009, 8, 29),
+ datetime(2010, 8, 28), datetime(2011, 8, 27),
+ datetime(2012, 8, 25), datetime(2013, 8, 31),
+ datetime(2014, 8, 30), datetime(2015, 8, 29),
+ datetime(2016, 8, 27)]
+
+ tests = [
+ (offset_lom_aug_sat, date_seq_lom_aug_sat),
+ (offset_lom_aug_sat_1, date_seq_lom_aug_sat),
+ (offset_lom_aug_sat, [
+ datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
+ (offset_lom_aug_sat_1, [
+ datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
+ (makeFY5253LastOfMonth(n=-1, startingMonth=8,
+ weekday=WeekDay.SAT),
+ list(reversed(date_seq_lom_aug_sat))),
+ ]
+ for test in tests:
+ offset, data = test
+ current = data[0]
+ for datum in data[1:]:
+ current = current + offset
+ assert current == datum
+
+
+class TestFY5253NearestEndMonth(Base):
+
+ def test_get_target_month_end(self):
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(
+ datetime(2013, 1, 1)) == datetime(2013, 8, 31))
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(
+ datetime(2013, 1, 1)) == datetime(2013, 12, 31))
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(
+ datetime(2013, 1, 1)) == datetime(2013, 2, 28))
+
+ def test_get_year_end(self):
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=8, weekday=WeekDay.SAT).get_year_end(
+ datetime(2013, 1, 1)) == datetime(2013, 8, 31))
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=8, weekday=WeekDay.SUN).get_year_end(
+ datetime(2013, 1, 1)) == datetime(2013, 9, 1))
+ assert (makeFY5253NearestEndMonth(
+ startingMonth=8, weekday=WeekDay.FRI).get_year_end(
+ datetime(2013, 1, 1)) == datetime(2013, 8, 30))
+
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest")
+ assert (offset_n.get_year_end(datetime(2012, 1, 1)) ==
+ datetime(2013, 1, 1))
+ assert (offset_n.get_year_end(datetime(2012, 1, 10)) ==
+ datetime(2013, 1, 1))
+
+ assert (offset_n.get_year_end(datetime(2013, 1, 1)) ==
+ datetime(2013, 12, 31))
+ assert (offset_n.get_year_end(datetime(2013, 1, 2)) ==
+ datetime(2013, 12, 31))
+ assert (offset_n.get_year_end(datetime(2013, 1, 3)) ==
+ datetime(2013, 12, 31))
+ assert (offset_n.get_year_end(datetime(2013, 1, 10)) ==
+ datetime(2013, 12, 31))
+
+ JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
+ assert (JNJ.get_year_end(datetime(2006, 1, 1)) ==
+ datetime(2006, 12, 31))
+
+ offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8,
+ weekday=WeekDay.SAT)
+ offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8,
+ weekday=WeekDay.THU)
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest")
+
+ on_offset_cases = [
+ # From Wikipedia (see:
+ # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
+ # #Saturday_nearest_the_end_of_month)
+ # 2006-09-02 2006 September 2
+ # 2007-09-01 2007 September 1
+ # 2008-08-30 2008 August 30 (leap year)
+ # 2009-08-29 2009 August 29
+ # 2010-08-28 2010 August 28
+ # 2011-09-03 2011 September 3
+ # 2012-09-01 2012 September 1 (leap year)
+ # 2013-08-31 2013 August 31
+ # 2014-08-30 2014 August 30
+ # 2015-08-29 2015 August 29
+ # 2016-09-03 2016 September 3 (leap year)
+ # 2017-09-02 2017 September 2
+ # 2018-09-01 2018 September 1
+ # 2019-08-31 2019 August 31
+ (offset_lom_aug_sat, datetime(2006, 9, 2), True),
+ (offset_lom_aug_sat, datetime(2007, 9, 1), True),
+ (offset_lom_aug_sat, datetime(2008, 8, 30), True),
+ (offset_lom_aug_sat, datetime(2009, 8, 29), True),
+ (offset_lom_aug_sat, datetime(2010, 8, 28), True),
+ (offset_lom_aug_sat, datetime(2011, 9, 3), True),
+
+ (offset_lom_aug_sat, datetime(2016, 9, 3), True),
+ (offset_lom_aug_sat, datetime(2017, 9, 2), True),
+ (offset_lom_aug_sat, datetime(2018, 9, 1), True),
+ (offset_lom_aug_sat, datetime(2019, 8, 31), True),
+
+ (offset_lom_aug_sat, datetime(2006, 8, 27), False),
+ (offset_lom_aug_sat, datetime(2007, 8, 28), False),
+ (offset_lom_aug_sat, datetime(2008, 8, 31), False),
+ (offset_lom_aug_sat, datetime(2009, 8, 30), False),
+ (offset_lom_aug_sat, datetime(2010, 8, 29), False),
+ (offset_lom_aug_sat, datetime(2011, 8, 28), False),
+
+ (offset_lom_aug_sat, datetime(2006, 8, 25), False),
+ (offset_lom_aug_sat, datetime(2007, 8, 24), False),
+ (offset_lom_aug_sat, datetime(2008, 8, 29), False),
+ (offset_lom_aug_sat, datetime(2009, 8, 28), False),
+ (offset_lom_aug_sat, datetime(2010, 8, 27), False),
+ (offset_lom_aug_sat, datetime(2011, 8, 26), False),
+ (offset_lom_aug_sat, datetime(2019, 8, 30), False),
+
+ # From Micron, see:
+ # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
+ (offset_lom_aug_thu, datetime(2012, 8, 30), True),
+ (offset_lom_aug_thu, datetime(2011, 9, 1), True),
+
+ (offset_n, datetime(2012, 12, 31), False),
+ (offset_n, datetime(2013, 1, 1), True),
+ (offset_n, datetime(2013, 1, 2), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+ def test_apply(self):
+ date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
+ datetime(2008, 8, 30), datetime(2009, 8, 29),
+ datetime(2010, 8, 28), datetime(2011, 9, 3)]
+
+ JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
+ datetime(2006, 12, 31), datetime(2007, 12, 30),
+ datetime(2008, 12, 28), datetime(2010, 1, 3),
+ datetime(2011, 1, 2), datetime(2012, 1, 1),
+ datetime(2012, 12, 30)]
+
+ DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5,
+ variation="nearest")
+
+ tests = [
+ (makeFY5253NearestEndMonth(startingMonth=8,
+ weekday=WeekDay.SAT),
+ date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=8,
+ weekday=WeekDay.SAT),
+ date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
+ [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=8,
+ weekday=WeekDay.SAT),
+ [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
+ (makeFY5253NearestEndMonth(n=-1, startingMonth=8,
+ weekday=WeekDay.SAT),
+ list(reversed(date_seq_nem_8_sat))),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12,
+ weekday=WeekDay.SUN), JNJ),
+ (makeFY5253NearestEndMonth(n=-1, startingMonth=12,
+ weekday=WeekDay.SUN),
+ list(reversed(JNJ))),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12,
+ weekday=WeekDay.SUN),
+ [datetime(2005, 1, 2), datetime(2006, 1, 1)]),
+ (makeFY5253NearestEndMonth(n=1, startingMonth=12,
+ weekday=WeekDay.SUN),
+ [datetime(2006, 1, 2), datetime(2006, 12, 31)]),
+ (DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)])
+ ]
+ for test in tests:
+ offset, data = test
+ current = data[0]
+ for datum in data[1:]:
+ current = current + offset
+ assert current == datum
+
+
+class TestFY5253LastOfMonthQuarter(Base):
+
+ def test_isAnchored(self):
+ assert makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4).isAnchored()
+ assert makeFY5253LastOfMonthQuarter(
+ weekday=WeekDay.SAT, startingMonth=3,
+ qtr_with_extra_week=4).isAnchored()
+ assert not makeFY5253LastOfMonthQuarter(
+ 2, startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4).isAnchored()
+
+ def test_equality(self):
+ assert (makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4) == makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
+ assert (makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
+ assert (makeFY5253LastOfMonthQuarter(
+ startingMonth=1, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
+ startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
+
+ def test_offset(self):
+ offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+ offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+ offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+
+ offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+ offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+
+ GMCR = [datetime(2010, 3, 27), datetime(2010, 6, 26),
+ datetime(2010, 9, 25), datetime(2010, 12, 25),
+ datetime(2011, 3, 26), datetime(2011, 6, 25),
+ datetime(2011, 9, 24), datetime(2011, 12, 24),
+ datetime(2012, 3, 24), datetime(2012, 6, 23),
+ datetime(2012, 9, 29), datetime(2012, 12, 29),
+ datetime(2013, 3, 30), datetime(2013, 6, 29)]
+
+ assert_offset_equal(offset, base=GMCR[0], expected=GMCR[1])
+ assert_offset_equal(offset, base=GMCR[0] + relativedelta(days=-1),
+ expected=GMCR[0])
+ assert_offset_equal(offset, base=GMCR[1], expected=GMCR[2])
+
+ assert_offset_equal(offset2, base=GMCR[0], expected=GMCR[2])
+ assert_offset_equal(offset4, base=GMCR[0], expected=GMCR[4])
+
+ assert_offset_equal(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
+ assert_offset_equal(offset_neg1,
+ base=GMCR[-1] + relativedelta(days=+1),
+ expected=GMCR[-1])
+ assert_offset_equal(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
+
+ date = GMCR[0] + relativedelta(days=-1)
+ for expected in GMCR:
+ assert_offset_equal(offset, date, expected)
+ date = date + offset
+
+ date = GMCR[-1] + relativedelta(days=+1)
+ for expected in reversed(GMCR):
+ assert_offset_equal(offset_neg1, date, expected)
+ date = date + offset_neg1
+
+ lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+ lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+
+ on_offset_cases = [
+ # From Wikipedia
+ (lomq_aug_sat_4, datetime(2006, 8, 26), True),
+ (lomq_aug_sat_4, datetime(2007, 8, 25), True),
+ (lomq_aug_sat_4, datetime(2008, 8, 30), True),
+ (lomq_aug_sat_4, datetime(2009, 8, 29), True),
+ (lomq_aug_sat_4, datetime(2010, 8, 28), True),
+ (lomq_aug_sat_4, datetime(2011, 8, 27), True),
+ (lomq_aug_sat_4, datetime(2019, 8, 31), True),
+
+ (lomq_aug_sat_4, datetime(2006, 8, 27), False),
+ (lomq_aug_sat_4, datetime(2007, 8, 28), False),
+ (lomq_aug_sat_4, datetime(2008, 8, 31), False),
+ (lomq_aug_sat_4, datetime(2009, 8, 30), False),
+ (lomq_aug_sat_4, datetime(2010, 8, 29), False),
+ (lomq_aug_sat_4, datetime(2011, 8, 28), False),
+
+ (lomq_aug_sat_4, datetime(2006, 8, 25), False),
+ (lomq_aug_sat_4, datetime(2007, 8, 24), False),
+ (lomq_aug_sat_4, datetime(2008, 8, 29), False),
+ (lomq_aug_sat_4, datetime(2009, 8, 28), False),
+ (lomq_aug_sat_4, datetime(2010, 8, 27), False),
+ (lomq_aug_sat_4, datetime(2011, 8, 26), False),
+ (lomq_aug_sat_4, datetime(2019, 8, 30), False),
+
+ # From GMCR
+ (lomq_sep_sat_4, datetime(2010, 9, 25), True),
+ (lomq_sep_sat_4, datetime(2011, 9, 24), True),
+ (lomq_sep_sat_4, datetime(2012, 9, 29), True),
+
+ (lomq_sep_sat_4, datetime(2013, 6, 29), True),
+ (lomq_sep_sat_4, datetime(2012, 6, 23), True),
+ (lomq_sep_sat_4, datetime(2012, 6, 30), False),
+
+ (lomq_sep_sat_4, datetime(2013, 3, 30), True),
+ (lomq_sep_sat_4, datetime(2012, 3, 24), True),
+
+ (lomq_sep_sat_4, datetime(2012, 12, 29), True),
+ (lomq_sep_sat_4, datetime(2011, 12, 24), True),
+
+ # INTC (extra week in Q1)
+ # See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
+ (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=1),
+ datetime(2011, 4, 2), True),
+
+ # see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
+ (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=1),
+ datetime(2012, 12, 29), True),
+ (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=1),
+ datetime(2011, 12, 31), True),
+ (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=1),
+ datetime(2010, 12, 25), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+ def test_year_has_extra_week(self):
+ # End of long Q1
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))
+
+ # Start of long Q1
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))
+
+ # End of year before year with long Q1
+ assert not makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25))
+
+ for year in [x
+ for x in range(1994, 2011 + 1)
+ if x not in [2011, 2005, 2000, 1994]]:
+ assert not makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(
+ datetime(year, 4, 2))
+
+ # Other long years
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))
+
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))
+
+ assert makeFY5253LastOfMonthQuarter(
+ 1, startingMonth=12, weekday=WeekDay.SAT,
+ qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))
+
+ def test_get_weeks(self):
+ sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=1)
+ sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
+ weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+
+ assert sat_dec_1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]
+ assert sat_dec_4.get_weeks(datetime(2011, 4, 2)) == [13, 13, 13, 14]
+ assert sat_dec_1.get_weeks(datetime(2010, 12, 25)) == [13, 13, 13, 13]
+
+
+class TestFY5253NearestEndMonthQuarter(Base):
+
+ offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(
+ 1, startingMonth=8, weekday=WeekDay.SAT,
+ qtr_with_extra_week=4)
+ offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(
+ 1, startingMonth=8, weekday=WeekDay.THU,
+ qtr_with_extra_week=4)
+ offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
+ variation="nearest")
+
+ on_offset_cases = [
+ # From Wikipedia
+ (offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
+ (offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
+ (offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
+ (offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
+ (offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
+ (offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
+
+ (offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
+ (offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
+ (offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
+ (offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
+
+ (offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
+ (offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
+ (offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
+ (offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
+ (offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
+ (offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
+
+ (offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
+ (offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
+ (offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
+ (offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
+ (offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
+ (offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
+ (offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
+
+ # From Micron, see:
+ # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
+ (offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
+ (offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
+
+ # See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
+ (offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
+ (offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
+ (offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
+ (offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
+ (offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
+ (offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
+
+ (offset_n, datetime(2012, 12, 31), False),
+ (offset_n, datetime(2013, 1, 1), True),
+ (offset_n, datetime(2013, 1, 2), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+ def test_offset(self):
+ offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8,
+ weekday=WeekDay.THU,
+ qtr_with_extra_week=4)
+
+ MU = [datetime(2012, 5, 31),
+ datetime(2012, 8, 30), datetime(2012, 11, 29),
+ datetime(2013, 2, 28), datetime(2013, 5, 30)]
+
+ date = MU[0] + relativedelta(days=-1)
+ for expected in MU:
+ assert_offset_equal(offset, date, expected)
+ date = date + offset
+
+ assert_offset_equal(offset,
+ datetime(2012, 5, 31),
+ datetime(2012, 8, 30))
+ assert_offset_equal(offset,
+ datetime(2012, 5, 30),
+ datetime(2012, 5, 31))
+
+ offset2 = FY5253Quarter(weekday=5, startingMonth=12, variation="last",
+ qtr_with_extra_week=4)
+
+ assert_offset_equal(offset2,
+ datetime(2013, 1, 15),
+ datetime(2013, 3, 30))
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py
index 357c95282e78d..d6b64896b8a60 100644
--- a/pandas/tests/tseries/offsets/test_offsets.py
+++ b/pandas/tests/tseries/offsets/test_offsets.py
@@ -1,7 +1,6 @@
import os
from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
-from dateutil.relativedelta import relativedelta
import pytest
from pandas.compat import range
@@ -621,50 +620,50 @@ def test_onOffset(self):
for offset, d, expected in tests:
assert_onOffset(offset, d, expected)
- def test_apply(self):
- tests = []
-
- tests.append((BDay(), {datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8)}))
-
- tests.append((2 * BDay(), {datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9)}
- ))
-
- tests.append((-BDay(), {datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7)}
- ))
-
- tests.append((-2 * BDay(), {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7)}
- ))
-
- tests.append((BDay(0), {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7)}
- ))
+ apply_cases = []
+ apply_cases.append((BDay(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8)}))
+
+ apply_cases.append((2 * BDay(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9)}))
+
+ apply_cases.append((-BDay(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7)}))
+
+ apply_cases.append((-2 * BDay(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7)}))
+
+ apply_cases.append((BDay(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7)}))
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ @pytest.mark.parametrize('case', apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
@@ -851,483 +850,469 @@ def test_roll_date_object(self):
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
- def test_normalize(self):
- tests = []
-
- tests.append((BusinessHour(normalize=True),
- {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
- datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
-
- tests.append((BusinessHour(-1, normalize=True),
- {datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
- datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
- datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
- datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
- datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
- datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
-
- tests.append((BusinessHour(1, normalize=True, start='17:00',
- end='04:00'),
- {datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
- datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
- datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
- datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
- datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
-
- for offset, cases in tests:
- for dt, expected in compat.iteritems(cases):
- assert offset.apply(dt) == expected
-
- def test_onOffset(self):
- tests = []
-
- tests.append((BusinessHour(), {datetime(2014, 7, 1, 9): True,
- datetime(2014, 7, 1, 8, 59): False,
- datetime(2014, 7, 1, 8): False,
- datetime(2014, 7, 1, 17): True,
- datetime(2014, 7, 1, 17, 1): False,
- datetime(2014, 7, 1, 18): False,
- datetime(2014, 7, 5, 9): False,
- datetime(2014, 7, 6, 12): False}))
-
- tests.append((BusinessHour(start='10:00', end='15:00'),
- {datetime(2014, 7, 1, 9): False,
- datetime(2014, 7, 1, 10): True,
- datetime(2014, 7, 1, 15): True,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12): False,
- datetime(2014, 7, 6, 12): False}))
-
- tests.append((BusinessHour(start='19:00', end='05:00'),
- {datetime(2014, 7, 1, 9, 0): False,
- datetime(2014, 7, 1, 10, 0): False,
- datetime(2014, 7, 1, 15): False,
- datetime(2014, 7, 1, 15, 1): False,
- datetime(2014, 7, 5, 12, 0): False,
- datetime(2014, 7, 6, 12, 0): False,
- datetime(2014, 7, 1, 19, 0): True,
- datetime(2014, 7, 2, 0, 0): True,
- datetime(2014, 7, 4, 23): True,
- datetime(2014, 7, 5, 1): True,
- datetime(2014, 7, 5, 5, 0): True,
- datetime(2014, 7, 6, 23, 0): False,
- datetime(2014, 7, 7, 3, 0): False}))
-
- for offset, cases in tests:
- for dt, expected in compat.iteritems(cases):
- assert offset.onOffset(dt) == expected
-
- def test_opening_time(self):
- tests = []
-
- # opening time should be affected by sign of n, not by n's value and
- # end
- tests.append((
- [BusinessHour(), BusinessHour(n=2), BusinessHour(
- n=4), BusinessHour(end='10:00'), BusinessHour(n=2, end='4:00'),
- BusinessHour(n=4, end='15:00')],
- {datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9), datetime(
- 2014, 7, 1, 9)),
- datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9), datetime(
- 2014, 7, 1, 9)),
- datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9), datetime(
- 2014, 7, 1, 9)),
- datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9), datetime(
- 2014, 7, 1, 9)),
- # if timestamp is on opening time, next opening time is
- # as it is
- datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(
- 2014, 7, 2, 9)),
- datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9), datetime(
- 2014, 7, 2, 9)),
- # 2014-07-05 is saturday
- datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9), datetime(
- 2014, 7, 4, 9)),
- datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9), datetime(
- 2014, 7, 4, 9)),
- datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9), datetime(
- 2014, 7, 4, 9)),
- datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9), datetime(
- 2014, 7, 4, 9)),
- datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9), datetime(
- 2014, 7, 4, 9)),
- datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9), datetime(
- 2014, 7, 7, 9))}))
-
- tests.append(([BusinessHour(start='11:15'),
- BusinessHour(n=2, start='11:15'),
- BusinessHour(n=3, start='11:15'),
- BusinessHour(start='11:15', end='10:00'),
- BusinessHour(n=2, start='11:15', end='4:00'),
- BusinessHour(n=3, start='11:15', end='15:00')],
- {datetime(2014, 7, 1, 11): (datetime(
- 2014, 7, 1, 11, 15), datetime(2014, 6, 30, 11, 15)),
- datetime(2014, 7, 1, 18): (datetime(
- 2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
- datetime(2014, 7, 1, 23): (datetime(
- 2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
- datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15)),
- datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
- datetime(2014, 7, 1, 11, 15)),
- datetime(2014, 7, 2, 10): (datetime(
- 2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
- datetime(2014, 7, 2, 11, 15): (datetime(
- 2014, 7, 2, 11, 15), datetime(2014, 7, 2, 11, 15)),
- datetime(2014, 7, 2, 11, 15, 1): (datetime(
- 2014, 7, 3, 11, 15), datetime(2014, 7, 2, 11, 15)),
- datetime(2014, 7, 5, 10): (datetime(
- 2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
- datetime(2014, 7, 4, 10): (datetime(
- 2014, 7, 4, 11, 15), datetime(2014, 7, 3, 11, 15)),
- datetime(2014, 7, 4, 23): (datetime(
- 2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
- datetime(2014, 7, 6, 10): (datetime(
- 2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
- datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15)),
- datetime(2014, 7, 7, 9, 1): (
- datetime(2014, 7, 7, 11, 15),
- datetime(2014, 7, 4, 11, 15))}))
-
- tests.append(([BusinessHour(-1), BusinessHour(n=-2),
- BusinessHour(n=-4),
- BusinessHour(n=-1, end='10:00'),
- BusinessHour(n=-2, end='4:00'),
- BusinessHour(n=-4, end='15:00')],
- {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9)),
- datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9)),
- datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9)),
- datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
- datetime(2014, 7, 2, 9)),
- datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
- datetime(2014, 7, 2, 9)),
- datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
- datetime(2014, 7, 3, 9)),
- datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
- datetime(2014, 7, 7, 9)),
- datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
- datetime(2014, 7, 8, 9))}))
-
- tests.append(([BusinessHour(start='17:00', end='05:00'),
- BusinessHour(n=3, start='17:00', end='03:00')],
- {datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
- datetime(2014, 6, 30, 17)),
- datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17)),
- datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17)),
- datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17)),
- datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
- datetime(2014, 7, 1, 17)),
- datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 3, 17)),
- datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 7, 17, 1): (datetime(
- 2014, 7, 8, 17), datetime(2014, 7, 7, 17)), }))
-
- tests.append(([BusinessHour(-1, start='17:00', end='05:00'),
- BusinessHour(n=-2, start='17:00', end='03:00')],
- {datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 17)),
- datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17)),
- datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17)),
- datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17)),
- datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
- datetime(2014, 7, 2, 17)),
- datetime(2014, 7, 2, 16, 59): (datetime(
- 2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
- datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17)),
- datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
- datetime(2014, 7, 4, 17)),
- datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17)),
- datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17)),
- datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
- datetime(2014, 7, 7, 17)),
- datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
- datetime(2014, 7, 8, 17))}))
-
- for _offsets, cases in tests:
- for offset in _offsets:
- for dt, (exp_next, exp_prev) in compat.iteritems(cases):
- assert offset._next_opening_time(dt) == exp_next
- assert offset._prev_opening_time(dt) == exp_prev
-
- def test_apply(self):
- tests = []
-
- tests.append((
- BusinessHour(),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30,
- 30)}))
-
- tests.append((BusinessHour(
- 4), {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
- datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
- datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30,
- 30)}))
-
- tests.append(
- (BusinessHour(-1),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 16, 30, 15): datetime(
- 2014, 7, 1, 15, 30, 15),
- datetime(2014, 7, 1, 9, 30, 15): datetime(
- 2014, 6, 30, 16, 30, 15),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
- # out of business hours
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
- # saturday
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30,
- 30)}))
-
- tests.append((BusinessHour(
- -4), {datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30,
- 30)}))
-
- tests.append((BusinessHour(start='13:00', end='16:00'),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2,
- 13, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
-
- tests.append((BusinessHour(n=2, start='13:00', end='16:00'), {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
- datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
- datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)
- }))
-
- tests.append((BusinessHour(n=-1, start='13:00', end='16:00'),
- {datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
- datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1,
- 15, 30, 15),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
- datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
-
- tests.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
- datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
- datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)
- }))
-
- tests.append((BusinessHour(start='19:00', end='05:00'), {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
- datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
- datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
- datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
- datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)
- }))
-
- tests.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
- datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
- datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
- datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
- datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
- datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
- datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
- datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
- datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
- datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)
- }))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_apply_large_n(self):
- tests = []
+ normalize_cases = []
+ normalize_cases.append((BusinessHour(normalize=True), {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
+ datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
+
+ normalize_cases.append((BusinessHour(-1, normalize=True), {
+ datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
+ datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
+ datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
+ datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
+ datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
+ datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
+
+ normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
+ end='04:00'), {
+ datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
+ datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
+ datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
+ datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
+ datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
+
+ @pytest.mark.parametrize('case', normalize_cases)
+ def test_normalize(self, case):
+ offset, cases = case
+ for dt, expected in compat.iteritems(cases):
+ assert offset.apply(dt) == expected
+
+ on_offset_cases = []
+ on_offset_cases.append((BusinessHour(), {
+ datetime(2014, 7, 1, 9): True,
+ datetime(2014, 7, 1, 8, 59): False,
+ datetime(2014, 7, 1, 8): False,
+ datetime(2014, 7, 1, 17): True,
+ datetime(2014, 7, 1, 17, 1): False,
+ datetime(2014, 7, 1, 18): False,
+ datetime(2014, 7, 5, 9): False,
+ datetime(2014, 7, 6, 12): False}))
+
+ on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
+ datetime(2014, 7, 1, 9): False,
+ datetime(2014, 7, 1, 10): True,
+ datetime(2014, 7, 1, 15): True,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12): False,
+ datetime(2014, 7, 6, 12): False}))
+
+ on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
+ datetime(2014, 7, 1, 9, 0): False,
+ datetime(2014, 7, 1, 10, 0): False,
+ datetime(2014, 7, 1, 15): False,
+ datetime(2014, 7, 1, 15, 1): False,
+ datetime(2014, 7, 5, 12, 0): False,
+ datetime(2014, 7, 6, 12, 0): False,
+ datetime(2014, 7, 1, 19, 0): True,
+ datetime(2014, 7, 2, 0, 0): True,
+ datetime(2014, 7, 4, 23): True,
+ datetime(2014, 7, 5, 1): True,
+ datetime(2014, 7, 5, 5, 0): True,
+ datetime(2014, 7, 6, 23, 0): False,
+ datetime(2014, 7, 7, 3, 0): False}))
- tests.append(
- (BusinessHour(40), # A week later
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
- datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
- datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
- datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
- datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
- datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
- datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
- datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30,
- 30)}))
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, cases = case
+ for dt, expected in compat.iteritems(cases):
+ assert offset.onOffset(dt) == expected
- tests.append(
- (BusinessHour(-25), # 3 days and 1 hour before
- {datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
- datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
- datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
- datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
- datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
- datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
- datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
- datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
- datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30,
- 30)}))
-
- # 5 days and 3 hours later
- tests.append((BusinessHour(28, start='21:00', end='02:00'),
- {datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
- datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
- datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
- datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
- datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
- datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
- datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
- datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
- datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
- datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21,
- 30)}))
+ opening_time_cases = []
+ # opening time should be affected by sign of n, not by n's value and
+ # end
+ opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
+ BusinessHour(n=4), BusinessHour(end='10:00'),
+ BusinessHour(n=2, end='4:00'),
+ BusinessHour(n=4, end='15:00')], {
+ datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 9)),
+ # if timestamp is on opening time, next opening time is
+ # as it is
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
+ datetime(2014, 7, 2, 9)),
+ # 2014-07-05 is saturday
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 4, 9)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
+ datetime(2014, 7, 7, 9))}))
+
+ opening_time_cases.append(([BusinessHour(start='11:15'),
+ BusinessHour(n=2, start='11:15'),
+ BusinessHour(n=3, start='11:15'),
+ BusinessHour(start='11:15', end='10:00'),
+ BusinessHour(n=2, start='11:15', end='4:00'),
+ BusinessHour(n=3, start='11:15',
+ end='15:00')], {
+ datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
+ datetime(2014, 6, 30, 11, 15)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 1, 11, 15)),
+ datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
+ datetime(2014, 7, 2, 11, 15)),
+ datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
+ datetime(2014, 7, 2, 11, 15)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
+ datetime(2014, 7, 3, 11, 15)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
+ datetime(2014, 7, 4, 11, 15))}))
+
+ opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
+ BusinessHour(n=-4),
+ BusinessHour(n=-1, end='10:00'),
+ BusinessHour(n=-2, end='4:00'),
+ BusinessHour(n=-4, end='15:00')], {
+ datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 2, 9)),
+ datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 3, 9)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 7, 9)),
+ datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
+ datetime(2014, 7, 8, 9))}))
+
+ opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
+ BusinessHour(n=3, start='17:00',
+ end='03:00')], {
+ datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
+ datetime(2014, 6, 30, 17)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
+ datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 3, 17)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
+ datetime(2014, 7, 7, 17)), }))
+
+ opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
+ BusinessHour(n=-2, start='17:00',
+ end='03:00')], {
+ datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 17)),
+ datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
+ datetime(2014, 7, 2, 17)),
+ datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
+ datetime(2014, 7, 4, 17)),
+ datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
+ datetime(2014, 7, 7, 17)),
+ datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
+ datetime(2014, 7, 8, 17))}))
+
+ @pytest.mark.parametrize('case', opening_time_cases)
+ def test_opening_time(self, case):
+ _offsets, cases = case
+ for offset in _offsets:
+ for dt, (exp_next, exp_prev) in compat.iteritems(cases):
+ assert offset._next_opening_time(dt) == exp_next
+ assert offset._prev_opening_time(dt) == exp_prev
+
+ apply_cases = []
+ apply_cases.append((BusinessHour(), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
+
+ apply_cases.append((BusinessHour(4), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
+ datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
+ datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
+
+ apply_cases.append((BusinessHour(-1), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
+ # out of business hours
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
+ # saturday
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
+
+ apply_cases.append((BusinessHour(-4), {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
+
+ apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
+
+ apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
+ datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
+ datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
+
+ apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
+ datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
+ datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
+
+ apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
+ datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
+ datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
+
+ apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
+ datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
+ datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
+ datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
+ datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
+
+ apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
+ datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
+ datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
+ datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
+ datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
+ datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
+ datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
+ datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
+ datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
+
+ @pytest.mark.parametrize('case', apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ apply_large_n_cases = []
+ # A week later
+ apply_large_n_cases.append((BusinessHour(40), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
+ datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
+ datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
+ datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
+ datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
+ datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
+ datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
+ datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
+
+ # 3 days and 1 hour before
+ apply_large_n_cases.append((BusinessHour(-25), {
+ datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
+ datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
+ datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
+ datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
+ datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
+ datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
+ datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
+ datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
+ datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
+
+ # 5 days and 3 hours later
+ apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
+ datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
+ datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
+ datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
+ datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
+ datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
+ datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
+ datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
+ datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
+ datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
+ datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
+
+ @pytest.mark.parametrize('case', apply_large_n_cases)
+ def test_apply_large_n(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
@@ -1743,58 +1728,58 @@ def test_roll_date_object(self):
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
- def test_onOffset(self):
- tests = [(CDay(), datetime(2008, 1, 1), True),
- (CDay(), datetime(2008, 1, 5), False)]
-
- for offset, d, expected in tests:
- assert_onOffset(offset, d, expected)
-
- def test_apply(self):
- tests = []
+ on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
+ (CDay(), datetime(2008, 1, 5), False)]
- tests.append((CDay(), {datetime(2008, 1, 1): datetime(2008, 1, 2),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 8)}))
-
- tests.append((2 * CDay(), {
- datetime(2008, 1, 1): datetime(2008, 1, 3),
- datetime(2008, 1, 4): datetime(2008, 1, 8),
- datetime(2008, 1, 5): datetime(2008, 1, 8),
- datetime(2008, 1, 6): datetime(2008, 1, 8),
- datetime(2008, 1, 7): datetime(2008, 1, 9)}
- ))
-
- tests.append((-CDay(), {
- datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 3),
- datetime(2008, 1, 5): datetime(2008, 1, 4),
- datetime(2008, 1, 6): datetime(2008, 1, 4),
- datetime(2008, 1, 7): datetime(2008, 1, 4),
- datetime(2008, 1, 8): datetime(2008, 1, 7)}
- ))
-
- tests.append((-2 * CDay(), {
- datetime(2008, 1, 1): datetime(2007, 12, 28),
- datetime(2008, 1, 4): datetime(2008, 1, 2),
- datetime(2008, 1, 5): datetime(2008, 1, 3),
- datetime(2008, 1, 6): datetime(2008, 1, 3),
- datetime(2008, 1, 7): datetime(2008, 1, 3),
- datetime(2008, 1, 8): datetime(2008, 1, 4),
- datetime(2008, 1, 9): datetime(2008, 1, 7)}
- ))
-
- tests.append((CDay(0), {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 4): datetime(2008, 1, 4),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7)}))
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, d, expected = case
+ assert_onOffset(offset, d, expected)
+
+ apply_cases = []
+ apply_cases.append((CDay(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 2),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 8)}))
+
+ apply_cases.append((2 * CDay(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 3),
+ datetime(2008, 1, 4): datetime(2008, 1, 8),
+ datetime(2008, 1, 5): datetime(2008, 1, 8),
+ datetime(2008, 1, 6): datetime(2008, 1, 8),
+ datetime(2008, 1, 7): datetime(2008, 1, 9)}))
+
+ apply_cases.append((-CDay(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 3),
+ datetime(2008, 1, 5): datetime(2008, 1, 4),
+ datetime(2008, 1, 6): datetime(2008, 1, 4),
+ datetime(2008, 1, 7): datetime(2008, 1, 4),
+ datetime(2008, 1, 8): datetime(2008, 1, 7)}))
+
+ apply_cases.append((-2 * CDay(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 28),
+ datetime(2008, 1, 4): datetime(2008, 1, 2),
+ datetime(2008, 1, 5): datetime(2008, 1, 3),
+ datetime(2008, 1, 6): datetime(2008, 1, 3),
+ datetime(2008, 1, 7): datetime(2008, 1, 3),
+ datetime(2008, 1, 8): datetime(2008, 1, 4),
+ datetime(2008, 1, 9): datetime(2008, 1, 7)}))
+
+ apply_cases.append((CDay(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 4): datetime(2008, 1, 4),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7)}))
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ @pytest.mark.parametrize('case', apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
@@ -1988,37 +1973,40 @@ def test_roll_date_object(self):
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
- def test_onOffset(self):
- tests = [(CBMonthEnd(), datetime(2008, 1, 31), True),
- (CBMonthEnd(), datetime(2008, 1, 1), False)]
-
- for offset, d, expected in tests:
- assert_onOffset(offset, d, expected)
+ on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
+ (CBMonthEnd(), datetime(2008, 1, 1), False)]
- def test_apply(self):
- cbm = CBMonthEnd()
- tests = []
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, d, expected = case
+ assert_onOffset(offset, d, expected)
- tests.append((cbm, {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29)}))
+ apply_cases = []
+ apply_cases.append((CBMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29)}))
- tests.append((2 * cbm, {datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 2, 7): datetime(2008, 3, 31)}))
+ apply_cases.append((2 * CBMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 2, 7): datetime(2008, 3, 31)}))
- tests.append((-cbm, {datetime(2008, 1, 1): datetime(2007, 12, 31),
- datetime(2008, 2, 8): datetime(2008, 1, 31)}))
+ apply_cases.append((-CBMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 31),
+ datetime(2008, 2, 8): datetime(2008, 1, 31)}))
- tests.append((-2 * cbm, {datetime(2008, 1, 1): datetime(2007, 11, 30),
- datetime(2008, 2, 9): datetime(2007, 12, 31)}
- ))
+ apply_cases.append((-2 * CBMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2007, 11, 30),
+ datetime(2008, 2, 9): datetime(2007, 12, 31)}))
- tests.append((CBMonthEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 2, 7): datetime(2008, 2, 29)}))
+ apply_cases.append((CBMonthEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 2, 7): datetime(2008, 2, 29)}))
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ @pytest.mark.parametrize('case', apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
@@ -2102,36 +2090,40 @@ def test_roll_date_object(self):
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
- def test_onOffset(self):
- tests = [(CBMonthBegin(), datetime(2008, 1, 1), True),
- (CBMonthBegin(), datetime(2008, 1, 31), False)]
-
- for offset, dt, expected in tests:
- assert_onOffset(offset, dt, expected)
+ on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
+ (CBMonthBegin(), datetime(2008, 1, 31), False)]
- def test_apply(self):
- cbm = CBMonthBegin()
- tests = []
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
- tests.append((cbm, {datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 2, 7): datetime(2008, 3, 3)}))
+ apply_cases = []
+ apply_cases.append((CBMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 2, 7): datetime(2008, 3, 3)}))
- tests.append((2 * cbm, {datetime(2008, 1, 1): datetime(2008, 3, 3),
- datetime(2008, 2, 7): datetime(2008, 4, 1)}))
+ apply_cases.append((2 * CBMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2008, 3, 3),
+ datetime(2008, 2, 7): datetime(2008, 4, 1)}))
- tests.append((-cbm, {datetime(2008, 1, 1): datetime(2007, 12, 3),
- datetime(2008, 2, 8): datetime(2008, 2, 1)}))
+ apply_cases.append((-CBMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2007, 12, 3),
+ datetime(2008, 2, 8): datetime(2008, 2, 1)}))
- tests.append((-2 * cbm, {datetime(2008, 1, 1): datetime(2007, 11, 1),
- datetime(2008, 2, 9): datetime(2008, 1, 1)}))
+ apply_cases.append((-2 * CBMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2007, 11, 1),
+ datetime(2008, 2, 9): datetime(2008, 1, 1)}))
- tests.append((CBMonthBegin(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 7): datetime(2008, 2, 1)}))
+ apply_cases.append((CBMonthBegin(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 7): datetime(2008, 2, 1)}))
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ @pytest.mark.parametrize('case', apply_cases)
+ def test_apply(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
@@ -2189,38 +2181,42 @@ def test_isAnchored(self):
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
- def test_offset(self):
- tests = []
-
- tests.append((Week(), # not business week
- {datetime(2008, 1, 1): datetime(2008, 1, 8),
- datetime(2008, 1, 4): datetime(2008, 1, 11),
- datetime(2008, 1, 5): datetime(2008, 1, 12),
- datetime(2008, 1, 6): datetime(2008, 1, 13),
- datetime(2008, 1, 7): datetime(2008, 1, 14)}))
-
- tests.append((Week(weekday=0), # Mon
- {datetime(2007, 12, 31): datetime(2008, 1, 7),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 14)}))
-
- tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
- {datetime(2007, 12, 31): datetime(2007, 12, 31),
- datetime(2008, 1, 4): datetime(2008, 1, 7),
- datetime(2008, 1, 5): datetime(2008, 1, 7),
- datetime(2008, 1, 6): datetime(2008, 1, 7),
- datetime(2008, 1, 7): datetime(2008, 1, 7)}))
-
- tests.append((Week(-2, weekday=1), # n=0 -> roll forward. Mon
- {datetime(2010, 4, 6): datetime(2010, 3, 23),
- datetime(2010, 4, 8): datetime(2010, 3, 30),
- datetime(2010, 4, 5): datetime(2010, 3, 23)}))
+ offset_cases = []
+ # not business week
+ offset_cases.append((Week(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 8),
+ datetime(2008, 1, 4): datetime(2008, 1, 11),
+ datetime(2008, 1, 5): datetime(2008, 1, 12),
+ datetime(2008, 1, 6): datetime(2008, 1, 13),
+ datetime(2008, 1, 7): datetime(2008, 1, 14)}))
+
+ # Mon
+ offset_cases.append((Week(weekday=0), {
+ datetime(2007, 12, 31): datetime(2008, 1, 7),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 14)}))
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append((Week(0, weekday=0), {
+ datetime(2007, 12, 31): datetime(2007, 12, 31),
+ datetime(2008, 1, 4): datetime(2008, 1, 7),
+ datetime(2008, 1, 5): datetime(2008, 1, 7),
+ datetime(2008, 1, 6): datetime(2008, 1, 7),
+ datetime(2008, 1, 7): datetime(2008, 1, 7)}))
+
+ # n=0 -> roll forward. Mon
+ offset_cases.append((Week(-2, weekday=1), {
+ datetime(2010, 4, 6): datetime(2010, 3, 23),
+ datetime(2010, 4, 8): datetime(2010, 3, 30),
+ datetime(2010, 4, 5): datetime(2010, 3, 23)}))
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
def test_onOffset(self):
for weekday in range(7):
@@ -2300,8 +2296,7 @@ def test_offset(self):
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
- (2, 2, 1, date4, datetime(2011, 3, 15)),
- ]
+ (2, 2, 1, date4, datetime(2011, 3, 15))]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
@@ -2314,19 +2309,18 @@ def test_offset(self):
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
- def test_onOffset(self):
- test_cases = [
- (0, 0, datetime(2011, 2, 7), True),
- (0, 0, datetime(2011, 2, 6), False),
- (0, 0, datetime(2011, 2, 14), False),
- (1, 0, datetime(2011, 2, 14), True),
- (0, 1, datetime(2011, 2, 1), True),
- (0, 1, datetime(2011, 2, 8), False),
- ]
-
- for week, weekday, dt, expected in test_cases:
- offset = WeekOfMonth(week=week, weekday=weekday)
- assert offset.onOffset(dt) == expected
+ on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
+ (0, 0, datetime(2011, 2, 6), False),
+ (0, 0, datetime(2011, 2, 14), False),
+ (1, 0, datetime(2011, 2, 14), True),
+ (0, 1, datetime(2011, 2, 1), True),
+ (0, 1, datetime(2011, 2, 8), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ week, weekday, dt, expected = case
+ offset = WeekOfMonth(week=week, weekday=weekday)
+ assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
@@ -2380,346 +2374,32 @@ def test_offset(self):
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
- def test_onOffset(self):
- test_cases = [
- (WeekDay.SUN, datetime(2013, 1, 27), True),
- (WeekDay.SAT, datetime(2013, 3, 30), True),
- (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
- (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
- (WeekDay.MON, datetime(2013, 2, 25), True),
- (WeekDay.SAT, datetime(2013, 11, 30), True),
-
- (WeekDay.SAT, datetime(2006, 8, 26), True),
- (WeekDay.SAT, datetime(2007, 8, 25), True),
- (WeekDay.SAT, datetime(2008, 8, 30), True),
- (WeekDay.SAT, datetime(2009, 8, 29), True),
- (WeekDay.SAT, datetime(2010, 8, 28), True),
- (WeekDay.SAT, datetime(2011, 8, 27), True),
- (WeekDay.SAT, datetime(2019, 8, 31), True),
- ]
-
- for weekday, dt, expected in test_cases:
- offset = LastWeekOfMonth(weekday=weekday)
- assert offset.onOffset(dt) == expected
-
-
-class TestBMonthBegin(Base):
- _offset = BMonthBegin
-
- def test_offset(self):
- tests = []
-
- tests.append((BMonthBegin(),
- {datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2006, 9, 1): datetime(2006, 10, 2),
- datetime(2007, 1, 1): datetime(2007, 2, 1),
- datetime(2006, 12, 1): datetime(2007, 1, 1)}))
-
- tests.append((BMonthBegin(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2006, 10, 2): datetime(2006, 10, 2),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2006, 9, 15): datetime(2006, 10, 2)}))
-
- tests.append((BMonthBegin(2),
- {datetime(2008, 1, 1): datetime(2008, 3, 3),
- datetime(2008, 1, 15): datetime(2008, 3, 3),
- datetime(2006, 12, 29): datetime(2007, 2, 1),
- datetime(2006, 12, 31): datetime(2007, 2, 1),
- datetime(2007, 1, 1): datetime(2007, 3, 1),
- datetime(2006, 11, 1): datetime(2007, 1, 1)}))
-
- tests.append((BMonthBegin(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 2),
- datetime(2008, 6, 1): datetime(2008, 5, 1),
- datetime(2008, 3, 10): datetime(2008, 3, 3),
- datetime(2008, 12, 31): datetime(2008, 12, 1),
- datetime(2006, 12, 29): datetime(2006, 12, 1),
- datetime(2006, 12, 30): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 1)}))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_onOffset(self):
-
- tests = [(BMonthBegin(), datetime(2007, 12, 31), False),
- (BMonthBegin(), datetime(2008, 1, 1), True),
- (BMonthBegin(), datetime(2001, 4, 2), True),
- (BMonthBegin(), datetime(2008, 3, 3), True)]
-
- for offset, dt, expected in tests:
- assert_onOffset(offset, dt, expected)
-
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = BMonthBegin()
- offset2 = BMonthBegin()
- assert not offset1 != offset2
-
-
-class TestBMonthEnd(Base):
- _offset = BMonthEnd
-
- def test_offset(self):
- tests = []
-
- tests.append((BMonthEnd(),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2006, 12, 29): datetime(2007, 1, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31),
- datetime(2006, 12, 1): datetime(2006, 12, 29)}))
-
- tests.append((BMonthEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 29),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31)}))
-
- tests.append((BMonthEnd(2),
- {datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 1, 31): datetime(2008, 3, 31),
- datetime(2006, 12, 29): datetime(2007, 2, 28),
- datetime(2006, 12, 31): datetime(2007, 2, 28),
- datetime(2007, 1, 1): datetime(2007, 2, 28),
- datetime(2006, 11, 1): datetime(2006, 12, 29)}))
-
- tests.append((BMonthEnd(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 29),
- datetime(2008, 6, 30): datetime(2008, 5, 30),
- datetime(2008, 12, 31): datetime(2008, 11, 28),
- datetime(2006, 12, 29): datetime(2006, 11, 30),
- datetime(2006, 12, 30): datetime(2006, 12, 29),
- datetime(2007, 1, 1): datetime(2006, 12, 29)}))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_normalize(self):
- dt = datetime(2007, 1, 1, 3)
-
- result = dt + BMonthEnd(normalize=True)
- expected = dt.replace(hour=0) + BMonthEnd()
- assert result == expected
-
- def test_onOffset(self):
-
- tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
- (BMonthEnd(), datetime(2008, 1, 1), False)]
-
- for offset, dt, expected in tests:
- assert_onOffset(offset, dt, expected)
-
- def test_offsets_compare_equal(self):
- # root cause of #456
- offset1 = BMonthEnd()
- offset2 = BMonthEnd()
- assert not offset1 != offset2
-
-
-class TestMonthBegin(Base):
- _offset = MonthBegin
-
- def test_offset(self):
- tests = []
-
- # NOTE: I'm not entirely happy with the logic here for Begin -ss
- # see thread 'offset conventions' on the ML
- tests.append((MonthBegin(),
- {datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2008, 2, 1): datetime(2008, 3, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2006, 12, 1): datetime(2007, 1, 1),
- datetime(2007, 1, 31): datetime(2007, 2, 1)}))
-
- tests.append((MonthBegin(0),
- {datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2006, 12, 3): datetime(2007, 1, 1),
- datetime(2007, 1, 31): datetime(2007, 2, 1)}))
-
- tests.append((MonthBegin(2),
- {datetime(2008, 2, 29): datetime(2008, 4, 1),
- datetime(2008, 1, 31): datetime(2008, 3, 1),
- datetime(2006, 12, 31): datetime(2007, 2, 1),
- datetime(2007, 12, 28): datetime(2008, 2, 1),
- datetime(2007, 1, 1): datetime(2007, 3, 1),
- datetime(2006, 11, 1): datetime(2007, 1, 1)}))
-
- tests.append((MonthBegin(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 1),
- datetime(2008, 5, 31): datetime(2008, 5, 1),
- datetime(2008, 12, 31): datetime(2008, 12, 1),
- datetime(2006, 12, 29): datetime(2006, 12, 1),
- datetime(2006, 1, 2): datetime(2006, 1, 1)}))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
-
-class TestMonthEnd(Base):
- _offset = MonthEnd
-
- def test_offset(self):
- tests = []
-
- tests.append((MonthEnd(),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31),
- datetime(2006, 12, 1): datetime(2006, 12, 31)}))
-
- tests.append((MonthEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31)}))
-
- tests.append((MonthEnd(2),
- {datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 1, 31): datetime(2008, 3, 31),
- datetime(2006, 12, 29): datetime(2007, 1, 31),
- datetime(2006, 12, 31): datetime(2007, 2, 28),
- datetime(2007, 1, 1): datetime(2007, 2, 28),
- datetime(2006, 11, 1): datetime(2006, 12, 31)}))
-
- tests.append((MonthEnd(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 5, 31),
- datetime(2008, 12, 31): datetime(2008, 11, 30),
- datetime(2006, 12, 29): datetime(2006, 11, 30),
- datetime(2006, 12, 30): datetime(2006, 11, 30),
- datetime(2007, 1, 1): datetime(2006, 12, 31)}))
-
- for offset, cases in tests:
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_day_of_month(self):
- dt = datetime(2007, 1, 1)
- offset = MonthEnd()
-
- result = dt + offset
- assert result == Timestamp(2007, 1, 31)
-
- result = result + offset
- assert result == Timestamp(2007, 2, 28)
-
- def test_normalize(self):
- dt = datetime(2007, 1, 1, 3)
-
- result = dt + MonthEnd(normalize=True)
- expected = dt.replace(hour=0) + MonthEnd()
- assert result == expected
-
- def test_onOffset(self):
-
- tests = [(MonthEnd(), datetime(2007, 12, 31), True),
- (MonthEnd(), datetime(2008, 1, 1), False)]
+ on_offset_cases = [
+ (WeekDay.SUN, datetime(2013, 1, 27), True),
+ (WeekDay.SAT, datetime(2013, 3, 30), True),
+ (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
+ (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
+ (WeekDay.MON, datetime(2013, 2, 25), True),
+ (WeekDay.SAT, datetime(2013, 11, 30), True),
+
+ (WeekDay.SAT, datetime(2006, 8, 26), True),
+ (WeekDay.SAT, datetime(2007, 8, 25), True),
+ (WeekDay.SAT, datetime(2008, 8, 30), True),
+ (WeekDay.SAT, datetime(2009, 8, 29), True),
+ (WeekDay.SAT, datetime(2010, 8, 28), True),
+ (WeekDay.SAT, datetime(2011, 8, 27), True),
+ (WeekDay.SAT, datetime(2019, 8, 31), True)]
- for offset, dt, expected in tests:
- assert_onOffset(offset, dt, expected)
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ weekday, dt, expected = case
+ offset = LastWeekOfMonth(weekday=weekday)
+ assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
- def _get_tests(self):
- tests = []
-
- tests.append((SemiMonthEnd(),
- {datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 31)}))
-
- tests.append((SemiMonthEnd(day_of_month=20),
- {datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 20),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2007, 1, 20),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20)}))
-
- tests.append((SemiMonthEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 16): datetime(2008, 1, 31),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 15)}))
-
- tests.append((SemiMonthEnd(0, day_of_month=16),
- {datetime(2008, 1, 1): datetime(2008, 1, 16),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2006, 12, 29): datetime(2006, 12, 31),
- datetime(2006, 12, 31): datetime(2006, 12, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 16)}))
-
- tests.append((SemiMonthEnd(2),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 31): datetime(2007, 1, 31),
- datetime(2007, 1, 1): datetime(2007, 1, 31),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 11, 30)}))
-
- tests.append((SemiMonthEnd(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 30): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 31)}))
-
- tests.append((SemiMonthEnd(-1, day_of_month=4),
- {datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2007, 1, 4): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2007, 1, 1): datetime(2006, 12, 31)}))
-
- tests.append((SemiMonthEnd(-2),
- {datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 5, 31),
- datetime(2008, 3, 15): datetime(2008, 2, 15),
- datetime(2008, 12, 31): datetime(2008, 11, 30),
- datetime(2006, 12, 29): datetime(2006, 11, 30),
- datetime(2006, 12, 14): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 15)}))
-
- return tests
-
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
@@ -2761,28 +2441,107 @@ def test_offset_whole_year(self):
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
- def test_offset(self):
- for offset, cases in self._get_tests():
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_apply_index(self):
- for offset, cases in self._get_tests():
- s = DatetimeIndex(cases.keys())
- result = offset.apply_index(s)
- exp = DatetimeIndex(cases.values())
- tm.assert_index_equal(result, exp)
-
- def test_onOffset(self):
-
- tests = [(datetime(2007, 12, 31), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 1), False),
- (datetime(2008, 2, 29), True)]
+ offset_cases = []
+ offset_cases.append((SemiMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 31)}))
+
+ offset_cases.append((SemiMonthEnd(day_of_month=20), {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 20),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 20),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20)}))
+
+ offset_cases.append((SemiMonthEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 16): datetime(2008, 1, 31),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 15)}))
+
+ offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
+ datetime(2008, 1, 1): datetime(2008, 1, 16),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 16)}))
+
+ offset_cases.append((SemiMonthEnd(2), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 11, 30)}))
+
+ offset_cases.append((SemiMonthEnd(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 30): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 31)}))
+
+ offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2007, 1, 4): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2007, 1, 1): datetime(2006, 12, 31)}))
+
+ offset_cases.append((SemiMonthEnd(-2), {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 5, 31),
+ datetime(2008, 3, 15): datetime(2008, 2, 15),
+ datetime(2008, 12, 31): datetime(2008, 11, 30),
+ datetime(2006, 12, 29): datetime(2006, 11, 30),
+ datetime(2006, 12, 14): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 15)}))
- for dt, expected in tests:
- assert_onOffset(SemiMonthEnd(), dt, expected)
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_apply_index(self, case):
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+ result = offset.apply_index(s)
+ exp = DatetimeIndex(cases.values())
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [(datetime(2007, 12, 31), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 1), False),
+ (datetime(2008, 2, 29), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ dt, expected = case
+ assert_onOffset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
@@ -2811,91 +2570,6 @@ def test_vectorized_offset_addition(self, klass, assert_func):
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
- def _get_tests(self):
- tests = []
-
- tests.append((SemiMonthBegin(),
- {datetime(2008, 1, 1): datetime(2008, 1, 15),
- datetime(2008, 1, 15): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 15),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 15),
- datetime(2006, 12, 1): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 1)}))
-
- tests.append((SemiMonthBegin(day_of_month=20),
- {datetime(2008, 1, 1): datetime(2008, 1, 20),
- datetime(2008, 1, 15): datetime(2008, 1, 20),
- datetime(2008, 1, 21): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 14): datetime(2006, 12, 20),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 1): datetime(2007, 1, 20),
- datetime(2006, 12, 1): datetime(2006, 12, 20),
- datetime(2006, 12, 15): datetime(2006, 12, 20)}))
-
- tests.append((SemiMonthBegin(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 2, 1),
- datetime(2008, 1, 15): datetime(2008, 1, 15),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 2): datetime(2006, 12, 15),
- datetime(2007, 1, 1): datetime(2007, 1, 1)}))
-
- tests.append((SemiMonthBegin(0, day_of_month=16),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 16): datetime(2008, 1, 16),
- datetime(2008, 1, 15): datetime(2008, 1, 16),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 1),
- datetime(2006, 12, 31): datetime(2007, 1, 1),
- datetime(2007, 1, 5): datetime(2007, 1, 16),
- datetime(2007, 1, 1): datetime(2007, 1, 1)}))
-
- tests.append((SemiMonthBegin(2),
- {datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 15),
- datetime(2006, 12, 1): datetime(2007, 1, 1),
- datetime(2006, 12, 29): datetime(2007, 1, 15),
- datetime(2006, 12, 15): datetime(2007, 1, 15),
- datetime(2007, 1, 1): datetime(2007, 2, 1),
- datetime(2007, 1, 16): datetime(2007, 2, 15),
- datetime(2006, 11, 1): datetime(2006, 12, 1)}))
-
- tests.append((SemiMonthBegin(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 15),
- datetime(2008, 6, 30): datetime(2008, 6, 15),
- datetime(2008, 6, 14): datetime(2008, 6, 1),
- datetime(2008, 12, 31): datetime(2008, 12, 15),
- datetime(2006, 12, 29): datetime(2006, 12, 15),
- datetime(2006, 12, 15): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 15)}))
-
- tests.append((SemiMonthBegin(-1, day_of_month=4),
- {datetime(2007, 1, 1): datetime(2006, 12, 4),
- datetime(2007, 1, 4): datetime(2007, 1, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 4),
- datetime(2008, 12, 31): datetime(2008, 12, 4),
- datetime(2006, 12, 5): datetime(2006, 12, 4),
- datetime(2006, 12, 30): datetime(2006, 12, 4),
- datetime(2006, 12, 2): datetime(2006, 12, 1),
- datetime(2007, 1, 1): datetime(2006, 12, 4)}))
-
- tests.append((SemiMonthBegin(-2),
- {datetime(2007, 1, 1): datetime(2006, 12, 1),
- datetime(2008, 6, 30): datetime(2008, 6, 1),
- datetime(2008, 6, 14): datetime(2008, 5, 15),
- datetime(2008, 12, 31): datetime(2008, 12, 1),
- datetime(2006, 12, 29): datetime(2006, 12, 1),
- datetime(2006, 12, 15): datetime(2006, 11, 15),
- datetime(2007, 1, 1): datetime(2006, 12, 1)}))
-
- return tests
-
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
@@ -2937,27 +2611,111 @@ def test_offset_whole_year(self):
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
- def test_offset(self):
- for offset, cases in self._get_tests():
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
+ offset_cases = []
+ offset_cases.append((SemiMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 15),
+ datetime(2008, 1, 15): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 15),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 15),
+ datetime(2006, 12, 1): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 1)}))
+
+ offset_cases.append((SemiMonthBegin(day_of_month=20), {
+ datetime(2008, 1, 1): datetime(2008, 1, 20),
+ datetime(2008, 1, 15): datetime(2008, 1, 20),
+ datetime(2008, 1, 21): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 14): datetime(2006, 12, 20),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 1): datetime(2007, 1, 20),
+ datetime(2006, 12, 1): datetime(2006, 12, 20),
+ datetime(2006, 12, 15): datetime(2006, 12, 20)}))
+
+ offset_cases.append((SemiMonthBegin(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 2, 1),
+ datetime(2008, 1, 15): datetime(2008, 1, 15),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 2): datetime(2006, 12, 15),
+ datetime(2007, 1, 1): datetime(2007, 1, 1)}))
- def test_apply_index(self):
- for offset, cases in self._get_tests():
- s = DatetimeIndex(cases.keys())
- result = offset.apply_index(s)
- exp = DatetimeIndex(cases.values())
- tm.assert_index_equal(result, exp)
+ offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 16): datetime(2008, 1, 16),
+ datetime(2008, 1, 15): datetime(2008, 1, 16),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2007, 1, 5): datetime(2007, 1, 16),
+ datetime(2007, 1, 1): datetime(2007, 1, 1)}))
- def test_onOffset(self):
- tests = [(datetime(2007, 12, 1), True),
- (datetime(2007, 12, 15), True),
- (datetime(2007, 12, 14), False),
- (datetime(2007, 12, 31), False),
- (datetime(2008, 2, 15), True)]
+ offset_cases.append((SemiMonthBegin(2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 15),
+ datetime(2006, 12, 1): datetime(2007, 1, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 15),
+ datetime(2006, 12, 15): datetime(2007, 1, 15),
+ datetime(2007, 1, 1): datetime(2007, 2, 1),
+ datetime(2007, 1, 16): datetime(2007, 2, 15),
+ datetime(2006, 11, 1): datetime(2006, 12, 1)}))
+
+ offset_cases.append((SemiMonthBegin(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 15),
+ datetime(2008, 6, 30): datetime(2008, 6, 15),
+ datetime(2008, 6, 14): datetime(2008, 6, 1),
+ datetime(2008, 12, 31): datetime(2008, 12, 15),
+ datetime(2006, 12, 29): datetime(2006, 12, 15),
+ datetime(2006, 12, 15): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 15)}))
+
+ offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
+ datetime(2007, 1, 1): datetime(2006, 12, 4),
+ datetime(2007, 1, 4): datetime(2007, 1, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 4),
+ datetime(2008, 12, 31): datetime(2008, 12, 4),
+ datetime(2006, 12, 5): datetime(2006, 12, 4),
+ datetime(2006, 12, 30): datetime(2006, 12, 4),
+ datetime(2006, 12, 2): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 4)}))
+
+ offset_cases.append((SemiMonthBegin(-2), {
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 1),
+ datetime(2008, 6, 14): datetime(2008, 5, 15),
+ datetime(2008, 12, 31): datetime(2008, 12, 1),
+ datetime(2006, 12, 29): datetime(2006, 12, 1),
+ datetime(2006, 12, 15): datetime(2006, 11, 15),
+ datetime(2007, 1, 1): datetime(2006, 12, 1)}))
- for dt, expected in tests:
- assert_onOffset(SemiMonthBegin(), dt, expected)
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_apply_index(self, case):
+ offset, cases = case
+ s = DatetimeIndex(cases.keys())
+ result = offset.apply_index(s)
+ exp = DatetimeIndex(cases.values())
+ tm.assert_index_equal(result, exp)
+
+ on_offset_cases = [(datetime(2007, 12, 1), True),
+ (datetime(2007, 12, 15), True),
+ (datetime(2007, 12, 14), False),
+ (datetime(2007, 12, 31), False),
+ (datetime(2008, 2, 15), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ dt, expected = case
+ assert_onOffset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize('klass,assert_func',
[(Series, tm.assert_series_equal),
@@ -2982,1276 +2740,6 @@ def test_vectorized_offset_addition(self, klass, assert_func):
assert_func(result2, exp)
-class TestBQuarterBegin(Base):
- _offset = BQuarterBegin
-
- def test_repr(self):
- assert (repr(BQuarterBegin()) ==
- "<BusinessQuarterBegin: startingMonth=3>")
- assert (repr(BQuarterBegin(startingMonth=3)) ==
- "<BusinessQuarterBegin: startingMonth=3>")
- assert (repr(BQuarterBegin(startingMonth=1)) ==
- "<BusinessQuarterBegin: startingMonth=1>")
-
- def test_isAnchored(self):
- assert BQuarterBegin(startingMonth=1).isAnchored()
- assert BQuarterBegin().isAnchored()
- assert not BQuarterBegin(2, startingMonth=1).isAnchored()
-
- offset_cases = []
- offset_cases.append((BQuarterBegin(startingMonth=1), {
- datetime(2008, 1, 1): datetime(2008, 4, 1),
- datetime(2008, 1, 31): datetime(2008, 4, 1),
- datetime(2008, 2, 15): datetime(2008, 4, 1),
- datetime(2008, 2, 29): datetime(2008, 4, 1),
- datetime(2008, 3, 15): datetime(2008, 4, 1),
- datetime(2008, 3, 31): datetime(2008, 4, 1),
- datetime(2008, 4, 15): datetime(2008, 7, 1),
- datetime(2007, 3, 15): datetime(2007, 4, 2),
- datetime(2007, 2, 28): datetime(2007, 4, 2),
- datetime(2007, 1, 1): datetime(2007, 4, 2),
- datetime(2007, 4, 15): datetime(2007, 7, 2),
- datetime(2007, 7, 1): datetime(2007, 7, 2),
- datetime(2007, 4, 1): datetime(2007, 4, 2),
- datetime(2007, 4, 2): datetime(2007, 7, 2),
- datetime(2008, 4, 30): datetime(2008, 7, 1)}))
-
- offset_cases.append((BQuarterBegin(startingMonth=2), {
- datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2008, 1, 15): datetime(2008, 2, 1),
- datetime(2008, 2, 29): datetime(2008, 5, 1),
- datetime(2008, 3, 15): datetime(2008, 5, 1),
- datetime(2008, 3, 31): datetime(2008, 5, 1),
- datetime(2008, 4, 15): datetime(2008, 5, 1),
- datetime(2008, 8, 15): datetime(2008, 11, 3),
- datetime(2008, 9, 15): datetime(2008, 11, 3),
- datetime(2008, 11, 1): datetime(2008, 11, 3),
- datetime(2008, 4, 30): datetime(2008, 5, 1)}))
-
- offset_cases.append((BQuarterBegin(startingMonth=1, n=0), {
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2007, 12, 31): datetime(2008, 1, 1),
- datetime(2008, 2, 15): datetime(2008, 4, 1),
- datetime(2008, 2, 29): datetime(2008, 4, 1),
- datetime(2008, 1, 15): datetime(2008, 4, 1),
- datetime(2008, 2, 27): datetime(2008, 4, 1),
- datetime(2008, 3, 15): datetime(2008, 4, 1),
- datetime(2007, 4, 1): datetime(2007, 4, 2),
- datetime(2007, 4, 2): datetime(2007, 4, 2),
- datetime(2007, 7, 1): datetime(2007, 7, 2),
- datetime(2007, 4, 15): datetime(2007, 7, 2),
- datetime(2007, 7, 2): datetime(2007, 7, 2)}))
-
- offset_cases.append((BQuarterBegin(startingMonth=1, n=-1), {
- datetime(2008, 1, 1): datetime(2007, 10, 1),
- datetime(2008, 1, 31): datetime(2008, 1, 1),
- datetime(2008, 2, 15): datetime(2008, 1, 1),
- datetime(2008, 2, 29): datetime(2008, 1, 1),
- datetime(2008, 3, 15): datetime(2008, 1, 1),
- datetime(2008, 3, 31): datetime(2008, 1, 1),
- datetime(2008, 4, 15): datetime(2008, 4, 1),
- datetime(2007, 7, 3): datetime(2007, 7, 2),
- datetime(2007, 4, 3): datetime(2007, 4, 2),
- datetime(2007, 7, 2): datetime(2007, 4, 2),
- datetime(2008, 4, 1): datetime(2008, 1, 1)}))
-
- offset_cases.append((BQuarterBegin(startingMonth=1, n=2), {
- datetime(2008, 1, 1): datetime(2008, 7, 1),
- datetime(2008, 1, 15): datetime(2008, 7, 1),
- datetime(2008, 2, 29): datetime(2008, 7, 1),
- datetime(2008, 3, 15): datetime(2008, 7, 1),
- datetime(2007, 3, 31): datetime(2007, 7, 2),
- datetime(2007, 4, 15): datetime(2007, 10, 1),
- datetime(2008, 4, 30): datetime(2008, 10, 1)}))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_offset_corner_case(self):
- # corner
- offset = BQuarterBegin(n=-1, startingMonth=1)
- assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
-
-
-class TestBQuarterEnd(Base):
- _offset = BQuarterEnd
-
- def test_repr(self):
- assert (repr(BQuarterEnd()) ==
- "<BusinessQuarterEnd: startingMonth=3>")
- assert (repr(BQuarterEnd(startingMonth=3)) ==
- "<BusinessQuarterEnd: startingMonth=3>")
- assert (repr(BQuarterEnd(startingMonth=1)) ==
- "<BusinessQuarterEnd: startingMonth=1>")
-
- def test_isAnchored(self):
- assert BQuarterEnd(startingMonth=1).isAnchored()
- assert BQuarterEnd().isAnchored()
- assert not BQuarterEnd(2, startingMonth=1).isAnchored()
-
- offset_cases = []
- offset_cases.append((BQuarterEnd(startingMonth=1),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 4, 30),
- datetime(2008, 2, 15): datetime(2008, 4, 30),
- datetime(2008, 2, 29): datetime(2008, 4, 30),
- datetime(2008, 3, 15): datetime(2008, 4, 30),
- datetime(2008, 3, 31): datetime(2008, 4, 30),
- datetime(2008, 4, 15): datetime(2008, 4, 30),
- datetime(2008, 4, 30): datetime(2008, 7, 31), }))
-
- offset_cases.append((BQuarterEnd(startingMonth=2),
- {datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2008, 2, 15): datetime(2008, 2, 29),
- datetime(2008, 2, 29): datetime(2008, 5, 30),
- datetime(2008, 3, 15): datetime(2008, 5, 30),
- datetime(2008, 3, 31): datetime(2008, 5, 30),
- datetime(2008, 4, 15): datetime(2008, 5, 30),
- datetime(2008, 4, 30): datetime(2008, 5, 30), }))
-
- offset_cases.append((BQuarterEnd(startingMonth=1, n=0),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2008, 2, 15): datetime(2008, 4, 30),
- datetime(2008, 2, 29): datetime(2008, 4, 30),
- datetime(2008, 3, 15): datetime(2008, 4, 30),
- datetime(2008, 3, 31): datetime(2008, 4, 30),
- datetime(2008, 4, 15): datetime(2008, 4, 30),
- datetime(2008, 4, 30): datetime(2008, 4, 30), }))
-
- offset_cases.append((BQuarterEnd(startingMonth=1, n=-1),
- {datetime(2008, 1, 1): datetime(2007, 10, 31),
- datetime(2008, 1, 31): datetime(2007, 10, 31),
- datetime(2008, 2, 15): datetime(2008, 1, 31),
- datetime(2008, 2, 29): datetime(2008, 1, 31),
- datetime(2008, 3, 15): datetime(2008, 1, 31),
- datetime(2008, 3, 31): datetime(2008, 1, 31),
- datetime(2008, 4, 15): datetime(2008, 1, 31),
- datetime(2008, 4, 30): datetime(2008, 1, 31), }))
-
- offset_cases.append((BQuarterEnd(startingMonth=1, n=2),
- {datetime(2008, 1, 31): datetime(2008, 7, 31),
- datetime(2008, 2, 15): datetime(2008, 7, 31),
- datetime(2008, 2, 29): datetime(2008, 7, 31),
- datetime(2008, 3, 15): datetime(2008, 7, 31),
- datetime(2008, 3, 31): datetime(2008, 7, 31),
- datetime(2008, 4, 15): datetime(2008, 7, 31),
- datetime(2008, 4, 30): datetime(2008, 10, 31), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_offset_corner_case(self):
- # corner
- offset = BQuarterEnd(n=-1, startingMonth=1)
- assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
-
- on_offset_cases = [
- (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
- (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
- (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
- (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
- (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
- (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
- (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
- (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
- (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
- (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
- (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-def makeFY5253LastOfMonthQuarter(*args, **kwds):
- return FY5253Quarter(*args, variation="last", **kwds)
-
-
-def makeFY5253NearestEndMonthQuarter(*args, **kwds):
- return FY5253Quarter(*args, variation="nearest", **kwds)
-
-
-def makeFY5253NearestEndMonth(*args, **kwds):
- return FY5253(*args, variation="nearest", **kwds)
-
-
-def makeFY5253LastOfMonth(*args, **kwds):
- return FY5253(*args, variation="last", **kwds)
-
-
-class TestFY5253LastOfMonth(Base):
- offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8,
- weekday=WeekDay.SAT)
- offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9,
- weekday=WeekDay.SAT)
-
- on_offset_cases = [
- # From Wikipedia (see:
- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
- (offset_lom_sat_aug, datetime(2006, 8, 26), True),
- (offset_lom_sat_aug, datetime(2007, 8, 25), True),
- (offset_lom_sat_aug, datetime(2008, 8, 30), True),
- (offset_lom_sat_aug, datetime(2009, 8, 29), True),
- (offset_lom_sat_aug, datetime(2010, 8, 28), True),
- (offset_lom_sat_aug, datetime(2011, 8, 27), True),
- (offset_lom_sat_aug, datetime(2012, 8, 25), True),
- (offset_lom_sat_aug, datetime(2013, 8, 31), True),
- (offset_lom_sat_aug, datetime(2014, 8, 30), True),
- (offset_lom_sat_aug, datetime(2015, 8, 29), True),
- (offset_lom_sat_aug, datetime(2016, 8, 27), True),
- (offset_lom_sat_aug, datetime(2017, 8, 26), True),
- (offset_lom_sat_aug, datetime(2018, 8, 25), True),
- (offset_lom_sat_aug, datetime(2019, 8, 31), True),
-
- (offset_lom_sat_aug, datetime(2006, 8, 27), False),
- (offset_lom_sat_aug, datetime(2007, 8, 28), False),
- (offset_lom_sat_aug, datetime(2008, 8, 31), False),
- (offset_lom_sat_aug, datetime(2009, 8, 30), False),
- (offset_lom_sat_aug, datetime(2010, 8, 29), False),
- (offset_lom_sat_aug, datetime(2011, 8, 28), False),
-
- (offset_lom_sat_aug, datetime(2006, 8, 25), False),
- (offset_lom_sat_aug, datetime(2007, 8, 24), False),
- (offset_lom_sat_aug, datetime(2008, 8, 29), False),
- (offset_lom_sat_aug, datetime(2009, 8, 28), False),
- (offset_lom_sat_aug, datetime(2010, 8, 27), False),
- (offset_lom_sat_aug, datetime(2011, 8, 26), False),
- (offset_lom_sat_aug, datetime(2019, 8, 30), False),
-
- # From GMCR (see for example:
- # http://yahoo.brand.edgar-online.com/Default.aspx?
- # companyid=3184&formtypeID=7)
- (offset_lom_sat_sep, datetime(2010, 9, 25), True),
- (offset_lom_sat_sep, datetime(2011, 9, 24), True),
- (offset_lom_sat_sep, datetime(2012, 9, 29), True)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
- def test_apply(self):
- offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8,
- weekday=WeekDay.SAT)
- offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8,
- weekday=WeekDay.SAT)
-
- date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
- datetime(2008, 8, 30), datetime(2009, 8, 29),
- datetime(2010, 8, 28), datetime(2011, 8, 27),
- datetime(2012, 8, 25), datetime(2013, 8, 31),
- datetime(2014, 8, 30), datetime(2015, 8, 29),
- datetime(2016, 8, 27)]
-
- tests = [
- (offset_lom_aug_sat, date_seq_lom_aug_sat),
- (offset_lom_aug_sat_1, date_seq_lom_aug_sat),
- (offset_lom_aug_sat, [
- datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
- (offset_lom_aug_sat_1, [
- datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
- (makeFY5253LastOfMonth(n=-1, startingMonth=8,
- weekday=WeekDay.SAT),
- list(reversed(date_seq_lom_aug_sat))),
- ]
- for test in tests:
- offset, data = test
- current = data[0]
- for datum in data[1:]:
- current = current + offset
- assert current == datum
-
-
-class TestFY5253NearestEndMonth(Base):
-
- def test_get_target_month_end(self):
- assert (makeFY5253NearestEndMonth(
- startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(
- datetime(2013, 1, 1)) == datetime(2013, 8, 31))
- assert (makeFY5253NearestEndMonth(
- startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(
- datetime(2013, 1, 1)) == datetime(2013, 12, 31))
- assert (makeFY5253NearestEndMonth(
- startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(
- datetime(2013, 1, 1)) == datetime(2013, 2, 28))
-
- def test_get_year_end(self):
- assert (makeFY5253NearestEndMonth(
- startingMonth=8, weekday=WeekDay.SAT).get_year_end(
- datetime(2013, 1, 1)) == datetime(2013, 8, 31))
- assert (makeFY5253NearestEndMonth(
- startingMonth=8, weekday=WeekDay.SUN).get_year_end(
- datetime(2013, 1, 1)) == datetime(2013, 9, 1))
- assert (makeFY5253NearestEndMonth(
- startingMonth=8, weekday=WeekDay.FRI).get_year_end(
- datetime(2013, 1, 1)) == datetime(2013, 8, 30))
-
- offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
- variation="nearest")
- assert (offset_n.get_year_end(datetime(2012, 1, 1)) ==
- datetime(2013, 1, 1))
- assert (offset_n.get_year_end(datetime(2012, 1, 10)) ==
- datetime(2013, 1, 1))
-
- assert (offset_n.get_year_end(datetime(2013, 1, 1)) ==
- datetime(2013, 12, 31))
- assert (offset_n.get_year_end(datetime(2013, 1, 2)) ==
- datetime(2013, 12, 31))
- assert (offset_n.get_year_end(datetime(2013, 1, 3)) ==
- datetime(2013, 12, 31))
- assert (offset_n.get_year_end(datetime(2013, 1, 10)) ==
- datetime(2013, 12, 31))
-
- JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
- assert (JNJ.get_year_end(datetime(2006, 1, 1)) ==
- datetime(2006, 12, 31))
-
- offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8,
- weekday=WeekDay.SAT)
- offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8,
- weekday=WeekDay.THU)
- offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
- variation="nearest")
-
- on_offset_cases = [
- # From Wikipedia (see:
- # http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
- # #Saturday_nearest_the_end_of_month)
- # 2006-09-02 2006 September 2
- # 2007-09-01 2007 September 1
- # 2008-08-30 2008 August 30 (leap year)
- # 2009-08-29 2009 August 29
- # 2010-08-28 2010 August 28
- # 2011-09-03 2011 September 3
- # 2012-09-01 2012 September 1 (leap year)
- # 2013-08-31 2013 August 31
- # 2014-08-30 2014 August 30
- # 2015-08-29 2015 August 29
- # 2016-09-03 2016 September 3 (leap year)
- # 2017-09-02 2017 September 2
- # 2018-09-01 2018 September 1
- # 2019-08-31 2019 August 31
- (offset_lom_aug_sat, datetime(2006, 9, 2), True),
- (offset_lom_aug_sat, datetime(2007, 9, 1), True),
- (offset_lom_aug_sat, datetime(2008, 8, 30), True),
- (offset_lom_aug_sat, datetime(2009, 8, 29), True),
- (offset_lom_aug_sat, datetime(2010, 8, 28), True),
- (offset_lom_aug_sat, datetime(2011, 9, 3), True),
-
- (offset_lom_aug_sat, datetime(2016, 9, 3), True),
- (offset_lom_aug_sat, datetime(2017, 9, 2), True),
- (offset_lom_aug_sat, datetime(2018, 9, 1), True),
- (offset_lom_aug_sat, datetime(2019, 8, 31), True),
-
- (offset_lom_aug_sat, datetime(2006, 8, 27), False),
- (offset_lom_aug_sat, datetime(2007, 8, 28), False),
- (offset_lom_aug_sat, datetime(2008, 8, 31), False),
- (offset_lom_aug_sat, datetime(2009, 8, 30), False),
- (offset_lom_aug_sat, datetime(2010, 8, 29), False),
- (offset_lom_aug_sat, datetime(2011, 8, 28), False),
-
- (offset_lom_aug_sat, datetime(2006, 8, 25), False),
- (offset_lom_aug_sat, datetime(2007, 8, 24), False),
- (offset_lom_aug_sat, datetime(2008, 8, 29), False),
- (offset_lom_aug_sat, datetime(2009, 8, 28), False),
- (offset_lom_aug_sat, datetime(2010, 8, 27), False),
- (offset_lom_aug_sat, datetime(2011, 8, 26), False),
- (offset_lom_aug_sat, datetime(2019, 8, 30), False),
-
- # From Micron, see:
- # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
- (offset_lom_aug_thu, datetime(2012, 8, 30), True),
- (offset_lom_aug_thu, datetime(2011, 9, 1), True),
-
- (offset_n, datetime(2012, 12, 31), False),
- (offset_n, datetime(2013, 1, 1), True),
- (offset_n, datetime(2013, 1, 2), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
- def test_apply(self):
- date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
- datetime(2008, 8, 30), datetime(2009, 8, 29),
- datetime(2010, 8, 28), datetime(2011, 9, 3)]
-
- JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
- datetime(2006, 12, 31), datetime(2007, 12, 30),
- datetime(2008, 12, 28), datetime(2010, 1, 3),
- datetime(2011, 1, 2), datetime(2012, 1, 1),
- datetime(2012, 12, 30)]
-
- DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5,
- variation="nearest")
-
- tests = [
- (makeFY5253NearestEndMonth(startingMonth=8,
- weekday=WeekDay.SAT),
- date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(n=1, startingMonth=8,
- weekday=WeekDay.SAT),
- date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
- [datetime(2006, 9, 1)] + date_seq_nem_8_sat),
- (makeFY5253NearestEndMonth(n=1, startingMonth=8,
- weekday=WeekDay.SAT),
- [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
- (makeFY5253NearestEndMonth(n=-1, startingMonth=8,
- weekday=WeekDay.SAT),
- list(reversed(date_seq_nem_8_sat))),
- (makeFY5253NearestEndMonth(n=1, startingMonth=12,
- weekday=WeekDay.SUN), JNJ),
- (makeFY5253NearestEndMonth(n=-1, startingMonth=12,
- weekday=WeekDay.SUN),
- list(reversed(JNJ))),
- (makeFY5253NearestEndMonth(n=1, startingMonth=12,
- weekday=WeekDay.SUN),
- [datetime(2005, 1, 2), datetime(2006, 1, 1)]),
- (makeFY5253NearestEndMonth(n=1, startingMonth=12,
- weekday=WeekDay.SUN),
- [datetime(2006, 1, 2), datetime(2006, 12, 31)]),
- (DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)])
- ]
- for test in tests:
- offset, data = test
- current = data[0]
- for datum in data[1:]:
- current = current + offset
- assert current == datum
-
-
-class TestFY5253LastOfMonthQuarter(Base):
-
- def test_isAnchored(self):
- assert makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4).isAnchored()
- assert makeFY5253LastOfMonthQuarter(
- weekday=WeekDay.SAT, startingMonth=3,
- qtr_with_extra_week=4).isAnchored()
- assert not makeFY5253LastOfMonthQuarter(
- 2, startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4).isAnchored()
-
- def test_equality(self):
- assert (makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4) == makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
- assert (makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
- assert (makeFY5253LastOfMonthQuarter(
- startingMonth=1, weekday=WeekDay.SAT,
- qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
- startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
-
- def test_offset(self):
- offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
- offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
- offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
-
- offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
- offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
-
- GMCR = [datetime(2010, 3, 27), datetime(2010, 6, 26),
- datetime(2010, 9, 25), datetime(2010, 12, 25),
- datetime(2011, 3, 26), datetime(2011, 6, 25),
- datetime(2011, 9, 24), datetime(2011, 12, 24),
- datetime(2012, 3, 24), datetime(2012, 6, 23),
- datetime(2012, 9, 29), datetime(2012, 12, 29),
- datetime(2013, 3, 30), datetime(2013, 6, 29)]
-
- assert_offset_equal(offset, base=GMCR[0], expected=GMCR[1])
- assert_offset_equal(offset, base=GMCR[0] + relativedelta(days=-1),
- expected=GMCR[0])
- assert_offset_equal(offset, base=GMCR[1], expected=GMCR[2])
-
- assert_offset_equal(offset2, base=GMCR[0], expected=GMCR[2])
- assert_offset_equal(offset4, base=GMCR[0], expected=GMCR[4])
-
- assert_offset_equal(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
- assert_offset_equal(offset_neg1,
- base=GMCR[-1] + relativedelta(days=+1),
- expected=GMCR[-1])
- assert_offset_equal(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
-
- date = GMCR[0] + relativedelta(days=-1)
- for expected in GMCR:
- assert_offset_equal(offset, date, expected)
- date = date + offset
-
- date = GMCR[-1] + relativedelta(days=+1)
- for expected in reversed(GMCR):
- assert_offset_equal(offset_neg1, date, expected)
- date = date + offset_neg1
-
- lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
- lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
-
- on_offset_cases = [
- # From Wikipedia
- (lomq_aug_sat_4, datetime(2006, 8, 26), True),
- (lomq_aug_sat_4, datetime(2007, 8, 25), True),
- (lomq_aug_sat_4, datetime(2008, 8, 30), True),
- (lomq_aug_sat_4, datetime(2009, 8, 29), True),
- (lomq_aug_sat_4, datetime(2010, 8, 28), True),
- (lomq_aug_sat_4, datetime(2011, 8, 27), True),
- (lomq_aug_sat_4, datetime(2019, 8, 31), True),
-
- (lomq_aug_sat_4, datetime(2006, 8, 27), False),
- (lomq_aug_sat_4, datetime(2007, 8, 28), False),
- (lomq_aug_sat_4, datetime(2008, 8, 31), False),
- (lomq_aug_sat_4, datetime(2009, 8, 30), False),
- (lomq_aug_sat_4, datetime(2010, 8, 29), False),
- (lomq_aug_sat_4, datetime(2011, 8, 28), False),
-
- (lomq_aug_sat_4, datetime(2006, 8, 25), False),
- (lomq_aug_sat_4, datetime(2007, 8, 24), False),
- (lomq_aug_sat_4, datetime(2008, 8, 29), False),
- (lomq_aug_sat_4, datetime(2009, 8, 28), False),
- (lomq_aug_sat_4, datetime(2010, 8, 27), False),
- (lomq_aug_sat_4, datetime(2011, 8, 26), False),
- (lomq_aug_sat_4, datetime(2019, 8, 30), False),
-
- # From GMCR
- (lomq_sep_sat_4, datetime(2010, 9, 25), True),
- (lomq_sep_sat_4, datetime(2011, 9, 24), True),
- (lomq_sep_sat_4, datetime(2012, 9, 29), True),
-
- (lomq_sep_sat_4, datetime(2013, 6, 29), True),
- (lomq_sep_sat_4, datetime(2012, 6, 23), True),
- (lomq_sep_sat_4, datetime(2012, 6, 30), False),
-
- (lomq_sep_sat_4, datetime(2013, 3, 30), True),
- (lomq_sep_sat_4, datetime(2012, 3, 24), True),
-
- (lomq_sep_sat_4, datetime(2012, 12, 29), True),
- (lomq_sep_sat_4, datetime(2011, 12, 24), True),
-
- # INTC (extra week in Q1)
- # See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
- (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1),
- datetime(2011, 4, 2), True),
-
- # see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
- (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1),
- datetime(2012, 12, 29), True),
- (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1),
- datetime(2011, 12, 31), True),
- (makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1),
- datetime(2010, 12, 25), True)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
- def test_year_has_extra_week(self):
- # End of long Q1
- assert makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))
-
- # Start of long Q1
- assert makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))
-
- # End of year before year with long Q1
- assert not makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25))
-
- for year in [x
- for x in range(1994, 2011 + 1)
- if x not in [2011, 2005, 2000, 1994]]:
- assert not makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(
- datetime(year, 4, 2))
-
- # Other long years
- assert makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))
-
- assert makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))
-
- assert makeFY5253LastOfMonthQuarter(
- 1, startingMonth=12, weekday=WeekDay.SAT,
- qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))
-
- def test_get_weeks(self):
- sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=1)
- sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
- weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
-
- assert sat_dec_1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]
- assert sat_dec_4.get_weeks(datetime(2011, 4, 2)) == [13, 13, 13, 14]
- assert sat_dec_1.get_weeks(datetime(2010, 12, 25)) == [13, 13, 13, 13]
-
-
-class TestFY5253NearestEndMonthQuarter(Base):
-
- offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(
- 1, startingMonth=8, weekday=WeekDay.SAT,
- qtr_with_extra_week=4)
- offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(
- 1, startingMonth=8, weekday=WeekDay.THU,
- qtr_with_extra_week=4)
- offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
- variation="nearest")
-
- on_offset_cases = [
- # From Wikipedia
- (offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
- (offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
- (offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
- (offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
- (offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
- (offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
-
- (offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
- (offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
- (offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
- (offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
-
- (offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
- (offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
- (offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
- (offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
- (offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
- (offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
-
- (offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
- (offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
- (offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
- (offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
- (offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
- (offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
- (offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
-
- # From Micron, see:
- # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
- (offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
- (offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
-
- # See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
- (offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
- (offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
- (offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
- (offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
- (offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
- (offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
-
- (offset_n, datetime(2012, 12, 31), False),
- (offset_n, datetime(2013, 1, 1), True),
- (offset_n, datetime(2013, 1, 2), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
- def test_offset(self):
- offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8,
- weekday=WeekDay.THU,
- qtr_with_extra_week=4)
-
- MU = [datetime(2012, 5, 31),
- datetime(2012, 8, 30), datetime(2012, 11, 29),
- datetime(2013, 2, 28), datetime(2013, 5, 30)]
-
- date = MU[0] + relativedelta(days=-1)
- for expected in MU:
- assert_offset_equal(offset, date, expected)
- date = date + offset
-
- assert_offset_equal(offset,
- datetime(2012, 5, 31),
- datetime(2012, 8, 30))
- assert_offset_equal(offset,
- datetime(2012, 5, 30),
- datetime(2012, 5, 31))
-
- offset2 = FY5253Quarter(weekday=5, startingMonth=12, variation="last",
- qtr_with_extra_week=4)
-
- assert_offset_equal(offset2,
- datetime(2013, 1, 15),
- datetime(2013, 3, 30))
-
-
-class TestQuarterBegin(Base):
-
- def test_repr(self):
- assert (repr(QuarterBegin()) ==
- "<QuarterBegin: startingMonth=3>")
- assert (repr(QuarterBegin(startingMonth=3)) ==
- "<QuarterBegin: startingMonth=3>")
- assert (repr(QuarterBegin(startingMonth=1)) ==
- "<QuarterBegin: startingMonth=1>")
-
- def test_isAnchored(self):
- assert QuarterBegin(startingMonth=1).isAnchored()
- assert QuarterBegin().isAnchored()
- assert not QuarterBegin(2, startingMonth=1).isAnchored()
-
- offset_cases = []
- offset_cases.append((QuarterBegin(startingMonth=1),
- {datetime(2007, 12, 1): datetime(2008, 1, 1),
- datetime(2008, 1, 1): datetime(2008, 4, 1),
- datetime(2008, 2, 15): datetime(2008, 4, 1),
- datetime(2008, 2, 29): datetime(2008, 4, 1),
- datetime(2008, 3, 15): datetime(2008, 4, 1),
- datetime(2008, 3, 31): datetime(2008, 4, 1),
- datetime(2008, 4, 15): datetime(2008, 7, 1),
- datetime(2008, 4, 1): datetime(2008, 7, 1), }))
-
- offset_cases.append((QuarterBegin(startingMonth=2),
- {datetime(2008, 1, 1): datetime(2008, 2, 1),
- datetime(2008, 1, 31): datetime(2008, 2, 1),
- datetime(2008, 1, 15): datetime(2008, 2, 1),
- datetime(2008, 2, 29): datetime(2008, 5, 1),
- datetime(2008, 3, 15): datetime(2008, 5, 1),
- datetime(2008, 3, 31): datetime(2008, 5, 1),
- datetime(2008, 4, 15): datetime(2008, 5, 1),
- datetime(2008, 4, 30): datetime(2008, 5, 1), }))
-
- offset_cases.append((QuarterBegin(startingMonth=1, n=0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 12, 1): datetime(2009, 1, 1),
- datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 2, 15): datetime(2008, 4, 1),
- datetime(2008, 2, 29): datetime(2008, 4, 1),
- datetime(2008, 3, 15): datetime(2008, 4, 1),
- datetime(2008, 3, 31): datetime(2008, 4, 1),
- datetime(2008, 4, 15): datetime(2008, 7, 1),
- datetime(2008, 4, 30): datetime(2008, 7, 1), }))
-
- offset_cases.append((QuarterBegin(startingMonth=1, n=-1),
- {datetime(2008, 1, 1): datetime(2007, 10, 1),
- datetime(2008, 1, 31): datetime(2008, 1, 1),
- datetime(2008, 2, 15): datetime(2008, 1, 1),
- datetime(2008, 2, 29): datetime(2008, 1, 1),
- datetime(2008, 3, 15): datetime(2008, 1, 1),
- datetime(2008, 3, 31): datetime(2008, 1, 1),
- datetime(2008, 4, 15): datetime(2008, 4, 1),
- datetime(2008, 4, 30): datetime(2008, 4, 1),
- datetime(2008, 7, 1): datetime(2008, 4, 1)}))
-
- offset_cases.append((QuarterBegin(startingMonth=1, n=2),
- {datetime(2008, 1, 1): datetime(2008, 7, 1),
- datetime(2008, 2, 15): datetime(2008, 7, 1),
- datetime(2008, 2, 29): datetime(2008, 7, 1),
- datetime(2008, 3, 15): datetime(2008, 7, 1),
- datetime(2008, 3, 31): datetime(2008, 7, 1),
- datetime(2008, 4, 15): datetime(2008, 10, 1),
- datetime(2008, 4, 1): datetime(2008, 10, 1), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_offset_corner_case(self):
- # corner
- offset = QuarterBegin(n=-1, startingMonth=1)
- assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
-
-
-class TestQuarterEnd(Base):
- _offset = QuarterEnd
-
- def test_repr(self):
- assert (repr(QuarterEnd()) ==
- "<QuarterEnd: startingMonth=3>")
- assert (repr(QuarterEnd(startingMonth=3)) ==
- "<QuarterEnd: startingMonth=3>")
- assert (repr(QuarterEnd(startingMonth=1)) ==
- "<QuarterEnd: startingMonth=1>")
-
- def test_isAnchored(self):
- assert QuarterEnd(startingMonth=1).isAnchored()
- assert QuarterEnd().isAnchored()
- assert not QuarterEnd(2, startingMonth=1).isAnchored()
-
- offset_cases = []
- offset_cases.append((QuarterEnd(startingMonth=1),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 4, 30),
- datetime(2008, 2, 15): datetime(2008, 4, 30),
- datetime(2008, 2, 29): datetime(2008, 4, 30),
- datetime(2008, 3, 15): datetime(2008, 4, 30),
- datetime(2008, 3, 31): datetime(2008, 4, 30),
- datetime(2008, 4, 15): datetime(2008, 4, 30),
- datetime(2008, 4, 30): datetime(2008, 7, 31), }))
-
- offset_cases.append((QuarterEnd(startingMonth=2),
- {datetime(2008, 1, 1): datetime(2008, 2, 29),
- datetime(2008, 1, 31): datetime(2008, 2, 29),
- datetime(2008, 2, 15): datetime(2008, 2, 29),
- datetime(2008, 2, 29): datetime(2008, 5, 31),
- datetime(2008, 3, 15): datetime(2008, 5, 31),
- datetime(2008, 3, 31): datetime(2008, 5, 31),
- datetime(2008, 4, 15): datetime(2008, 5, 31),
- datetime(2008, 4, 30): datetime(2008, 5, 31), }))
-
- offset_cases.append((QuarterEnd(startingMonth=1, n=0),
- {datetime(2008, 1, 1): datetime(2008, 1, 31),
- datetime(2008, 1, 31): datetime(2008, 1, 31),
- datetime(2008, 2, 15): datetime(2008, 4, 30),
- datetime(2008, 2, 29): datetime(2008, 4, 30),
- datetime(2008, 3, 15): datetime(2008, 4, 30),
- datetime(2008, 3, 31): datetime(2008, 4, 30),
- datetime(2008, 4, 15): datetime(2008, 4, 30),
- datetime(2008, 4, 30): datetime(2008, 4, 30), }))
-
- offset_cases.append((QuarterEnd(startingMonth=1, n=-1),
- {datetime(2008, 1, 1): datetime(2007, 10, 31),
- datetime(2008, 1, 31): datetime(2007, 10, 31),
- datetime(2008, 2, 15): datetime(2008, 1, 31),
- datetime(2008, 2, 29): datetime(2008, 1, 31),
- datetime(2008, 3, 15): datetime(2008, 1, 31),
- datetime(2008, 3, 31): datetime(2008, 1, 31),
- datetime(2008, 4, 15): datetime(2008, 1, 31),
- datetime(2008, 4, 30): datetime(2008, 1, 31),
- datetime(2008, 7, 1): datetime(2008, 4, 30)}))
-
- offset_cases.append((QuarterEnd(startingMonth=1, n=2),
- {datetime(2008, 1, 31): datetime(2008, 7, 31),
- datetime(2008, 2, 15): datetime(2008, 7, 31),
- datetime(2008, 2, 29): datetime(2008, 7, 31),
- datetime(2008, 3, 15): datetime(2008, 7, 31),
- datetime(2008, 3, 31): datetime(2008, 7, 31),
- datetime(2008, 4, 15): datetime(2008, 7, 31),
- datetime(2008, 4, 30): datetime(2008, 10, 31), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- def test_offset_corner_case(self):
- # corner
- offset = QuarterEnd(n=-1, startingMonth=1)
- assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
-
- on_offset_cases = [
- (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
- (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
- (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
- (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
- (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
- (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
- (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
- (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
- (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
- (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
- (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
- (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
- (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
- (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
- (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
- (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
- (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
- (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
- (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
- (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
- (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
- (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
- (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
- (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
- (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
- (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
- (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
- (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
- (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
- (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-class TestBYearBegin(Base):
- _offset = BYearBegin
-
- def test_misspecified(self):
- pytest.raises(ValueError, BYearBegin, month=13)
- pytest.raises(ValueError, BYearEnd, month=13)
-
- offset_cases = []
- offset_cases.append((BYearBegin(),
- {datetime(2008, 1, 1): datetime(2009, 1, 1),
- datetime(2008, 6, 30): datetime(2009, 1, 1),
- datetime(2008, 12, 31): datetime(2009, 1, 1),
- datetime(2011, 1, 1): datetime(2011, 1, 3),
- datetime(2011, 1, 3): datetime(2012, 1, 2),
- datetime(2005, 12, 30): datetime(2006, 1, 2),
- datetime(2005, 12, 31): datetime(2006, 1, 2)}))
-
- offset_cases.append((BYearBegin(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 6, 30): datetime(2009, 1, 1),
- datetime(2008, 12, 31): datetime(2009, 1, 1),
- datetime(2005, 12, 30): datetime(2006, 1, 2),
- datetime(2005, 12, 31): datetime(2006, 1, 2), }))
-
- offset_cases.append((BYearBegin(-1),
- {datetime(2007, 1, 1): datetime(2006, 1, 2),
- datetime(2009, 1, 4): datetime(2009, 1, 1),
- datetime(2009, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 6, 30): datetime(2008, 1, 1),
- datetime(2008, 12, 31): datetime(2008, 1, 1),
- datetime(2006, 12, 29): datetime(2006, 1, 2),
- datetime(2006, 12, 30): datetime(2006, 1, 2),
- datetime(2006, 1, 1): datetime(2005, 1, 3), }))
-
- offset_cases.append((BYearBegin(-2),
- {datetime(2007, 1, 1): datetime(2005, 1, 3),
- datetime(2007, 6, 30): datetime(2006, 1, 2),
- datetime(2008, 12, 31): datetime(2007, 1, 1), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
-
-class TestYearBegin(Base):
- _offset = YearBegin
-
- def test_misspecified(self):
- pytest.raises(ValueError, YearBegin, month=13)
-
- offset_cases = []
- offset_cases.append((YearBegin(),
- {datetime(2008, 1, 1): datetime(2009, 1, 1),
- datetime(2008, 6, 30): datetime(2009, 1, 1),
- datetime(2008, 12, 31): datetime(2009, 1, 1),
- datetime(2005, 12, 30): datetime(2006, 1, 1),
- datetime(2005, 12, 31): datetime(2006, 1, 1), }))
-
- offset_cases.append((YearBegin(0),
- {datetime(2008, 1, 1): datetime(2008, 1, 1),
- datetime(2008, 6, 30): datetime(2009, 1, 1),
- datetime(2008, 12, 31): datetime(2009, 1, 1),
- datetime(2005, 12, 30): datetime(2006, 1, 1),
- datetime(2005, 12, 31): datetime(2006, 1, 1), }))
-
- offset_cases.append((YearBegin(3),
- {datetime(2008, 1, 1): datetime(2011, 1, 1),
- datetime(2008, 6, 30): datetime(2011, 1, 1),
- datetime(2008, 12, 31): datetime(2011, 1, 1),
- datetime(2005, 12, 30): datetime(2008, 1, 1),
- datetime(2005, 12, 31): datetime(2008, 1, 1), }))
-
- offset_cases.append((YearBegin(-1),
- {datetime(2007, 1, 1): datetime(2006, 1, 1),
- datetime(2007, 1, 15): datetime(2007, 1, 1),
- datetime(2008, 6, 30): datetime(2008, 1, 1),
- datetime(2008, 12, 31): datetime(2008, 1, 1),
- datetime(2006, 12, 29): datetime(2006, 1, 1),
- datetime(2006, 12, 30): datetime(2006, 1, 1),
- datetime(2007, 1, 1): datetime(2006, 1, 1), }))
-
- offset_cases.append((YearBegin(-2),
- {datetime(2007, 1, 1): datetime(2005, 1, 1),
- datetime(2008, 6, 30): datetime(2007, 1, 1),
- datetime(2008, 12, 31): datetime(2007, 1, 1), }))
-
- offset_cases.append((YearBegin(month=4),
- {datetime(2007, 4, 1): datetime(2008, 4, 1),
- datetime(2007, 4, 15): datetime(2008, 4, 1),
- datetime(2007, 3, 1): datetime(2007, 4, 1),
- datetime(2007, 12, 15): datetime(2008, 4, 1),
- datetime(2012, 1, 31): datetime(2012, 4, 1), }))
-
- offset_cases.append((YearBegin(0, month=4),
- {datetime(2007, 4, 1): datetime(2007, 4, 1),
- datetime(2007, 3, 1): datetime(2007, 4, 1),
- datetime(2007, 12, 15): datetime(2008, 4, 1),
- datetime(2012, 1, 31): datetime(2012, 4, 1), }))
-
- offset_cases.append((YearBegin(4, month=4),
- {datetime(2007, 4, 1): datetime(2011, 4, 1),
- datetime(2007, 4, 15): datetime(2011, 4, 1),
- datetime(2007, 3, 1): datetime(2010, 4, 1),
- datetime(2007, 12, 15): datetime(2011, 4, 1),
- datetime(2012, 1, 31): datetime(2015, 4, 1), }))
-
- offset_cases.append((YearBegin(-1, month=4),
- {datetime(2007, 4, 1): datetime(2006, 4, 1),
- datetime(2007, 3, 1): datetime(2006, 4, 1),
- datetime(2007, 12, 15): datetime(2007, 4, 1),
- datetime(2012, 1, 31): datetime(2011, 4, 1), }))
-
- offset_cases.append((YearBegin(-3, month=4),
- {datetime(2007, 4, 1): datetime(2004, 4, 1),
- datetime(2007, 3, 1): datetime(2004, 4, 1),
- datetime(2007, 12, 15): datetime(2005, 4, 1),
- datetime(2012, 1, 31): datetime(2009, 4, 1), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False),
- (YearBegin(), datetime(2008, 1, 1), True),
- (YearBegin(), datetime(2006, 12, 31), False),
- (YearBegin(), datetime(2006, 1, 2), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-class TestBYearEndLagged(Base):
-
- def test_bad_month_fail(self):
- pytest.raises(Exception, BYearEnd, month=13)
- pytest.raises(Exception, BYearEnd, month=0)
-
- offset_cases = []
- offset_cases.append((BYearEnd(month=6),
- {datetime(2008, 1, 1): datetime(2008, 6, 30),
- datetime(2007, 6, 30): datetime(2008, 6, 30)}, ))
-
- offset_cases.append((BYearEnd(n=-1, month=6),
- {datetime(2008, 1, 1): datetime(2007, 6, 29),
- datetime(2007, 6, 30): datetime(2007, 6, 29)}, ))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert base + offset == expected
-
- def test_roll(self):
- offset = BYearEnd(month=6)
- date = datetime(2009, 11, 30)
-
- assert offset.rollforward(date) == datetime(2010, 6, 30)
- assert offset.rollback(date) == datetime(2009, 6, 30)
-
- on_offset_cases = [(BYearEnd(month=2), datetime(2007, 2, 28), True),
- (BYearEnd(month=6), datetime(2007, 6, 30), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-class TestBYearEnd(Base):
- _offset = BYearEnd
-
- offset_cases = []
- offset_cases.append((BYearEnd(),
- {datetime(2008, 1, 1): datetime(2008, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 12, 31),
- datetime(2008, 12, 31): datetime(2009, 12, 31),
- datetime(2005, 12, 30): datetime(2006, 12, 29),
- datetime(2005, 12, 31): datetime(2006, 12, 29), }))
-
- offset_cases.append((BYearEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 12, 31),
- datetime(2008, 12, 31): datetime(2008, 12, 31),
- datetime(2005, 12, 31): datetime(2006, 12, 29), }))
-
- offset_cases.append((BYearEnd(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 29),
- datetime(2008, 6, 30): datetime(2007, 12, 31),
- datetime(2008, 12, 31): datetime(2007, 12, 31),
- datetime(2006, 12, 29): datetime(2005, 12, 30),
- datetime(2006, 12, 30): datetime(2006, 12, 29),
- datetime(2007, 1, 1): datetime(2006, 12, 29), }))
-
- offset_cases.append((BYearEnd(-2),
- {datetime(2007, 1, 1): datetime(2005, 12, 30),
- datetime(2008, 6, 30): datetime(2006, 12, 29),
- datetime(2008, 12, 31): datetime(2006, 12, 29), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True),
- (BYearEnd(), datetime(2008, 1, 1), False),
- (BYearEnd(), datetime(2006, 12, 31), False),
- (BYearEnd(), datetime(2006, 12, 29), True)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-class TestYearEnd(Base):
- _offset = YearEnd
-
- def test_misspecified(self):
- pytest.raises(ValueError, YearEnd, month=13)
-
- offset_cases = []
- offset_cases.append((YearEnd(),
- {datetime(2008, 1, 1): datetime(2008, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 12, 31),
- datetime(2008, 12, 31): datetime(2009, 12, 31),
- datetime(2005, 12, 30): datetime(2005, 12, 31),
- datetime(2005, 12, 31): datetime(2006, 12, 31), }))
-
- offset_cases.append((YearEnd(0),
- {datetime(2008, 1, 1): datetime(2008, 12, 31),
- datetime(2008, 6, 30): datetime(2008, 12, 31),
- datetime(2008, 12, 31): datetime(2008, 12, 31),
- datetime(2005, 12, 30): datetime(2005, 12, 31), }))
-
- offset_cases.append((YearEnd(-1),
- {datetime(2007, 1, 1): datetime(2006, 12, 31),
- datetime(2008, 6, 30): datetime(2007, 12, 31),
- datetime(2008, 12, 31): datetime(2007, 12, 31),
- datetime(2006, 12, 29): datetime(2005, 12, 31),
- datetime(2006, 12, 30): datetime(2005, 12, 31),
- datetime(2007, 1, 1): datetime(2006, 12, 31), }))
-
- offset_cases.append((YearEnd(-2),
- {datetime(2007, 1, 1): datetime(2005, 12, 31),
- datetime(2008, 6, 30): datetime(2006, 12, 31),
- datetime(2008, 12, 31): datetime(2006, 12, 31), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True),
- (YearEnd(), datetime(2008, 1, 1), False),
- (YearEnd(), datetime(2006, 12, 31), True),
- (YearEnd(), datetime(2006, 12, 29), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
-class TestYearEndDiffMonth(Base):
-
- offset_cases = []
- offset_cases.append((YearEnd(month=3),
- {datetime(2008, 1, 1): datetime(2008, 3, 31),
- datetime(2008, 2, 15): datetime(2008, 3, 31),
- datetime(2008, 3, 31): datetime(2009, 3, 31),
- datetime(2008, 3, 30): datetime(2008, 3, 31),
- datetime(2005, 3, 31): datetime(2006, 3, 31),
- datetime(2006, 7, 30): datetime(2007, 3, 31)}))
-
- offset_cases.append((YearEnd(0, month=3),
- {datetime(2008, 1, 1): datetime(2008, 3, 31),
- datetime(2008, 2, 28): datetime(2008, 3, 31),
- datetime(2008, 3, 31): datetime(2008, 3, 31),
- datetime(2005, 3, 30): datetime(2005, 3, 31), }))
-
- offset_cases.append((YearEnd(-1, month=3),
- {datetime(2007, 1, 1): datetime(2006, 3, 31),
- datetime(2008, 2, 28): datetime(2007, 3, 31),
- datetime(2008, 3, 31): datetime(2007, 3, 31),
- datetime(2006, 3, 29): datetime(2005, 3, 31),
- datetime(2006, 3, 30): datetime(2005, 3, 31),
- datetime(2007, 3, 1): datetime(2006, 3, 31), }))
-
- offset_cases.append((YearEnd(-2, month=3),
- {datetime(2007, 1, 1): datetime(2005, 3, 31),
- datetime(2008, 6, 30): datetime(2007, 3, 31),
- datetime(2008, 3, 31): datetime(2006, 3, 31), }))
-
- @pytest.mark.parametrize('case', offset_cases)
- def test_offset(self, case):
- offset, cases = case
- for base, expected in compat.iteritems(cases):
- assert_offset_equal(offset, base, expected)
-
- on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True),
- (YearEnd(month=3), datetime(2008, 1, 1), False),
- (YearEnd(month=3), datetime(2006, 3, 31), True),
- (YearEnd(month=3), datetime(2006, 3, 29), False)]
-
- @pytest.mark.parametrize('case', on_offset_cases)
- def test_onOffset(self, case):
- offset, dt, expected = case
- assert_onOffset(offset, dt, expected)
-
-
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
@@ -4285,12 +2773,6 @@ def test_get_offset_name(self):
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
- assert (makeFY5253LastOfMonthQuarter(
- weekday=1, startingMonth=3,
- qtr_with_extra_week=4).freqstr == "REQ-L-MAR-TUE-4")
- assert (makeFY5253NearestEndMonthQuarter(
- weekday=1, startingMonth=3,
- qtr_with_extra_week=3).freqstr == "REQ-N-MAR-TUE-3")
def test_get_offset():
@@ -4303,17 +2785,7 @@ def test_get_offset():
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
- ('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)),
- ("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0,
- startingMonth=12)),
- ("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
- ("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(
- weekday=1, startingMonth=3, qtr_with_extra_week=4)),
- ("REQ-L-DEC-MON-3", makeFY5253LastOfMonthQuarter(
- weekday=0, startingMonth=12, qtr_with_extra_week=3)),
- ("REQ-N-DEC-MON-3", makeFY5253NearestEndMonthQuarter(
- weekday=0, startingMonth=12, qtr_with_extra_week=3)),
- ]
+ ('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
for name, expected in pairs:
offset = get_offset(name)
@@ -4381,16 +2853,6 @@ def test_get_standard_freq():
assert fstr == get_standard_freq(('q', 5))
-def test_quarterly_dont_normalize():
- date = datetime(2012, 3, 31, 5, 30)
-
- offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
-
- for klass in offsets:
- result = date + klass()
- assert (result.time() == date.time())
-
-
class TestOffsetAliases(object):
def setup_method(self, method):
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
new file mode 100644
index 0000000000000..1d47cf67c6e55
--- /dev/null
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -0,0 +1,994 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for Year, Quarter, and Month-based DateOffset subclasses
+"""
+from datetime import datetime
+
+import pytest
+
+from pandas import Timestamp
+from pandas import compat
+
+from pandas.tseries.offsets import (BMonthBegin, BMonthEnd,
+ MonthBegin, MonthEnd,
+ YearEnd, YearBegin, BYearEnd, BYearBegin,
+ QuarterEnd, QuarterBegin,
+ BQuarterEnd, BQuarterBegin)
+
+from .test_offsets import Base
+from .common import assert_offset_equal, assert_onOffset
+
+
+# --------------------------------------------------------------------
+# Misc
+
+def test_quarterly_dont_normalize():
+ date = datetime(2012, 3, 31, 5, 30)
+
+ offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
+
+ for klass in offsets:
+ result = date + klass()
+ assert (result.time() == date.time())
+
+
+# --------------------------------------------------------------------
+# Months
+
+class TestMonthBegin(Base):
+ _offset = MonthBegin
+
+ offset_cases = []
+ # NOTE: I'm not entirely happy with the logic here for Begin -ss
+ # see thread 'offset conventions' on the ML
+ offset_cases.append((MonthBegin(), {
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2008, 2, 1): datetime(2008, 3, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2006, 12, 1): datetime(2007, 1, 1),
+ datetime(2007, 1, 31): datetime(2007, 2, 1)}))
+
+ offset_cases.append((MonthBegin(0), {
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2006, 12, 3): datetime(2007, 1, 1),
+ datetime(2007, 1, 31): datetime(2007, 2, 1)}))
+
+ offset_cases.append((MonthBegin(2), {
+ datetime(2008, 2, 29): datetime(2008, 4, 1),
+ datetime(2008, 1, 31): datetime(2008, 3, 1),
+ datetime(2006, 12, 31): datetime(2007, 2, 1),
+ datetime(2007, 12, 28): datetime(2008, 2, 1),
+ datetime(2007, 1, 1): datetime(2007, 3, 1),
+ datetime(2006, 11, 1): datetime(2007, 1, 1)}))
+
+ offset_cases.append((MonthBegin(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ datetime(2008, 5, 31): datetime(2008, 5, 1),
+ datetime(2008, 12, 31): datetime(2008, 12, 1),
+ datetime(2006, 12, 29): datetime(2006, 12, 1),
+ datetime(2006, 1, 2): datetime(2006, 1, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+
+class TestMonthEnd(Base):
+ _offset = MonthEnd
+
+ def test_day_of_month(self):
+ dt = datetime(2007, 1, 1)
+ offset = MonthEnd()
+
+ result = dt + offset
+ assert result == Timestamp(2007, 1, 31)
+
+ result = result + offset
+ assert result == Timestamp(2007, 2, 28)
+
+ def test_normalize(self):
+ dt = datetime(2007, 1, 1, 3)
+
+ result = dt + MonthEnd(normalize=True)
+ expected = dt.replace(hour=0) + MonthEnd()
+ assert result == expected
+
+ offset_cases = []
+ offset_cases.append((MonthEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31),
+ datetime(2006, 12, 1): datetime(2006, 12, 31)}))
+
+ offset_cases.append((MonthEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 31),
+ datetime(2006, 12, 31): datetime(2006, 12, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31)}))
+
+ offset_cases.append((MonthEnd(2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 1, 31): datetime(2008, 3, 31),
+ datetime(2006, 12, 29): datetime(2007, 1, 31),
+ datetime(2006, 12, 31): datetime(2007, 2, 28),
+ datetime(2007, 1, 1): datetime(2007, 2, 28),
+ datetime(2006, 11, 1): datetime(2006, 12, 31)}))
+
+ offset_cases.append((MonthEnd(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 5, 31),
+ datetime(2008, 12, 31): datetime(2008, 11, 30),
+ datetime(2006, 12, 29): datetime(2006, 11, 30),
+ datetime(2006, 12, 30): datetime(2006, 11, 30),
+ datetime(2007, 1, 1): datetime(2006, 12, 31)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(MonthEnd(), datetime(2007, 12, 31), True),
+ (MonthEnd(), datetime(2008, 1, 1), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestBMonthBegin(Base):
+ _offset = BMonthBegin
+
+ def test_offsets_compare_equal(self):
+ # root cause of #456
+ offset1 = BMonthBegin()
+ offset2 = BMonthBegin()
+ assert not offset1 != offset2
+
+ offset_cases = []
+ offset_cases.append((BMonthBegin(), {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2006, 9, 1): datetime(2006, 10, 2),
+ datetime(2007, 1, 1): datetime(2007, 2, 1),
+ datetime(2006, 12, 1): datetime(2007, 1, 1)}))
+
+ offset_cases.append((BMonthBegin(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2006, 10, 2): datetime(2006, 10, 2),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2006, 12, 29): datetime(2007, 1, 1),
+ datetime(2006, 12, 31): datetime(2007, 1, 1),
+ datetime(2006, 9, 15): datetime(2006, 10, 2)}))
+
+ offset_cases.append((BMonthBegin(2), {
+ datetime(2008, 1, 1): datetime(2008, 3, 3),
+ datetime(2008, 1, 15): datetime(2008, 3, 3),
+ datetime(2006, 12, 29): datetime(2007, 2, 1),
+ datetime(2006, 12, 31): datetime(2007, 2, 1),
+ datetime(2007, 1, 1): datetime(2007, 3, 1),
+ datetime(2006, 11, 1): datetime(2007, 1, 1)}))
+
+ offset_cases.append((BMonthBegin(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 1),
+ datetime(2008, 6, 30): datetime(2008, 6, 2),
+ datetime(2008, 6, 1): datetime(2008, 5, 1),
+ datetime(2008, 3, 10): datetime(2008, 3, 3),
+ datetime(2008, 12, 31): datetime(2008, 12, 1),
+ datetime(2006, 12, 29): datetime(2006, 12, 1),
+ datetime(2006, 12, 30): datetime(2006, 12, 1),
+ datetime(2007, 1, 1): datetime(2006, 12, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(BMonthBegin(), datetime(2007, 12, 31), False),
+ (BMonthBegin(), datetime(2008, 1, 1), True),
+ (BMonthBegin(), datetime(2001, 4, 2), True),
+ (BMonthBegin(), datetime(2008, 3, 3), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestBMonthEnd(Base):
+ _offset = BMonthEnd
+
+ def test_normalize(self):
+ dt = datetime(2007, 1, 1, 3)
+
+ result = dt + BMonthEnd(normalize=True)
+ expected = dt.replace(hour=0) + BMonthEnd()
+ assert result == expected
+
+ def test_offsets_compare_equal(self):
+ # root cause of #456
+ offset1 = BMonthEnd()
+ offset2 = BMonthEnd()
+ assert not offset1 != offset2
+
+ offset_cases = []
+ offset_cases.append((BMonthEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2006, 12, 29): datetime(2007, 1, 31),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31),
+ datetime(2006, 12, 1): datetime(2006, 12, 29)}))
+
+ offset_cases.append((BMonthEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2006, 12, 29): datetime(2006, 12, 29),
+ datetime(2006, 12, 31): datetime(2007, 1, 31),
+ datetime(2007, 1, 1): datetime(2007, 1, 31)}))
+
+ offset_cases.append((BMonthEnd(2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 1, 31): datetime(2008, 3, 31),
+ datetime(2006, 12, 29): datetime(2007, 2, 28),
+ datetime(2006, 12, 31): datetime(2007, 2, 28),
+ datetime(2007, 1, 1): datetime(2007, 2, 28),
+ datetime(2006, 11, 1): datetime(2006, 12, 29)}))
+
+ offset_cases.append((BMonthEnd(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 29),
+ datetime(2008, 6, 30): datetime(2008, 5, 30),
+ datetime(2008, 12, 31): datetime(2008, 11, 28),
+ datetime(2006, 12, 29): datetime(2006, 11, 30),
+ datetime(2006, 12, 30): datetime(2006, 12, 29),
+ datetime(2007, 1, 1): datetime(2006, 12, 29)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(BMonthEnd(), datetime(2007, 12, 31), True),
+ (BMonthEnd(), datetime(2008, 1, 1), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+# --------------------------------------------------------------------
+# Quarters
+
+
+class TestQuarterBegin(Base):
+
+ def test_repr(self):
+ expected = "<QuarterBegin: startingMonth=3>"
+ assert repr(QuarterBegin()) == expected
+ expected = "<QuarterBegin: startingMonth=3>"
+ assert repr(QuarterBegin(startingMonth=3)) == expected
+ expected = "<QuarterBegin: startingMonth=1>"
+ assert repr(QuarterBegin(startingMonth=1)) == expected
+
+ def test_isAnchored(self):
+ assert QuarterBegin(startingMonth=1).isAnchored()
+ assert QuarterBegin().isAnchored()
+ assert not QuarterBegin(2, startingMonth=1).isAnchored()
+
+ def test_offset_corner_case(self):
+ # corner
+ offset = QuarterBegin(n=-1, startingMonth=1)
+ assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
+
+ offset_cases = []
+ offset_cases.append((QuarterBegin(startingMonth=1), {
+ datetime(2007, 12, 1): datetime(2008, 1, 1),
+ datetime(2008, 1, 1): datetime(2008, 4, 1),
+ datetime(2008, 2, 15): datetime(2008, 4, 1),
+ datetime(2008, 2, 29): datetime(2008, 4, 1),
+ datetime(2008, 3, 15): datetime(2008, 4, 1),
+ datetime(2008, 3, 31): datetime(2008, 4, 1),
+ datetime(2008, 4, 15): datetime(2008, 7, 1),
+ datetime(2008, 4, 1): datetime(2008, 7, 1)}))
+
+ offset_cases.append((QuarterBegin(startingMonth=2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2008, 1, 15): datetime(2008, 2, 1),
+ datetime(2008, 2, 29): datetime(2008, 5, 1),
+ datetime(2008, 3, 15): datetime(2008, 5, 1),
+ datetime(2008, 3, 31): datetime(2008, 5, 1),
+ datetime(2008, 4, 15): datetime(2008, 5, 1),
+ datetime(2008, 4, 30): datetime(2008, 5, 1)}))
+
+ offset_cases.append((QuarterBegin(startingMonth=1, n=0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 12, 1): datetime(2009, 1, 1),
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 2, 15): datetime(2008, 4, 1),
+ datetime(2008, 2, 29): datetime(2008, 4, 1),
+ datetime(2008, 3, 15): datetime(2008, 4, 1),
+ datetime(2008, 3, 31): datetime(2008, 4, 1),
+ datetime(2008, 4, 15): datetime(2008, 7, 1),
+ datetime(2008, 4, 30): datetime(2008, 7, 1)}))
+
+ offset_cases.append((QuarterBegin(startingMonth=1, n=-1), {
+ datetime(2008, 1, 1): datetime(2007, 10, 1),
+ datetime(2008, 1, 31): datetime(2008, 1, 1),
+ datetime(2008, 2, 15): datetime(2008, 1, 1),
+ datetime(2008, 2, 29): datetime(2008, 1, 1),
+ datetime(2008, 3, 15): datetime(2008, 1, 1),
+ datetime(2008, 3, 31): datetime(2008, 1, 1),
+ datetime(2008, 4, 15): datetime(2008, 4, 1),
+ datetime(2008, 4, 30): datetime(2008, 4, 1),
+ datetime(2008, 7, 1): datetime(2008, 4, 1)}))
+
+ offset_cases.append((QuarterBegin(startingMonth=1, n=2), {
+ datetime(2008, 1, 1): datetime(2008, 7, 1),
+ datetime(2008, 2, 15): datetime(2008, 7, 1),
+ datetime(2008, 2, 29): datetime(2008, 7, 1),
+ datetime(2008, 3, 15): datetime(2008, 7, 1),
+ datetime(2008, 3, 31): datetime(2008, 7, 1),
+ datetime(2008, 4, 15): datetime(2008, 10, 1),
+ datetime(2008, 4, 1): datetime(2008, 10, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+
+class TestQuarterEnd(Base):
+ _offset = QuarterEnd
+
+ def test_repr(self):
+ expected = "<QuarterEnd: startingMonth=3>"
+ assert repr(QuarterEnd()) == expected
+ expected = "<QuarterEnd: startingMonth=3>"
+ assert repr(QuarterEnd(startingMonth=3)) == expected
+ expected = "<QuarterEnd: startingMonth=1>"
+ assert repr(QuarterEnd(startingMonth=1)) == expected
+
+ def test_isAnchored(self):
+ assert QuarterEnd(startingMonth=1).isAnchored()
+ assert QuarterEnd().isAnchored()
+ assert not QuarterEnd(2, startingMonth=1).isAnchored()
+
+ def test_offset_corner_case(self):
+ # corner
+ offset = QuarterEnd(n=-1, startingMonth=1)
+ assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
+
+ offset_cases = []
+ offset_cases.append((QuarterEnd(startingMonth=1), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 4, 30),
+ datetime(2008, 2, 15): datetime(2008, 4, 30),
+ datetime(2008, 2, 29): datetime(2008, 4, 30),
+ datetime(2008, 3, 15): datetime(2008, 4, 30),
+ datetime(2008, 3, 31): datetime(2008, 4, 30),
+ datetime(2008, 4, 15): datetime(2008, 4, 30),
+ datetime(2008, 4, 30): datetime(2008, 7, 31)}))
+
+ offset_cases.append((QuarterEnd(startingMonth=2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2008, 2, 15): datetime(2008, 2, 29),
+ datetime(2008, 2, 29): datetime(2008, 5, 31),
+ datetime(2008, 3, 15): datetime(2008, 5, 31),
+ datetime(2008, 3, 31): datetime(2008, 5, 31),
+ datetime(2008, 4, 15): datetime(2008, 5, 31),
+ datetime(2008, 4, 30): datetime(2008, 5, 31)}))
+
+ offset_cases.append((QuarterEnd(startingMonth=1, n=0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2008, 2, 15): datetime(2008, 4, 30),
+ datetime(2008, 2, 29): datetime(2008, 4, 30),
+ datetime(2008, 3, 15): datetime(2008, 4, 30),
+ datetime(2008, 3, 31): datetime(2008, 4, 30),
+ datetime(2008, 4, 15): datetime(2008, 4, 30),
+ datetime(2008, 4, 30): datetime(2008, 4, 30)}))
+
+ offset_cases.append((QuarterEnd(startingMonth=1, n=-1), {
+ datetime(2008, 1, 1): datetime(2007, 10, 31),
+ datetime(2008, 1, 31): datetime(2007, 10, 31),
+ datetime(2008, 2, 15): datetime(2008, 1, 31),
+ datetime(2008, 2, 29): datetime(2008, 1, 31),
+ datetime(2008, 3, 15): datetime(2008, 1, 31),
+ datetime(2008, 3, 31): datetime(2008, 1, 31),
+ datetime(2008, 4, 15): datetime(2008, 1, 31),
+ datetime(2008, 4, 30): datetime(2008, 1, 31),
+ datetime(2008, 7, 1): datetime(2008, 4, 30)}))
+
+ offset_cases.append((QuarterEnd(startingMonth=1, n=2), {
+ datetime(2008, 1, 31): datetime(2008, 7, 31),
+ datetime(2008, 2, 15): datetime(2008, 7, 31),
+ datetime(2008, 2, 29): datetime(2008, 7, 31),
+ datetime(2008, 3, 15): datetime(2008, 7, 31),
+ datetime(2008, 3, 31): datetime(2008, 7, 31),
+ datetime(2008, 4, 15): datetime(2008, 7, 31),
+ datetime(2008, 4, 30): datetime(2008, 10, 31)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [
+ (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
+ (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
+ (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
+ (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
+ (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
+ (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
+ (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
+ (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
+ (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),
+ (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestBQuarterBegin(Base):
+ _offset = BQuarterBegin
+
+ def test_repr(self):
+ expected = "<BusinessQuarterBegin: startingMonth=3>"
+ assert repr(BQuarterBegin()) == expected
+ expected = "<BusinessQuarterBegin: startingMonth=3>"
+ assert repr(BQuarterBegin(startingMonth=3)) == expected
+ expected = "<BusinessQuarterBegin: startingMonth=1>"
+ assert repr(BQuarterBegin(startingMonth=1)) == expected
+
+ def test_isAnchored(self):
+ assert BQuarterBegin(startingMonth=1).isAnchored()
+ assert BQuarterBegin().isAnchored()
+ assert not BQuarterBegin(2, startingMonth=1).isAnchored()
+
+ def test_offset_corner_case(self):
+ # corner
+ offset = BQuarterBegin(n=-1, startingMonth=1)
+ assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
+
+ offset_cases = []
+ offset_cases.append((BQuarterBegin(startingMonth=1), {
+ datetime(2008, 1, 1): datetime(2008, 4, 1),
+ datetime(2008, 1, 31): datetime(2008, 4, 1),
+ datetime(2008, 2, 15): datetime(2008, 4, 1),
+ datetime(2008, 2, 29): datetime(2008, 4, 1),
+ datetime(2008, 3, 15): datetime(2008, 4, 1),
+ datetime(2008, 3, 31): datetime(2008, 4, 1),
+ datetime(2008, 4, 15): datetime(2008, 7, 1),
+ datetime(2007, 3, 15): datetime(2007, 4, 2),
+ datetime(2007, 2, 28): datetime(2007, 4, 2),
+ datetime(2007, 1, 1): datetime(2007, 4, 2),
+ datetime(2007, 4, 15): datetime(2007, 7, 2),
+ datetime(2007, 7, 1): datetime(2007, 7, 2),
+ datetime(2007, 4, 1): datetime(2007, 4, 2),
+ datetime(2007, 4, 2): datetime(2007, 7, 2),
+ datetime(2008, 4, 30): datetime(2008, 7, 1)}))
+
+ offset_cases.append((BQuarterBegin(startingMonth=2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 1),
+ datetime(2008, 1, 31): datetime(2008, 2, 1),
+ datetime(2008, 1, 15): datetime(2008, 2, 1),
+ datetime(2008, 2, 29): datetime(2008, 5, 1),
+ datetime(2008, 3, 15): datetime(2008, 5, 1),
+ datetime(2008, 3, 31): datetime(2008, 5, 1),
+ datetime(2008, 4, 15): datetime(2008, 5, 1),
+ datetime(2008, 8, 15): datetime(2008, 11, 3),
+ datetime(2008, 9, 15): datetime(2008, 11, 3),
+ datetime(2008, 11, 1): datetime(2008, 11, 3),
+ datetime(2008, 4, 30): datetime(2008, 5, 1)}))
+
+ offset_cases.append((BQuarterBegin(startingMonth=1, n=0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2007, 12, 31): datetime(2008, 1, 1),
+ datetime(2008, 2, 15): datetime(2008, 4, 1),
+ datetime(2008, 2, 29): datetime(2008, 4, 1),
+ datetime(2008, 1, 15): datetime(2008, 4, 1),
+ datetime(2008, 2, 27): datetime(2008, 4, 1),
+ datetime(2008, 3, 15): datetime(2008, 4, 1),
+ datetime(2007, 4, 1): datetime(2007, 4, 2),
+ datetime(2007, 4, 2): datetime(2007, 4, 2),
+ datetime(2007, 7, 1): datetime(2007, 7, 2),
+ datetime(2007, 4, 15): datetime(2007, 7, 2),
+ datetime(2007, 7, 2): datetime(2007, 7, 2)}))
+
+ offset_cases.append((BQuarterBegin(startingMonth=1, n=-1), {
+ datetime(2008, 1, 1): datetime(2007, 10, 1),
+ datetime(2008, 1, 31): datetime(2008, 1, 1),
+ datetime(2008, 2, 15): datetime(2008, 1, 1),
+ datetime(2008, 2, 29): datetime(2008, 1, 1),
+ datetime(2008, 3, 15): datetime(2008, 1, 1),
+ datetime(2008, 3, 31): datetime(2008, 1, 1),
+ datetime(2008, 4, 15): datetime(2008, 4, 1),
+ datetime(2007, 7, 3): datetime(2007, 7, 2),
+ datetime(2007, 4, 3): datetime(2007, 4, 2),
+ datetime(2007, 7, 2): datetime(2007, 4, 2),
+ datetime(2008, 4, 1): datetime(2008, 1, 1)}))
+
+ offset_cases.append((BQuarterBegin(startingMonth=1, n=2), {
+ datetime(2008, 1, 1): datetime(2008, 7, 1),
+ datetime(2008, 1, 15): datetime(2008, 7, 1),
+ datetime(2008, 2, 29): datetime(2008, 7, 1),
+ datetime(2008, 3, 15): datetime(2008, 7, 1),
+ datetime(2007, 3, 31): datetime(2007, 7, 2),
+ datetime(2007, 4, 15): datetime(2007, 10, 1),
+ datetime(2008, 4, 30): datetime(2008, 10, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+
+class TestBQuarterEnd(Base):
+ _offset = BQuarterEnd
+
+ def test_repr(self):
+ expected = "<BusinessQuarterEnd: startingMonth=3>"
+ assert repr(BQuarterEnd()) == expected
+ expected = "<BusinessQuarterEnd: startingMonth=3>"
+ assert repr(BQuarterEnd(startingMonth=3)) == expected
+ expected = "<BusinessQuarterEnd: startingMonth=1>"
+ assert repr(BQuarterEnd(startingMonth=1)) == expected
+
+ def test_isAnchored(self):
+ assert BQuarterEnd(startingMonth=1).isAnchored()
+ assert BQuarterEnd().isAnchored()
+ assert not BQuarterEnd(2, startingMonth=1).isAnchored()
+
+ def test_offset_corner_case(self):
+ # corner
+ offset = BQuarterEnd(n=-1, startingMonth=1)
+ assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
+
+ offset_cases = []
+ offset_cases.append((BQuarterEnd(startingMonth=1), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 4, 30),
+ datetime(2008, 2, 15): datetime(2008, 4, 30),
+ datetime(2008, 2, 29): datetime(2008, 4, 30),
+ datetime(2008, 3, 15): datetime(2008, 4, 30),
+ datetime(2008, 3, 31): datetime(2008, 4, 30),
+ datetime(2008, 4, 15): datetime(2008, 4, 30),
+ datetime(2008, 4, 30): datetime(2008, 7, 31)}))
+
+ offset_cases.append((BQuarterEnd(startingMonth=2), {
+ datetime(2008, 1, 1): datetime(2008, 2, 29),
+ datetime(2008, 1, 31): datetime(2008, 2, 29),
+ datetime(2008, 2, 15): datetime(2008, 2, 29),
+ datetime(2008, 2, 29): datetime(2008, 5, 30),
+ datetime(2008, 3, 15): datetime(2008, 5, 30),
+ datetime(2008, 3, 31): datetime(2008, 5, 30),
+ datetime(2008, 4, 15): datetime(2008, 5, 30),
+ datetime(2008, 4, 30): datetime(2008, 5, 30)}))
+
+ offset_cases.append((BQuarterEnd(startingMonth=1, n=0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 31),
+ datetime(2008, 1, 31): datetime(2008, 1, 31),
+ datetime(2008, 2, 15): datetime(2008, 4, 30),
+ datetime(2008, 2, 29): datetime(2008, 4, 30),
+ datetime(2008, 3, 15): datetime(2008, 4, 30),
+ datetime(2008, 3, 31): datetime(2008, 4, 30),
+ datetime(2008, 4, 15): datetime(2008, 4, 30),
+ datetime(2008, 4, 30): datetime(2008, 4, 30)}))
+
+ offset_cases.append((BQuarterEnd(startingMonth=1, n=-1), {
+ datetime(2008, 1, 1): datetime(2007, 10, 31),
+ datetime(2008, 1, 31): datetime(2007, 10, 31),
+ datetime(2008, 2, 15): datetime(2008, 1, 31),
+ datetime(2008, 2, 29): datetime(2008, 1, 31),
+ datetime(2008, 3, 15): datetime(2008, 1, 31),
+ datetime(2008, 3, 31): datetime(2008, 1, 31),
+ datetime(2008, 4, 15): datetime(2008, 1, 31),
+ datetime(2008, 4, 30): datetime(2008, 1, 31)}))
+
+ offset_cases.append((BQuarterEnd(startingMonth=1, n=2), {
+ datetime(2008, 1, 31): datetime(2008, 7, 31),
+ datetime(2008, 2, 15): datetime(2008, 7, 31),
+ datetime(2008, 2, 29): datetime(2008, 7, 31),
+ datetime(2008, 3, 15): datetime(2008, 7, 31),
+ datetime(2008, 3, 31): datetime(2008, 7, 31),
+ datetime(2008, 4, 15): datetime(2008, 7, 31),
+ datetime(2008, 4, 30): datetime(2008, 10, 31)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [
+ (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
+ (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
+ (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
+ (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
+ (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
+ (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
+ (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
+ (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
+ (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
+ (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
+ (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+# --------------------------------------------------------------------
+# Years
+
+
+class TestYearBegin(Base):
+ _offset = YearBegin
+
+ def test_misspecified(self):
+ pytest.raises(ValueError, YearBegin, month=13)
+
+ offset_cases = []
+ offset_cases.append((YearBegin(), {
+ datetime(2008, 1, 1): datetime(2009, 1, 1),
+ datetime(2008, 6, 30): datetime(2009, 1, 1),
+ datetime(2008, 12, 31): datetime(2009, 1, 1),
+ datetime(2005, 12, 30): datetime(2006, 1, 1),
+ datetime(2005, 12, 31): datetime(2006, 1, 1)}))
+
+ offset_cases.append((YearBegin(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 6, 30): datetime(2009, 1, 1),
+ datetime(2008, 12, 31): datetime(2009, 1, 1),
+ datetime(2005, 12, 30): datetime(2006, 1, 1),
+ datetime(2005, 12, 31): datetime(2006, 1, 1)}))
+
+ offset_cases.append((YearBegin(3), {
+ datetime(2008, 1, 1): datetime(2011, 1, 1),
+ datetime(2008, 6, 30): datetime(2011, 1, 1),
+ datetime(2008, 12, 31): datetime(2011, 1, 1),
+ datetime(2005, 12, 30): datetime(2008, 1, 1),
+ datetime(2005, 12, 31): datetime(2008, 1, 1)}))
+
+ offset_cases.append((YearBegin(-1), {
+ datetime(2007, 1, 1): datetime(2006, 1, 1),
+ datetime(2007, 1, 15): datetime(2007, 1, 1),
+ datetime(2008, 6, 30): datetime(2008, 1, 1),
+ datetime(2008, 12, 31): datetime(2008, 1, 1),
+ datetime(2006, 12, 29): datetime(2006, 1, 1),
+ datetime(2006, 12, 30): datetime(2006, 1, 1),
+ datetime(2007, 1, 1): datetime(2006, 1, 1)}))
+
+ offset_cases.append((YearBegin(-2), {
+ datetime(2007, 1, 1): datetime(2005, 1, 1),
+ datetime(2008, 6, 30): datetime(2007, 1, 1),
+ datetime(2008, 12, 31): datetime(2007, 1, 1)}))
+
+ offset_cases.append((YearBegin(month=4), {
+ datetime(2007, 4, 1): datetime(2008, 4, 1),
+ datetime(2007, 4, 15): datetime(2008, 4, 1),
+ datetime(2007, 3, 1): datetime(2007, 4, 1),
+ datetime(2007, 12, 15): datetime(2008, 4, 1),
+ datetime(2012, 1, 31): datetime(2012, 4, 1)}))
+
+ offset_cases.append((YearBegin(0, month=4), {
+ datetime(2007, 4, 1): datetime(2007, 4, 1),
+ datetime(2007, 3, 1): datetime(2007, 4, 1),
+ datetime(2007, 12, 15): datetime(2008, 4, 1),
+ datetime(2012, 1, 31): datetime(2012, 4, 1)}))
+
+ offset_cases.append((YearBegin(4, month=4), {
+ datetime(2007, 4, 1): datetime(2011, 4, 1),
+ datetime(2007, 4, 15): datetime(2011, 4, 1),
+ datetime(2007, 3, 1): datetime(2010, 4, 1),
+ datetime(2007, 12, 15): datetime(2011, 4, 1),
+ datetime(2012, 1, 31): datetime(2015, 4, 1)}))
+
+ offset_cases.append((YearBegin(-1, month=4), {
+ datetime(2007, 4, 1): datetime(2006, 4, 1),
+ datetime(2007, 3, 1): datetime(2006, 4, 1),
+ datetime(2007, 12, 15): datetime(2007, 4, 1),
+ datetime(2012, 1, 31): datetime(2011, 4, 1)}))
+
+ offset_cases.append((YearBegin(-3, month=4), {
+ datetime(2007, 4, 1): datetime(2004, 4, 1),
+ datetime(2007, 3, 1): datetime(2004, 4, 1),
+ datetime(2007, 12, 15): datetime(2005, 4, 1),
+ datetime(2012, 1, 31): datetime(2009, 4, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(YearBegin(), datetime(2007, 1, 3), False),
+ (YearBegin(), datetime(2008, 1, 1), True),
+ (YearBegin(), datetime(2006, 12, 31), False),
+ (YearBegin(), datetime(2006, 1, 2), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestYearEnd(Base):
+ _offset = YearEnd
+
+ def test_misspecified(self):
+ pytest.raises(ValueError, YearEnd, month=13)
+
+ offset_cases = []
+ offset_cases.append((YearEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 12, 31),
+ datetime(2008, 12, 31): datetime(2009, 12, 31),
+ datetime(2005, 12, 30): datetime(2005, 12, 31),
+ datetime(2005, 12, 31): datetime(2006, 12, 31)}))
+
+ offset_cases.append((YearEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 12, 31),
+ datetime(2008, 12, 31): datetime(2008, 12, 31),
+ datetime(2005, 12, 30): datetime(2005, 12, 31)}))
+
+ offset_cases.append((YearEnd(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 31),
+ datetime(2008, 6, 30): datetime(2007, 12, 31),
+ datetime(2008, 12, 31): datetime(2007, 12, 31),
+ datetime(2006, 12, 29): datetime(2005, 12, 31),
+ datetime(2006, 12, 30): datetime(2005, 12, 31),
+ datetime(2007, 1, 1): datetime(2006, 12, 31)}))
+
+ offset_cases.append((YearEnd(-2), {
+ datetime(2007, 1, 1): datetime(2005, 12, 31),
+ datetime(2008, 6, 30): datetime(2006, 12, 31),
+ datetime(2008, 12, 31): datetime(2006, 12, 31)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(YearEnd(), datetime(2007, 12, 31), True),
+ (YearEnd(), datetime(2008, 1, 1), False),
+ (YearEnd(), datetime(2006, 12, 31), True),
+ (YearEnd(), datetime(2006, 12, 29), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestYearEndDiffMonth(Base):
+ offset_cases = []
+ offset_cases.append((YearEnd(month=3),
+ {datetime(2008, 1, 1): datetime(2008, 3, 31),
+ datetime(2008, 2, 15): datetime(2008, 3, 31),
+ datetime(2008, 3, 31): datetime(2009, 3, 31),
+ datetime(2008, 3, 30): datetime(2008, 3, 31),
+ datetime(2005, 3, 31): datetime(2006, 3, 31),
+ datetime(2006, 7, 30): datetime(2007, 3, 31)}))
+
+ offset_cases.append((YearEnd(0, month=3),
+ {datetime(2008, 1, 1): datetime(2008, 3, 31),
+ datetime(2008, 2, 28): datetime(2008, 3, 31),
+ datetime(2008, 3, 31): datetime(2008, 3, 31),
+ datetime(2005, 3, 30): datetime(2005, 3, 31)}))
+
+ offset_cases.append((YearEnd(-1, month=3),
+ {datetime(2007, 1, 1): datetime(2006, 3, 31),
+ datetime(2008, 2, 28): datetime(2007, 3, 31),
+ datetime(2008, 3, 31): datetime(2007, 3, 31),
+ datetime(2006, 3, 29): datetime(2005, 3, 31),
+ datetime(2006, 3, 30): datetime(2005, 3, 31),
+ datetime(2007, 3, 1): datetime(2006, 3, 31)}))
+
+ offset_cases.append((YearEnd(-2, month=3),
+ {datetime(2007, 1, 1): datetime(2005, 3, 31),
+ datetime(2008, 6, 30): datetime(2007, 3, 31),
+ datetime(2008, 3, 31): datetime(2006, 3, 31)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(YearEnd(month=3), datetime(2007, 3, 31), True),
+ (YearEnd(month=3), datetime(2008, 1, 1), False),
+ (YearEnd(month=3), datetime(2006, 3, 31), True),
+ (YearEnd(month=3), datetime(2006, 3, 29), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestBYearBegin(Base):
+ _offset = BYearBegin
+
+ def test_misspecified(self):
+ pytest.raises(ValueError, BYearBegin, month=13)
+ pytest.raises(ValueError, BYearEnd, month=13)
+
+ offset_cases = []
+ offset_cases.append((BYearBegin(), {
+ datetime(2008, 1, 1): datetime(2009, 1, 1),
+ datetime(2008, 6, 30): datetime(2009, 1, 1),
+ datetime(2008, 12, 31): datetime(2009, 1, 1),
+ datetime(2011, 1, 1): datetime(2011, 1, 3),
+ datetime(2011, 1, 3): datetime(2012, 1, 2),
+ datetime(2005, 12, 30): datetime(2006, 1, 2),
+ datetime(2005, 12, 31): datetime(2006, 1, 2)}))
+
+ offset_cases.append((BYearBegin(0), {
+ datetime(2008, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 6, 30): datetime(2009, 1, 1),
+ datetime(2008, 12, 31): datetime(2009, 1, 1),
+ datetime(2005, 12, 30): datetime(2006, 1, 2),
+ datetime(2005, 12, 31): datetime(2006, 1, 2)}))
+
+ offset_cases.append((BYearBegin(-1), {
+ datetime(2007, 1, 1): datetime(2006, 1, 2),
+ datetime(2009, 1, 4): datetime(2009, 1, 1),
+ datetime(2009, 1, 1): datetime(2008, 1, 1),
+ datetime(2008, 6, 30): datetime(2008, 1, 1),
+ datetime(2008, 12, 31): datetime(2008, 1, 1),
+ datetime(2006, 12, 29): datetime(2006, 1, 2),
+ datetime(2006, 12, 30): datetime(2006, 1, 2),
+ datetime(2006, 1, 1): datetime(2005, 1, 3)}))
+
+ offset_cases.append((BYearBegin(-2), {
+ datetime(2007, 1, 1): datetime(2005, 1, 3),
+ datetime(2007, 6, 30): datetime(2006, 1, 2),
+ datetime(2008, 12, 31): datetime(2007, 1, 1)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+
+class TestBYearEnd(Base):
+ _offset = BYearEnd
+
+ offset_cases = []
+ offset_cases.append((BYearEnd(), {
+ datetime(2008, 1, 1): datetime(2008, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 12, 31),
+ datetime(2008, 12, 31): datetime(2009, 12, 31),
+ datetime(2005, 12, 30): datetime(2006, 12, 29),
+ datetime(2005, 12, 31): datetime(2006, 12, 29)}))
+
+ offset_cases.append((BYearEnd(0), {
+ datetime(2008, 1, 1): datetime(2008, 12, 31),
+ datetime(2008, 6, 30): datetime(2008, 12, 31),
+ datetime(2008, 12, 31): datetime(2008, 12, 31),
+ datetime(2005, 12, 31): datetime(2006, 12, 29)}))
+
+ offset_cases.append((BYearEnd(-1), {
+ datetime(2007, 1, 1): datetime(2006, 12, 29),
+ datetime(2008, 6, 30): datetime(2007, 12, 31),
+ datetime(2008, 12, 31): datetime(2007, 12, 31),
+ datetime(2006, 12, 29): datetime(2005, 12, 30),
+ datetime(2006, 12, 30): datetime(2006, 12, 29),
+ datetime(2007, 1, 1): datetime(2006, 12, 29)}))
+
+ offset_cases.append((BYearEnd(-2), {
+ datetime(2007, 1, 1): datetime(2005, 12, 30),
+ datetime(2008, 6, 30): datetime(2006, 12, 29),
+ datetime(2008, 12, 31): datetime(2006, 12, 29)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ on_offset_cases = [(BYearEnd(), datetime(2007, 12, 31), True),
+ (BYearEnd(), datetime(2008, 1, 1), False),
+ (BYearEnd(), datetime(2006, 12, 31), False),
+ (BYearEnd(), datetime(2006, 12, 29), True)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
+
+
+class TestBYearEndLagged(Base):
+ _offset = BYearEnd
+
+ def test_bad_month_fail(self):
+ pytest.raises(Exception, BYearEnd, month=13)
+ pytest.raises(Exception, BYearEnd, month=0)
+
+ offset_cases = []
+ offset_cases.append((BYearEnd(month=6), {
+ datetime(2008, 1, 1): datetime(2008, 6, 30),
+ datetime(2007, 6, 30): datetime(2008, 6, 30)}))
+
+ offset_cases.append((BYearEnd(n=-1, month=6), {
+ datetime(2008, 1, 1): datetime(2007, 6, 29),
+ datetime(2007, 6, 30): datetime(2007, 6, 29)}))
+
+ @pytest.mark.parametrize('case', offset_cases)
+ def test_offset(self, case):
+ offset, cases = case
+ for base, expected in compat.iteritems(cases):
+ assert_offset_equal(offset, base, expected)
+
+ def test_roll(self):
+ offset = BYearEnd(month=6)
+ date = datetime(2009, 11, 30)
+
+ assert offset.rollforward(date) == datetime(2010, 6, 30)
+ assert offset.rollback(date) == datetime(2009, 6, 30)
+
+ on_offset_cases = [(BYearEnd(month=2), datetime(2007, 2, 28), True),
+ (BYearEnd(month=6), datetime(2007, 6, 30), False)]
+
+ @pytest.mark.parametrize('case', on_offset_cases)
+ def test_onOffset(self, case):
+ offset, dt, expected = case
+ assert_onOffset(offset, dt, expected)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 7b699349c3f07..a307b7e5817a8 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -985,6 +985,162 @@ class BusinessMonthBegin(MonthOffset):
_day_opt = 'business_start'
+class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
+ """
+ DateOffset subclass representing one custom business month, incrementing
+ between end of month dates
+
+ Parameters
+ ----------
+ n : int, default 1
+ offset : timedelta, default timedelta(0)
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+ calendar : pd.HolidayCalendar or np.busdaycalendar
+ """
+
+ _cacheable = False
+ _prefix = 'CBM'
+
+ onOffset = DateOffset.onOffset # override MonthOffset method
+
+ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
+ holidays=None, calendar=None, offset=timedelta(0)):
+ self.n = self._validate_n(n)
+ self.normalize = normalize
+ self._offset = offset
+ self.kwds = {}
+
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['offset'] = offset
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
+ return MonthEnd(n=1, normalize=self.normalize, **kwds)
+
+ @apply_wraps
+ def apply(self, other):
+ n = self.n
+
+ # First move to month offset
+ cur_mend = self.m_offset.rollforward(other)
+
+ # Find this custom month offset
+ cur_cmend = self.cbday.rollback(cur_mend)
+
+ # handle zero case. arbitrarily rollforward
+ if n == 0 and other != cur_cmend:
+ n += 1
+
+ if other < cur_cmend and n >= 1:
+ n -= 1
+ elif other > cur_cmend and n <= -1:
+ n += 1
+
+ new = cur_mend + n * self.m_offset
+ result = self.cbday.rollback(new)
+ return result
+
+
+class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
+ """
+ DateOffset subclass representing one custom business month, incrementing
+ between beginning of month dates
+
+ Parameters
+ ----------
+ n : int, default 1
+ offset : timedelta, default timedelta(0)
+ normalize : bool, default False
+ Normalize start/end dates to midnight before generating date range
+ weekmask : str, Default 'Mon Tue Wed Thu Fri'
+ weekmask of valid business days, passed to ``numpy.busdaycalendar``
+ holidays : list
+ list/array of dates to exclude from the set of valid business days,
+ passed to ``numpy.busdaycalendar``
+ calendar : pd.HolidayCalendar or np.busdaycalendar
+ """
+
+ _cacheable = False
+ _prefix = 'CBMS'
+
+ onOffset = DateOffset.onOffset # override MonthOffset method
+
+ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
+ holidays=None, calendar=None, offset=timedelta(0)):
+ self.n = self._validate_n(n)
+ self.normalize = normalize
+ self._offset = offset
+ self.kwds = {}
+
+ # _get_calendar does validation and possible transformation
+ # of calendar and holidays.
+ calendar, holidays = _get_calendar(weekmask=weekmask,
+ holidays=holidays,
+ calendar=calendar)
+ self.kwds['calendar'] = self.calendar = calendar
+ self.kwds['weekmask'] = self.weekmask = weekmask
+ self.kwds['holidays'] = self.holidays = holidays
+ self.kwds['offset'] = offset
+
+ @cache_readonly
+ def cbday(self):
+ kwds = self.kwds
+ return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
+
+ @cache_readonly
+ def m_offset(self):
+ kwds = self.kwds
+ kwds = {key: kwds[key] for key in kwds
+ if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
+ return MonthBegin(n=1, normalize=self.normalize, **kwds)
+
+ @apply_wraps
+ def apply(self, other):
+ n = self.n
+ dt_in = other
+
+ # First move to month offset
+ cur_mbegin = self.m_offset.rollback(dt_in)
+
+ # Find this custom month offset
+ cur_cmbegin = self.cbday.rollforward(cur_mbegin)
+
+ # handle zero case. arbitrarily rollforward
+ if n == 0 and dt_in != cur_cmbegin:
+ n += 1
+
+ if dt_in > cur_cmbegin and n <= -1:
+ n += 1
+ elif dt_in < cur_cmbegin and n >= 1:
+ n -= 1
+
+ new = cur_mbegin + n * self.m_offset
+ result = self.cbday.rollforward(new)
+ return result
+
+
+# ---------------------------------------------------------------------
+# Semi-Month Based Offset Classes
+
class SemiMonthOffset(DateOffset):
_adjust_dst = True
_default_day_of_month = 15
@@ -1185,155 +1341,6 @@ def _apply_index_days(self, i, roll):
return i + (roll % 2) * Timedelta(days=self.day_of_month - 1).value
-class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
- """
- DateOffset subclass representing one custom business month, incrementing
- between end of month dates
-
- Parameters
- ----------
- n : int, default 1
- offset : timedelta, default timedelta(0)
- normalize : bool, default False
- Normalize start/end dates to midnight before generating date range
- weekmask : str, Default 'Mon Tue Wed Thu Fri'
- weekmask of valid business days, passed to ``numpy.busdaycalendar``
- holidays : list
- list/array of dates to exclude from the set of valid business days,
- passed to ``numpy.busdaycalendar``
- calendar : pd.HolidayCalendar or np.busdaycalendar
- """
-
- _cacheable = False
- _prefix = 'CBM'
-
- onOffset = DateOffset.onOffset # override MonthOffset method
-
- def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
- self.kwds = {}
-
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['offset'] = offset
-
- @cache_readonly
- def cbday(self):
- kwds = self.kwds
- return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
-
- @cache_readonly
- def m_offset(self):
- kwds = self.kwds
- kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
- return MonthEnd(n=1, normalize=self.normalize, **kwds)
-
- @apply_wraps
- def apply(self, other):
- n = self.n
- # First move to month offset
- cur_mend = self.m_offset.rollforward(other)
- # Find this custom month offset
- cur_cmend = self.cbday.rollback(cur_mend)
-
- # handle zero case. arbitrarily rollforward
- if n == 0 and other != cur_cmend:
- n += 1
-
- if other < cur_cmend and n >= 1:
- n -= 1
- elif other > cur_cmend and n <= -1:
- n += 1
-
- new = cur_mend + n * self.m_offset
- result = self.cbday.rollback(new)
- return result
-
-
-class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
- """
- DateOffset subclass representing one custom business month, incrementing
- between beginning of month dates
-
- Parameters
- ----------
- n : int, default 1
- offset : timedelta, default timedelta(0)
- normalize : bool, default False
- Normalize start/end dates to midnight before generating date range
- weekmask : str, Default 'Mon Tue Wed Thu Fri'
- weekmask of valid business days, passed to ``numpy.busdaycalendar``
- holidays : list
- list/array of dates to exclude from the set of valid business days,
- passed to ``numpy.busdaycalendar``
- calendar : pd.HolidayCalendar or np.busdaycalendar
- """
-
- _cacheable = False
- _prefix = 'CBMS'
-
- onOffset = DateOffset.onOffset # override MonthOffset method
-
- def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
- holidays=None, calendar=None, offset=timedelta(0)):
- self.n = self._validate_n(n)
- self.normalize = normalize
- self._offset = offset
- self.kwds = {}
-
- # _get_calendar does validation and possible transformation
- # of calendar and holidays.
- calendar, holidays = _get_calendar(weekmask=weekmask,
- holidays=holidays,
- calendar=calendar)
- self.kwds['calendar'] = self.calendar = calendar
- self.kwds['weekmask'] = self.weekmask = weekmask
- self.kwds['holidays'] = self.holidays = holidays
- self.kwds['offset'] = offset
-
- @cache_readonly
- def cbday(self):
- kwds = self.kwds
- return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds)
-
- @cache_readonly
- def m_offset(self):
- kwds = self.kwds
- kwds = {key: kwds[key] for key in kwds
- if key not in ['calendar', 'weekmask', 'holidays', 'offset']}
- return MonthBegin(n=1, normalize=self.normalize, **kwds)
-
- @apply_wraps
- def apply(self, other):
- n = self.n
- dt_in = other
- # First move to month offset
- cur_mbegin = self.m_offset.rollback(dt_in)
- # Find this custom month offset
- cur_cmbegin = self.cbday.rollforward(cur_mbegin)
-
- # handle zero case. arbitrarily rollforward
- if n == 0 and dt_in != cur_cmbegin:
- n += 1
-
- if dt_in > cur_cmbegin and n <= -1:
- n += 1
- elif dt_in < cur_cmbegin and n >= 1:
- n -= 1
-
- new = cur_mbegin + n * self.m_offset
- result = self.cbday.rollforward(new)
- return result
-
-
# ---------------------------------------------------------------------
# Week-Based Offset Classes
| This began as part of #18489 and quickly became huge and tedious. Merits separating out and getting out of the way.
- Move Year, Quarter, and Month offset test cases to a dedicated file
- Implement pytest.mark.parametrize in very-nearly all the remaining places. | https://api.github.com/repos/pandas-dev/pandas/pulls/18494 | 2017-11-26T00:43:35Z | 2017-11-26T15:06:09Z | 2017-11-26T15:06:09Z | 2017-12-08T19:40:00Z |
CI: remove pandas-gbq from 3.5 build to avoid conflicts with 3.6 build-test | diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip
index 6e4f7b65f9728..0d9e44cf39fa4 100644
--- a/ci/requirements-3.5.pip
+++ b/ci/requirements-3.5.pip
@@ -1,2 +1 @@
xarray==0.9.1
-pandas-gbq
| https://api.github.com/repos/pandas-dev/pandas/pulls/18492 | 2017-11-25T23:21:07Z | 2017-11-26T00:30:34Z | 2017-11-26T00:30:34Z | 2017-12-11T20:22:37Z | |
COMPAT: map infers all-nan / empty correctly | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 4ae3d9be04aa7..9f59ea2f2a1f0 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -75,7 +75,7 @@ Other API Changes
- :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`)
- `tseries.frequencies.get_freq_group()` and `tseries.frequencies.DAYS` are removed from the public API (:issue:`18034`)
- :func:`Series.truncate` and :func:`DataFrame.truncate` will raise a ``ValueError`` if the index is not sorted instead of an unhelpful ``KeyError`` (:issue:`17935`)
-- :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`).
+- :func:`Index.map` can now accept ``Series`` and dictionary input objects (:issue:`12756`, :issue:`18482`).
- :func:`Dataframe.unstack` will now default to filling with ``np.nan`` for ``object`` columns. (:issue:`12815`)
- :class:`IntervalIndex` constructor will raise if the ``closed`` parameter conflicts with how the input data is inferred to be closed (:issue:`18421`)
- Inserting missing values into indexes will work for all types of indexes and automatically insert the correct type of missing value (``NaN``, ``NaT``, etc.) regardless of the type passed in (:issue:`18295`)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 2696f9f94375d..f4332ac244af4 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2910,7 +2910,10 @@ def map(self, mapper, na_action=None):
from .multi import MultiIndex
new_values = super(Index, self)._map_values(
mapper, na_action=na_action)
+
attributes = self._get_attributes_dict()
+
+ # we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
@@ -2923,8 +2926,25 @@ def map(self, mapper, na_action=None):
attributes['copy'] = False
- # we infer the result types based on the
- # returned values
+ # we want to try to return our original dtype
+ # ints infer to integer, but if we have
+ # uints, would prefer to return these
+ if is_unsigned_integer_dtype(self.dtype):
+ inferred = lib.infer_dtype(new_values)
+ if inferred == 'integer':
+ attributes['dtype'] = self.dtype
+
+ elif not new_values.size:
+ # empty
+ attributes['dtype'] = self.dtype
+ elif isna(new_values).all():
+ # all nan
+ inferred = lib.infer_dtype(self)
+ if inferred in ['datetime', 'datetime64',
+ 'timedelta', 'timedelta64',
+ 'period']:
+ new_values = [libts.NaT] * len(new_values)
+
return Index(new_values, **attributes)
def isin(self, values, level=None):
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index ba7795d005721..99bdaf02e25ff 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -1007,31 +1007,33 @@ def test_searchsorted_monotonic(self, indices):
indices._searchsorted_monotonic(value, side='left')
def test_map(self):
+ # callable
index = self.create_index()
+ expected = index
+ result = index.map(lambda x: x)
+ tm.assert_index_equal(result, expected)
- # From output of UInt64Index mapping can't infer that we
- # shouldn't default to Int64
- if isinstance(index, UInt64Index):
- expected = Index(index.values.tolist())
- else:
- expected = index
+ @pytest.mark.parametrize(
+ "mapper",
+ [
+ lambda values, index: {i: e for e, i in zip(values, index)},
+ lambda values, index: pd.Series(values, index)])
+ def test_map_dictlike(self, mapper):
- tm.assert_index_equal(index.map(lambda x: x), expected)
+ index = self.create_index()
+ if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
+ pytest.skip("skipping tests for {}".format(type(index)))
- identity_dict = {x: x for x in index}
- tm.assert_index_equal(index.map(identity_dict), expected)
+ expected = index
- # Use values to work around MultiIndex instantiation of series
- identity_series = Series(expected.values, index=index)
- tm.assert_index_equal(index.map(identity_series), expected)
+ identity = mapper(index.values, index)
+ result = index.map(identity)
+ tm.assert_index_equal(result, expected)
# empty mappable
- nan_index = pd.Index([np.nan] * len(index))
- series_map = pd.Series()
- tm.assert_index_equal(index.map(series_map), nan_index)
-
- dict_map = {}
- tm.assert_index_equal(index.map(dict_map), nan_index)
+ expected = pd.Index([np.nan] * len(index))
+ result = index.map(mapper(expected, index))
+ tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py
index 839fccc1441e5..a01c60a47c0f9 100644
--- a/pandas/tests/indexes/datetimelike.py
+++ b/pandas/tests/indexes/datetimelike.py
@@ -1,7 +1,6 @@
""" generic datetimelike tests """
import pytest
import pandas as pd
-import numpy as np
from .common import Base
import pandas.util.testing as tm
@@ -73,6 +72,6 @@ def test_map_dictlike(self, mapper):
# empty map; these map to np.nan because we cannot know
# to re-infer things
- expected = pd.Index([np.nan] * len(self.index))
+ expected = pd.Index([pd.NaT] * len(self.index))
result = self.index.map(mapper([], []))
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 7dfd1511da292..372c11b296d9e 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -852,11 +852,15 @@ def test_map_tseries_indices_return_index(self):
exp = Index(range(24), name='hourly')
tm.assert_index_equal(exp, date_index.map(lambda x: x.hour))
- def test_map_with_dict_and_series(self):
+ @pytest.mark.parametrize(
+ "mapper",
+ [
+ lambda values, index: {i: e for e, i in zip(values, index)},
+ lambda values, index: pd.Series(values, index)])
+ def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
- mapper = Series(expected.values, index=[0, 1, 2])
- result = tm.makeIntIndex(3).map(mapper)
+ result = tm.makeIntIndex(3).map(mapper(expected.values, [0, 1, 2]))
tm.assert_index_equal(result, expected)
for name in self.indices.keys():
@@ -867,21 +871,16 @@ def test_map_with_dict_and_series(self):
# Cannot map duplicated index
continue
- cur_index = self.indices[name]
- expected = Index(np.arange(len(cur_index), 0, -1))
- mapper = pd.Series(expected, index=cur_index)
- result = cur_index.map(mapper)
-
- tm.assert_index_equal(result, expected)
+ index = self.indices[name]
+ expected = Index(np.arange(len(index), 0, -1))
- # If the mapper is empty the expected index type is Int64Index
- # but the output defaults to Float64 so I treat it independently
- mapper = {o: n for o, n in
- zip(cur_index, expected)}
+ # to match proper result coercion for uints
+ if name == 'uintIndex':
+ expected = expected.astype('uint64')
+ elif name == 'empty':
+ expected = Index([])
- result = cur_index.map(mapper)
- if not mapper:
- expected = Float64Index([])
+ result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_map_with_non_function_missing_values(self):
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 33ba0189d747a..7df189113247b 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -566,10 +566,6 @@ def test_repr_max_seq_item_setting(self):
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
- @pytest.mark.xfail(reason='get_indexer behavior does not currently work')
- def test_map(self):
- super(TestIntervalIndex, self).test_map()
-
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
| xref #18482 | https://api.github.com/repos/pandas-dev/pandas/pulls/18491 | 2017-11-25T23:05:08Z | 2017-11-26T15:14:52Z | 2017-11-26T15:14:52Z | 2017-11-27T01:39:00Z |
Implement business_start/end cases for shift_months | diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py
index ea826e8270ace..849776bf9a591 100644
--- a/asv_bench/benchmarks/offset.py
+++ b/asv_bench/benchmarks/offset.py
@@ -38,6 +38,25 @@ def time_apply_series(self, param):
self.ser + self.offset
+class OnOffset(object):
+ goal_time = 0.2
+
+ params = [pd.offsets.QuarterBegin(), pd.offsets.QuarterEnd(),
+ pd.offsets.BQuarterBegin(), pd.offsets.BQuarterEnd()]
+ param_names = ['offset']
+
+ def setup(self, offset):
+ self.offset = offset
+ self.dates = [datetime(2016, m, d)
+ for m in [10, 11, 12]
+ for d in [1, 2, 3, 28, 29, 30, 31]
+ if not (m == 11 and d == 31)]
+
+ def time_on_offset(self, offset):
+ for date in self.dates:
+ self.offset.onOffset(date)
+
+
class DatetimeIndexArithmetic(object):
goal_time = 0.2
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 78b8ca8d5a480..d0b27e1d22a89 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -116,6 +116,7 @@ Performance Improvements
- ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`)
- Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`)
- Improved performance of ``IntervalIndex.symmetric_difference()`` (:issue:`18475`)
+- Improved performance of ``DatetimeIndex`` and ``Series`` arithmetic operations with Business-Month and Business-Quarter frequencies (:issue:`18489`)
.. _whatsnew_0220.docs:
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 4ed4d4a9b7b99..654c51f0ca842 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -554,8 +554,58 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
dts.day = get_days_in_month(dts.year, dts.month)
out[i] = dtstruct_to_dt64(&dts)
+
+ elif day == 'business_start':
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ months_to_roll = months
+ wkday, days_in_month = monthrange(dts.year, dts.month)
+ compare_day = get_firstbday(wkday, days_in_month)
+
+ if months_to_roll > 0 and dts.day < compare_day:
+ months_to_roll -= 1
+ elif months_to_roll <= 0 and dts.day > compare_day:
+ # as if rolled forward already
+ months_to_roll += 1
+
+ dts.year = year_add_months(dts, months_to_roll)
+ dts.month = month_add_months(dts, months_to_roll)
+
+ wkday, days_in_month = monthrange(dts.year, dts.month)
+ dts.day = get_firstbday(wkday, days_in_month)
+ out[i] = dtstruct_to_dt64(&dts)
+
+ elif day == 'business_end':
+ for i in range(count):
+ if dtindex[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+
+ dt64_to_dtstruct(dtindex[i], &dts)
+ months_to_roll = months
+ wkday, days_in_month = monthrange(dts.year, dts.month)
+ compare_day = get_lastbday(wkday, days_in_month)
+
+ if months_to_roll > 0 and dts.day < compare_day:
+ months_to_roll -= 1
+ elif months_to_roll <= 0 and dts.day > compare_day:
+ # as if rolled forward already
+ months_to_roll += 1
+
+ dts.year = year_add_months(dts, months_to_roll)
+ dts.month = month_add_months(dts, months_to_roll)
+
+ wkday, days_in_month = monthrange(dts.year, dts.month)
+ dts.day = get_lastbday(wkday, days_in_month)
+ out[i] = dtstruct_to_dt64(&dts)
+
else:
- raise ValueError("day must be None, 'start' or 'end'")
+ raise ValueError("day must be None, 'start', 'end', "
+ "'business_start', or 'business_end'")
return np.asarray(out)
diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py
index 1d47cf67c6e55..292dd5eba938e 100644
--- a/pandas/tests/tseries/offsets/test_yqm_offsets.py
+++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py
@@ -6,6 +6,7 @@
import pytest
+import pandas as pd
from pandas import Timestamp
from pandas import compat
@@ -32,6 +33,35 @@ def test_quarterly_dont_normalize():
assert (result.time() == date.time())
+@pytest.mark.parametrize('offset', [MonthBegin(), MonthEnd(),
+ BMonthBegin(), BMonthEnd()])
+def test_apply_index(offset):
+ rng = pd.date_range(start='1/1/2000', periods=100000, freq='T')
+ ser = pd.Series(rng)
+
+ res = rng + offset
+ res_v2 = offset.apply_index(rng)
+ assert (res == res_v2).all()
+ assert res[0] == rng[0] + offset
+ assert res[-1] == rng[-1] + offset
+ res2 = ser + offset
+ # apply_index is only for indexes, not series, so no res2_v2
+ assert res2.iloc[0] == ser.iloc[0] + offset
+ assert res2.iloc[-1] == ser.iloc[-1] + offset
+
+
+@pytest.mark.parametrize('offset', [QuarterBegin(), QuarterEnd(),
+ BQuarterBegin(), BQuarterEnd()])
+def test_on_offset(offset):
+ dates = [datetime(2016, m, d)
+ for m in [10, 11, 12]
+ for d in [1, 2, 3, 28, 29, 30, 31] if not (m == 11 and d == 31)]
+ for date in dates:
+ res = offset.onOffset(date)
+ slow_version = date == (date + offset) - offset
+ assert res == slow_version
+
+
# --------------------------------------------------------------------
# Months
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index a307b7e5817a8..8e1ead5dfbe9e 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -929,8 +929,9 @@ def name(self):
if self.isAnchored:
return self.rule_code
else:
+ month = liboffsets._int_to_month[self.n]
return "{code}-{month}".format(code=self.rule_code,
- month=_int_to_month[self.n])
+ month=month)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
@@ -950,28 +951,23 @@ def apply(self, other):
return shift_month(other, n, self._day_opt)
+ @apply_index_wraps
+ def apply_index(self, i):
+ shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
+ return i._shallow_copy(shifted)
+
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
_prefix = 'M'
_day_opt = 'end'
- @apply_index_wraps
- def apply_index(self, i):
- shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
- return i._shallow_copy(shifted)
-
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
_prefix = 'MS'
_day_opt = 'start'
- @apply_index_wraps
- def apply_index(self, i):
- shifted = liboffsets.shift_months(i.asi8, self.n, self._day_opt)
- return i._shallow_copy(shifted)
-
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
@@ -1008,6 +1004,7 @@ class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
_prefix = 'CBM'
onOffset = DateOffset.onOffset # override MonthOffset method
+ apply_index = DateOffset.apply_index # override MonthOffset method
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
@@ -1083,6 +1080,7 @@ class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
_prefix = 'CBMS'
onOffset = DateOffset.onOffset # override MonthOffset method
+ apply_index = DateOffset.apply_index # override MonthOffset method
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, offset=timedelta(0)):
@@ -1603,7 +1601,7 @@ def isAnchored(self):
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
- kwargs['startingMonth'] = _month_to_int[suffix]
+ kwargs['startingMonth'] = liboffsets._month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
@@ -1611,7 +1609,7 @@ def _from_name(cls, suffix=None):
@property
def rule_code(self):
- month = _int_to_month[self.startingMonth]
+ month = liboffsets._int_to_month[self.startingMonth]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
@apply_wraps
@@ -1631,6 +1629,12 @@ def apply(self, other):
return shift_month(other, 3 * n - months_since, self._day_opt)
+ def onOffset(self, dt):
+ if self.normalize and not _is_normalized(dt):
+ return False
+ modMonth = (dt.month - self.startingMonth) % 3
+ return modMonth == 0 and dt.day == self._get_offset_day(dt)
+
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
@@ -1644,16 +1648,6 @@ class BQuarterEnd(QuarterOffset):
_prefix = 'BQ'
_day_opt = 'business_end'
- def onOffset(self, dt):
- if self.normalize and not _is_normalized(dt):
- return False
- modMonth = (dt.month - self.startingMonth) % 3
- return modMonth == 0 and dt.day == self._get_offset_day(dt)
-
-
-_int_to_month = tslib._MONTH_ALIASES
-_month_to_int = {v: k for k, v in _int_to_month.items()}
-
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
@@ -1680,12 +1674,6 @@ class QuarterEnd(EndMixin, QuarterOffset):
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
- def onOffset(self, dt):
- if self.normalize and not _is_normalized(dt):
- return False
- modMonth = (dt.month - self.startingMonth) % 3
- return modMonth == 0 and dt.day == self._get_offset_day(dt)
-
class QuarterBegin(BeginMixin, QuarterOffset):
_outputName = 'QuarterBegin'
@@ -1697,7 +1685,8 @@ class QuarterBegin(BeginMixin, QuarterOffset):
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
- freqstr = 'Q-{month}'.format(month=_int_to_month[freq_month])
+ month = liboffsets._int_to_month[freq_month]
+ freqstr = 'Q-{month}'.format(month=month)
return self._beg_apply_index(i, freqstr)
@@ -1738,12 +1727,12 @@ def __init__(self, n=1, normalize=False, month=None):
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
- kwargs['month'] = _month_to_int[suffix]
+ kwargs['month'] = liboffsets._month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
- month = _int_to_month[self.month]
+ month = liboffsets._int_to_month[self.month]
return '{prefix}-{month}'.format(prefix=self._prefix, month=month)
@@ -1784,7 +1773,8 @@ class YearBegin(BeginMixin, YearOffset):
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
- freqstr = 'A-{month}'.format(month=_int_to_month[freq_month])
+ month = liboffsets._int_to_month[freq_month]
+ freqstr = 'A-{month}'.format(month=month)
return self._beg_apply_index(i, freqstr)
@@ -1969,7 +1959,7 @@ def _get_suffix_prefix(self):
def get_rule_code_suffix(self):
prefix = self._get_suffix_prefix()
- month = _int_to_month[self.startingMonth]
+ month = liboffsets._int_to_month[self.startingMonth]
weekday = _int_to_weekday[self.weekday]
return '{prefix}-{month}-{weekday}'.format(prefix=prefix, month=month,
weekday=weekday)
@@ -1984,7 +1974,7 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
raise ValueError("Unable to parse varion_code: "
"{code}".format(code=varion_code))
- startingMonth = _month_to_int[startingMonth_code]
+ startingMonth = liboffsets._month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {"weekday": weekday,
| Unifies `apply_index` implementations for MonthEnd/MonthBegin, plus extends them to BMonthEnd and BMonthBegin.
Unifies `onOffset` implementations for QuarterEnd/BQuarterEnd, plus extends them to QuarterBegin/BQuarterBegin.
Implements a `cdef` version of `monthrange`. | https://api.github.com/repos/pandas-dev/pandas/pulls/18489 | 2017-11-25T21:27:20Z | 2017-11-27T00:59:27Z | 2017-11-27T00:59:27Z | 2017-12-08T19:38:27Z |
Fix tzaware dates mismatch but no exception raised | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 637ccf0603e0f..db1f685cb16f7 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -64,7 +64,7 @@ Conversion
- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`)
- Bug in :meth:`IntervalIndex.copy` when copying and ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`)
- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising``TypeError` (:issue:`18372`)
--
+- Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an err if ``end.tzinfo`` is None (:issue:`18431`)
-
Indexing
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 7fb48e7c66f47..d326f2cb68f24 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -284,10 +284,9 @@ cdef object get_dst_info(object tz):
def infer_tzinfo(start, end):
if start is not None and end is not None:
tz = start.tzinfo
- if end.tzinfo:
- if not (get_timezone(tz) == get_timezone(end.tzinfo)):
- msg = 'Inputs must both have the same timezone, {tz1} != {tz2}'
- raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo))
+ if not (get_timezone(tz) == get_timezone(end.tzinfo)):
+ msg = 'Inputs must both have the same timezone, {tz1} != {tz2}'
+ raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo))
elif start is not None:
tz = start.tzinfo
elif end is not None:
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py
index edcee0479827f..826e20b8b0586 100644
--- a/pandas/tests/indexes/datetimes/test_date_range.py
+++ b/pandas/tests/indexes/datetimes/test_date_range.py
@@ -290,6 +290,22 @@ def test_precision_finer_than_offset(self):
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
+ dt1, dt2 = '2017-01-01', '2017-01-01'
+ tz1, tz2 = 'US/Eastern', 'Europe/London'
+
+ @pytest.mark.parametrize("start,end", [
+ (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)),
+ (pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)),
+ (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)),
+ (pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1))
+ ])
+ def test_mismatching_tz_raises_err(self, start, end):
+ # issue 18488
+ with pytest.raises(TypeError):
+ pd.date_range(start, end)
+ with pytest.raises(TypeError):
+ pd.DatetimeIndex(start, end, freq=BDay())
+
class TestBusinessDateRange(object):
diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py
index 3dfad2d4af75e..a01166daf6be1 100644
--- a/pandas/tests/tseries/test_timezones.py
+++ b/pandas/tests/tseries/test_timezones.py
@@ -424,7 +424,7 @@ def test_with_tz(self):
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
- '1/1/2009', tz=pytz.utc)
+ datetime(2009, 1, 1, tzinfo=pytz.utc))
pytest.raises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',
| - [x] closes #18431
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18488 | 2017-11-25T19:31:14Z | 2017-11-26T15:19:14Z | 2017-11-26T15:19:14Z | 2017-12-11T20:11:08Z |
BUG: Fix the un-pickleable plot with DatetimeIndex | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 637ccf0603e0f..33c4aa72c7fd6 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -93,7 +93,7 @@ I/O
Plotting
^^^^^^^^
--
+- Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`)
-
-
diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py
index 3d04973ed0009..56b5311326e98 100644
--- a/pandas/plotting/_timeseries.py
+++ b/pandas/plotting/_timeseries.py
@@ -1,5 +1,7 @@
# TODO: Use the fact that axis can have units to simplify the process
+import functools
+
import numpy as np
from matplotlib import pylab
@@ -293,6 +295,10 @@ def format_timedelta_ticks(x, pos, n_decimals):
return s
+def _format_coord(freq, t, y):
+ return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)
+
+
def format_dateaxis(subplot, freq, index):
"""
Pretty-formats the date axis (x-axis).
@@ -327,8 +333,7 @@ def format_dateaxis(subplot, freq, index):
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
- subplot.format_coord = lambda t, y: (
- "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
+ subplot.format_coord = functools.partial(_format_coord, freq)
elif isinstance(index, TimedeltaIndex):
subplot.xaxis.set_major_formatter(
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index d66012e2a56a0..d6cedac747f25 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1,13 +1,14 @@
""" Test cases for time series specific (freq conversion, etc) """
from datetime import datetime, timedelta, date, time
+import pickle
import pytest
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame, NaT
-from pandas.compat import is_platform_mac
+from pandas.compat import is_platform_mac, PY3
from pandas.core.indexes.datetimes import date_range, bdate_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tseries.offsets import DateOffset
@@ -1470,5 +1471,12 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
+
+ # GH18439
+ # this is supported only in Python 3 pickle since
+ # pickle in Python2 doesn't support instancemethod pickling
+ if PY3:
+ with ensure_clean(return_filelike=True) as path:
+ pickle.dump(fig, path)
finally:
plt.close(fig)
| - [x] closes #18439
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18486 | 2017-11-25T17:30:22Z | 2017-12-06T11:26:47Z | 2017-12-06T11:26:47Z | 2017-12-11T20:20:54Z |
BUG: Fix inaccurate rolling.var calculation | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 51fd3b1076ade..976f3524e3c71 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -103,7 +103,7 @@ Groupby/Resample/Rolling
- Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`)
- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequecy is 12h or higher (:issue:`15549`)
- Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`)
--
+- Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array (:issue:`18430`)
-
-
diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx
index 4d5ebdc0c581a..95df5a07a390b 100644
--- a/pandas/_libs/window.pyx
+++ b/pandas/_libs/window.pyx
@@ -661,9 +661,11 @@ cdef inline void add_var(double val, double *nobs, double *mean_x,
if val == val:
nobs[0] = nobs[0] + 1
- delta = (val - mean_x[0])
+ # a part of Welford's method for the online variance-calculation
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ delta = val - mean_x[0]
mean_x[0] = mean_x[0] + delta / nobs[0]
- ssqdm_x[0] = ssqdm_x[0] + delta * (val - mean_x[0])
+ ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0]
cdef inline void remove_var(double val, double *nobs, double *mean_x,
@@ -675,9 +677,11 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x,
if val == val:
nobs[0] = nobs[0] - 1
if nobs[0]:
- delta = (val - mean_x[0])
+ # a part of Welford's method for the online variance-calculation
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ delta = val - mean_x[0]
mean_x[0] = mean_x[0] - delta / nobs[0]
- ssqdm_x[0] = ssqdm_x[0] - delta * (val - mean_x[0])
+ ssqdm_x[0] = ssqdm_x[0] - ((nobs[0] + 1) * delta ** 2) / nobs[0]
else:
mean_x[0] = 0
ssqdm_x[0] = 0
@@ -689,7 +693,7 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp,
Numerically stable implementation using Welford's method.
"""
cdef:
- double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta
+ double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta, mean_x_old
int64_t s, e
bint is_variable
Py_ssize_t i, j, N
@@ -749,6 +753,9 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp,
add_var(input[i], &nobs, &mean_x, &ssqdm_x)
output[i] = calc_var(minp, ddof, nobs, ssqdm_x)
+ # a part of Welford's method for the online variance-calculation
+ # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+
# After the first window, observations can both be added and
# removed
for i from win <= i < N:
@@ -760,10 +767,12 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp,
# Adding one observation and removing another one
delta = val - prev
- prev -= mean_x
+ mean_x_old = mean_x
+
mean_x += delta / nobs
- val -= mean_x
- ssqdm_x += (val + prev) * delta
+ ssqdm_x += ((nobs - 1) * val
+ + (nobs + 1) * prev
+ - 2 * nobs * mean_x_old) * delta / nobs
else:
add_var(val, &nobs, &mean_x, &ssqdm_x)
diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py
index 2427bcea4053d..8135e263f412f 100644
--- a/pandas/tests/test_window.py
+++ b/pandas/tests/test_window.py
@@ -2482,6 +2482,14 @@ def test_rolling_corr_pairwise(self):
self._check_pairwise_moment('rolling', 'corr', window=10,
min_periods=5)
+ @pytest.mark.parametrize('window', range(7))
+ def test_rolling_corr_with_zero_variance(self, window):
+ # GH 18430
+ s = pd.Series(np.zeros(20))
+ other = pd.Series(np.arange(20))
+
+ assert s.rolling(window=window).corr(other=other).isna().all()
+
def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
| - [x] closes #18430
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/18481 | 2017-11-25T13:37:59Z | 2017-11-25T21:13:15Z | 2017-11-25T21:13:14Z | 2017-12-11T20:22:45Z |
CLN: ASV ctors benchmark | diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py
index b5694a3a21502..2c9c382e2db86 100644
--- a/asv_bench/benchmarks/ctors.py
+++ b/asv_bench/benchmarks/ctors.py
@@ -1,24 +1,28 @@
-from .pandas_vb_common import *
+import numpy as np
+from pandas import DataFrame, Series, Index, DatetimeIndex, Timestamp
class Constructors(object):
+
goal_time = 0.2
def setup(self):
- self.arr = np.random.randn(100, 100)
+ N = 10**2
+ np.random.seed(1234)
+ self.arr = np.random.randn(N, N)
self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)
- self.data = np.random.randn(100)
- self.index = Index(np.arange(100))
+ self.data = np.random.randn(N)
+ self.index = Index(np.arange(N))
- self.s = Series(([Timestamp('20110101'), Timestamp('20120101'),
- Timestamp('20130101')] * 1000))
+ self.s = Series([Timestamp('20110101'), Timestamp('20120101'),
+ Timestamp('20130101')] * N * 10)
def time_frame_from_ndarray(self):
DataFrame(self.arr)
def time_series_from_ndarray(self):
- pd.Series(self.data, index=self.index)
+ Series(self.data, index=self.index)
def time_index_from_array_string(self):
Index(self.arr_str)
@@ -26,5 +30,5 @@ def time_index_from_array_string(self):
def time_dtindex_from_series(self):
DatetimeIndex(self.s)
- def time_dtindex_from_series2(self):
+ def time_dtindex_from_index_with_series(self):
Index(self.s)
| I think this these asvs should be enhanced or reorganized to other files in the future, but in the meantime:
- Added `np.random.seed(1234)` in setup classes where random data is created xref #8144
- Ran flake8 and replaced star imports
```
$ asv run -q -b ^ctors
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 20.00%] ··· Running ctors.Constructors.time_dtindex_from_index_with_series 99.0μs
[ 40.00%] ··· Running ctors.Constructors.time_dtindex_from_series 59.5μs
[ 60.00%] ··· Running ctors.Constructors.time_frame_from_ndarray 237μs
[ 80.00%] ··· Running ctors.Constructors.time_index_from_array_string 101μs
[100.00%] ··· Running ctors.Constructors.time_series_from_ndarray 61.2μs
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/18479 | 2017-11-25T01:57:55Z | 2017-11-25T14:31:39Z | 2017-11-25T14:31:38Z | 2017-11-25T18:23:37Z |
TYPO: IntervalIndex.symmetric_differnce -> IntervalIndex.symmetric_difference | diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 4ae3d9be04aa7..be23e31279d0c 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -114,7 +114,7 @@ Performance Improvements
- The overriden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`)
- ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`)
- Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`)
--
+- Improved performance of ``IntervalIndex.symmetric_difference()`` (:issue:`18475`)
.. _whatsnew_0220.docs:
@@ -146,7 +146,7 @@ Indexing
- Bug in :func:`MultiIndex.remove_unused_levels`` which would fill nan values (:issue:`18417`)
- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`)
- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`)
--
+- Bug in ``IntervalIndex.symmetric_difference()`` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`)
I/O
^^^
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 06843150bf46a..3f74694880533 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1171,7 +1171,7 @@ def func(self, other):
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
- symmetric_differnce = _setop('symmetric_difference')
+ symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py
index 33ba0189d747a..815d5fcde1400 100644
--- a/pandas/tests/indexes/test_interval.py
+++ b/pandas/tests/indexes/test_interval.py
@@ -3,9 +3,9 @@
import pytest
import numpy as np
from datetime import timedelta
-from pandas import (Interval, IntervalIndex, Index, isna,
- interval_range, Timestamp, Timedelta,
- compat, date_range, timedelta_range, DateOffset)
+from pandas import (
+ Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
+ Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
@@ -19,6 +19,11 @@ def closed(request):
return request.param
+@pytest.fixture(scope='class', params=[None, 'foo'])
+def name(request):
+ return request.param
+
+
class TestIntervalIndex(Base):
_holder = IntervalIndex
@@ -29,13 +34,14 @@ def setup_method(self, method):
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
- return IntervalIndex.from_breaks(np.arange(3), closed=closed)
+ return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
- return IntervalIndex.from_tuples(
- [(0, 1), np.nan, (1, 2)], closed=closed)
+ mask = [True, False] + [True] * 8
+ return IntervalIndex.from_arrays(
+ np.where(mask, np.arange(10), np.nan),
+ np.where(mask, np.arange(1, 11), np.nan), closed=closed)
- @pytest.mark.parametrize('name', [None, 'foo'])
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
@@ -226,55 +232,67 @@ def f():
def test_properties(self, closed):
index = self.create_index(closed=closed)
- assert len(index) == 2
- assert index.size == 2
- assert index.shape == (2, )
+ assert len(index) == 10
+ assert index.size == 10
+ assert index.shape == (10, )
- tm.assert_index_equal(index.left, Index([0, 1]))
- tm.assert_index_equal(index.right, Index([1, 2]))
- tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
+ tm.assert_index_equal(index.left, Index(np.arange(10)))
+ tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
+ tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
- expected = np.array([Interval(0, 1, closed=closed),
- Interval(1, 2, closed=closed)], dtype=object)
+ ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
+ expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
- assert len(index) == 3
- assert index.size == 3
- assert index.shape == (3, )
+ assert len(index) == 10
+ assert index.size == 10
+ assert index.shape == (10, )
- tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
- tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
- tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
+ expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
+ expected_right = expected_left + 1
+ expected_mid = expected_left + 0.5
+ tm.assert_index_equal(index.left, expected_left)
+ tm.assert_index_equal(index.right, expected_right)
+ tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
- expected = np.array([Interval(0, 1, closed=closed), np.nan,
- Interval(1, 2, closed=closed)], dtype=object)
+ ivs = [Interval(l, r, closed) if notna(l) else np.nan
+ for l, r in zip(expected_left, expected_right)]
+ expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
- tm.assert_numpy_array_equal(index.isna(),
- np.array([False, False]))
- tm.assert_numpy_array_equal(index.notna(),
- np.array([True, True]))
+
+ result = index.isna()
+ expected = np.repeat(False, len(index))
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = index.notna()
+ expected = np.repeat(True, len(index))
+ tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
- tm.assert_numpy_array_equal(index.notna(),
- np.array([True, False, True]))
- tm.assert_numpy_array_equal(index.isna(),
- np.array([False, True, False]))
+
+ result = index.isna()
+ expected = np.array([False, True] + [False] * (len(index) - 2))
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = index.notna()
+ expected = np.array([True, False] + [True] * (len(index) - 2))
+ tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
- expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
+ expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
@@ -362,7 +380,7 @@ def test_where(self, closed, klass):
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
- expected = IntervalIndex.from_breaks([1, 2], closed=closed)
+ expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@@ -414,13 +432,13 @@ def test_insert(self, data):
def test_take(self, closed):
index = self.create_index(closed=closed)
- actual = index.take([0, 1])
- tm.assert_index_equal(actual, index)
+ result = index.take(range(10))
+ tm.assert_index_equal(result, index)
+ result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
- actual = index.take([0, 0, 1])
- tm.assert_index_equal(actual, expected)
+ tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
@@ -780,50 +798,85 @@ def test_non_contiguous(self, closed):
assert 1.5 not in index
def test_union(self, closed):
- idx = self.create_index(closed=closed)
- other = IntervalIndex.from_arrays([2], [3], closed=closed)
- expected = IntervalIndex.from_arrays(
- range(3), range(1, 4), closed=closed)
- actual = idx.union(other)
- assert expected.equals(actual)
+ index = self.create_index(closed=closed)
+ other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
- actual = other.union(idx)
- assert expected.equals(actual)
+ expected = IntervalIndex.from_breaks(range(13), closed=closed)
+ result = index.union(other)
+ tm.assert_index_equal(result, expected)
+
+ result = other.union(index)
+ tm.assert_index_equal(result, expected)
- tm.assert_index_equal(idx.union(idx), idx)
- tm.assert_index_equal(idx.union(idx[:1]), idx)
+ tm.assert_index_equal(index.union(index), index)
+ tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
- idx = self.create_index(closed=closed)
- other = IntervalIndex.from_breaks([1, 2, 3], closed=closed)
- expected = IntervalIndex.from_breaks([1, 2], closed=closed)
- actual = idx.intersection(other)
- assert expected.equals(actual)
+ index = self.create_index(closed=closed)
+ other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
+
+ expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
+ result = index.intersection(other)
+ tm.assert_index_equal(result, expected)
+
+ result = other.intersection(index)
+ tm.assert_index_equal(result, expected)
- tm.assert_index_equal(idx.intersection(idx), idx)
+ tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
- idx = self.create_index(closed=closed)
- tm.assert_index_equal(idx.difference(idx[:1]), idx[1:])
+ index = self.create_index(closed=closed)
+ tm.assert_index_equal(index.difference(index[:1]), index[1:])
- def test_symmetric_difference(self):
- result = self.index[:1].symmetric_difference(self.index[1:])
- expected = self.index
+ def test_symmetric_difference(self, closed):
+ idx = self.create_index(closed=closed)
+ result = idx[1:].symmetric_difference(idx[:-1])
+ expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
- def test_set_operation_errors(self):
- pytest.raises(ValueError, self.index.union, self.index.left)
+ @pytest.mark.parametrize('op_name', [
+ 'union', 'intersection', 'difference', 'symmetric_difference'])
+ def test_set_operation_errors(self, closed, op_name):
+ index = self.create_index(closed=closed)
+ set_op = getattr(index, op_name)
+
+ # test errors
+ msg = ('can only do set operations between two IntervalIndex objects '
+ 'that are closed on the same side')
+ with tm.assert_raises_regex(ValueError, msg):
+ set_op(Index([1, 2, 3]))
- other = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
- pytest.raises(ValueError, self.index.union, other)
+ for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
+ other = self.create_index(closed=other_closed)
+ with tm.assert_raises_regex(ValueError, msg):
+ set_op(other)
def test_isin(self, closed):
- idx = self.create_index(closed=closed)
- actual = idx.isin(idx)
- tm.assert_numpy_array_equal(np.array([True, True]), actual)
+ index = self.create_index(closed=closed)
- actual = idx.isin(idx[:1])
- tm.assert_numpy_array_equal(np.array([True, False]), actual)
+ expected = np.array([True] + [False] * (len(index) - 1))
+ result = index.isin(index[:1])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = index.isin([index[0]])
+ tm.assert_numpy_array_equal(result, expected)
+
+ other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
+ expected = np.array([True] * (len(index) - 1) + [False])
+ result = index.isin(other)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = index.isin(other.tolist())
+ tm.assert_numpy_array_equal(result, expected)
+
+ for other_closed in {'right', 'left', 'both', 'neither'}:
+ other = self.create_index(closed=other_closed)
+ expected = np.repeat(closed == other_closed, len(index))
+ result = index.isin(other)
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = index.isin(other.tolist())
+ tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
@@ -896,23 +949,24 @@ def test_missing_values(self, closed):
np.array([True, False, False]))
def test_sort_values(self, closed):
- expected = IntervalIndex.from_breaks([1, 2, 3, 4], closed=closed)
- actual = IntervalIndex.from_tuples(
- [(3, 4), (1, 2), (2, 3)], closed=closed).sort_values()
- tm.assert_index_equal(expected, actual)
+ index = self.create_index(closed=closed)
+
+ result = index.sort_values()
+ tm.assert_index_equal(result, index)
- # nan
- idx = self.create_index_with_nan(closed=closed)
- mask = idx.isna()
- tm.assert_numpy_array_equal(mask, np.array([False, True, False]))
+ result = index.sort_values(ascending=False)
+ tm.assert_index_equal(result, index[::-1])
- result = idx.sort_values()
- mask = result.isna()
- tm.assert_numpy_array_equal(mask, np.array([False, False, True]))
+ # with nan
+ index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
- result = idx.sort_values(ascending=False)
- mask = result.isna()
- tm.assert_numpy_array_equal(mask, np.array([True, False, False]))
+ result = index.sort_values()
+ expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
+ tm.assert_index_equal(result, expected)
+
+ result = index.sort_values(ascending=False)
+ expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
+ tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
@@ -992,58 +1046,58 @@ def test_is_non_overlapping_monotonic(self, closed):
class TestIntervalRange(object):
- def test_construction_from_numeric(self, closed):
+ def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
- np.arange(0, 6), name='foo', closed=closed)
+ np.arange(0, 6), name=name, closed=closed)
- result = interval_range(start=0, end=5, name='foo', closed=closed)
+ result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=0, periods=5, name='foo', closed=closed)
+ result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=5, periods=5, name='foo', closed=closed)
+ result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
- name='foo', closed=closed)
+ name=name, closed=closed)
- result = interval_range(start=0, end=6, freq=2, name='foo',
+ result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=0, periods=3, freq=2, name='foo',
+ result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=6, periods=3, freq=2, name='foo',
+ result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
- name='foo', closed=closed)
- result = interval_range(start=0, end=4, freq=1.5, name='foo',
+ name=name, closed=closed)
+ result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- def test_construction_from_timestamp(self, closed):
+ def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
- expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+ expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- result = interval_range(start=start, end=end, name='foo',
+ result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=start, periods=5, name='foo',
+ result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=end, periods=5, name='foo',
+ result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
@@ -1051,23 +1105,23 @@ def test_construction_from_timestamp(self, closed):
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
- expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+ expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
@@ -1075,41 +1129,41 @@ def test_construction_from_timestamp(self, closed):
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
- expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+ expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=start, periods=11, freq=freq, name='foo',
+ result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=end, periods=11, freq=freq, name='foo',
+ result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- def test_construction_from_timedelta(self, closed):
+ def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
- expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+ expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- result = interval_range(start=start, end=end, name='foo',
+ result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=start, periods=5, name='foo',
+ result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=end, periods=5, name='foo',
+ result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
@@ -1117,23 +1171,23 @@ def test_construction_from_timedelta(self, closed):
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
- expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
+ expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(start=start, periods=3, freq=freq, name='foo',
+ result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
- result = interval_range(end=end, periods=3, freq=freq, name='foo',
+ result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
- result = interval_range(start=start, end=end, freq=freq, name='foo',
+ result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
| - [X] closes #18475
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
Before this, `IntervalIndex.symmetric_difference` hit the base class implementation, and appeared to generally be working, albeit without full error handling and with worse performance than the intended implementation.
Also went through and did some more work on the tests:
- expanded the `test_set_operation_errors`, since this didn't have full coverage of all set ops prior.
- `self.create_index` previous only created an index with two elements, which seemed kind of small; increased this to 10 elements, and updated impacted tests accordingly.
- added a fixture for parameterizing over `name`, and updated a few tests to use this. | https://api.github.com/repos/pandas-dev/pandas/pulls/18476 | 2017-11-24T22:59:55Z | 2017-11-26T15:05:20Z | 2017-11-26T15:05:19Z | 2017-12-04T20:43:03Z |
Make khash its own extension | diff --git a/pandas/_libs/src/khash.pxd b/pandas/_libs/khash.pxd
similarity index 98%
rename from pandas/_libs/src/khash.pxd
rename to pandas/_libs/khash.pxd
index ba9a3c70097b2..b1d965c3618cd 100644
--- a/pandas/_libs/src/khash.pxd
+++ b/pandas/_libs/khash.pxd
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+# cython: profile=False
from cpython cimport PyObject
from numpy cimport int64_t, uint64_t, int32_t, uint32_t, float64_t
diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx
index 0692d985b4877..1c20dbe7f8fc9 100644
--- a/pandas/_libs/tslibs/resolution.pyx
+++ b/pandas/_libs/tslibs/resolution.pyx
@@ -10,7 +10,7 @@ np.import_array()
from util cimport is_string_object, get_nat
-from khash cimport (
+from pandas._libs.khash cimport (
khiter_t,
kh_destroy_int64, kh_put_int64,
kh_init_int64, kh_int64_t,
diff --git a/setup.py b/setup.py
index 7e56298d1b20b..7faf33b607a09 100755
--- a/setup.py
+++ b/setup.py
@@ -496,7 +496,7 @@ def pxd(name):
'pyxfile': '_libs/hashing'},
'_libs.hashtable': {
'pyxfile': '_libs/hashtable',
- 'pxdfiles': ['_libs/hashtable', '_libs/missing'],
+ 'pxdfiles': ['_libs/hashtable', '_libs/missing', '_libs/khash'],
'depends': (['pandas/_libs/src/klib/khash_python.h'] +
_pxi_dep['hashtable'])},
'_libs.index': {
@@ -550,7 +550,6 @@ def pxd(name):
'_libs.tslib': {
'pyxfile': '_libs/tslib',
'pxdfiles': ['_libs/src/util',
- '_libs/src/khash',
'_libs/tslibs/conversion',
'_libs/tslibs/timedeltas',
'_libs/tslibs/timestamps',
@@ -591,12 +590,11 @@ def pxd(name):
'sources': np_datetime_sources},
'_libs.tslibs.parsing': {
'pyxfile': '_libs/tslibs/parsing',
- 'pxdfiles': ['_libs/src/util',
- '_libs/src/khash']},
+ 'pxdfiles': ['_libs/src/util']},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
'pxdfiles': ['_libs/src/util',
- '_libs/src/khash',
+ '_libs/khash',
'_libs/tslibs/frequencies',
'_libs/tslibs/timezones'],
'depends': tseries_depends,
| The path towards cythonize continues. | https://api.github.com/repos/pandas-dev/pandas/pulls/18472 | 2017-11-24T21:24:41Z | 2017-11-29T12:58:12Z | 2017-11-29T12:58:12Z | 2017-12-08T19:38:26Z |
VIS: let PeriodConverter handle datetime64 data | diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py
index 2a45e20dda4cc..66ee7fa98491f 100644
--- a/pandas/plotting/_converter.py
+++ b/pandas/plotting/_converter.py
@@ -244,7 +244,7 @@ def _convert_1d(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
- Period, pydt.date, pydt.time)
+ Period, pydt.date, pydt.time, np.datetime64)
if (isinstance(values, valid_types) or is_integer(values) or
is_float(values)):
return get_datevalue(values, axis.freq)
@@ -263,7 +263,7 @@ def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
- pydt.date, pydt.time)):
+ pydt.date, pydt.time, np.datetime64)):
return Period(date, freq).ordinal
elif (is_integer(date) or is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index cc275282436c8..47cded19f5300 100644
--- a/pandas/tests/plotting/test_converter.py
+++ b/pandas/tests/plotting/test_converter.py
@@ -327,21 +327,19 @@ def test_conversion(self):
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
assert rs == xp
- # FIXME
- # rs = self.pc.convert(
- # np_datetime64_compat('2012-01-01'), None, self.axis)
- # assert rs == xp
- #
- # rs = self.pc.convert(
- # np_datetime64_compat('2012-01-01 00:00:00+0000'),
- # None, self.axis)
- # assert rs == xp
- #
- # rs = self.pc.convert(np.array([
- # np_datetime64_compat('2012-01-01 00:00:00+0000'),
- # np_datetime64_compat('2012-01-02 00:00:00+0000')]),
- # None, self.axis)
- # assert rs[0] == xp
+ rs = self.pc.convert(
+ np_datetime64_compat('2012-01-01'), None, self.axis)
+ assert rs == xp
+
+ rs = self.pc.convert(
+ np_datetime64_compat('2012-01-01 00:00:00+0000'), None, self.axis)
+ assert rs == xp
+
+ rs = self.pc.convert(np.array([
+ np_datetime64_compat('2012-01-01 00:00:00+0000'),
+ np_datetime64_compat('2012-01-02 00:00:00+0000')]),
+ None, self.axis)
+ assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py
index 935e75ae76e1d..94adf349fe2cd 100644
--- a/pandas/tests/plotting/test_datetimelike.py
+++ b/pandas/tests/plotting/test_datetimelike.py
@@ -1449,6 +1449,19 @@ def test_overlapping_datetime(self):
s2.plot(ax=ax)
s1.plot(ax=ax)
+ @pytest.mark.xfail(reason="GH9053 matplotlib does not use"
+ " ax.xaxis.converter")
+ def test_add_matplotlib_datetime64(self):
+ # GH9053 - ensure that a plot with PeriodConverter still understands
+ # datetime64 data. This still fails because matplotlib overrides the
+ # ax.xaxis.converter with a DatetimeConverter
+ s = Series(np.random.randn(10),
+ index=date_range('1970-01-02', periods=10))
+ ax = s.plot()
+ ax.plot(s.index, s.values, color='g')
+ l1, l2 = ax.lines
+ tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())
+
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
| xref https://github.com/pandas-dev/pandas/issues/9053 (but doesn't solve it) | https://api.github.com/repos/pandas-dev/pandas/pulls/18468 | 2017-11-24T15:14:27Z | 2018-01-23T11:20:10Z | 2018-01-23T11:20:10Z | 2018-01-23T11:20:13Z |
Revert "CI: temp skip geopandas downstream tests (GH18456)" | diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 1ec25bc8bb295..0f0abd8cd3400 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -92,7 +92,6 @@ def test_pandas_datareader():
pandas_datareader.get_data_google('AAPL')
-@pytest.mark.skip(reason="import issue with fiona GH18456")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
| Reverts pandas-dev/pandas#18457
Closes https://github.com/pandas-dev/pandas/issues/18456 | https://api.github.com/repos/pandas-dev/pandas/pulls/18466 | 2017-11-24T10:16:55Z | 2017-11-24T11:47:05Z | 2017-11-24T11:47:05Z | 2017-11-24T13:27:15Z |
CLN: ASV categoricals benchmark | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index a5bb5e790dec1..df41a2afad1f8 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -1,4 +1,6 @@
-from .pandas_vb_common import *
+import numpy as np
+import pandas as pd
+import pandas.util.testing as tm
try:
from pandas.api.types import union_categoricals
except ImportError:
@@ -8,107 +10,136 @@
pass
-class Categoricals(object):
+class Concat(object):
+
goal_time = 0.2
def setup(self):
- N = 100000
- self.s = pd.Series((list('aabbcd') * N)).astype('category')
+ N = 10**5
+ self.s = pd.Series(list('aabbcd') * N).astype('category')
+
+ self.a = pd.Categorical(list('aabbcd') * N)
+ self.b = pd.Categorical(list('bbcdjk') * N)
+
+ def time_concat(self):
+ pd.concat([self.s, self.s])
+
+ def time_union(self):
+ union_categoricals([self.a, self.b])
+
- self.a = pd.Categorical((list('aabbcd') * N))
- self.b = pd.Categorical((list('bbcdjk') * N))
+class Constructor(object):
+ goal_time = 0.2
+
+ def setup(self):
+ N = 10**5
self.categories = list('abcde')
- self.cat_idx = Index(self.categories)
+ self.cat_idx = pd.Index(self.categories)
self.values = np.tile(self.categories, N)
self.codes = np.tile(range(len(self.categories)), N)
- self.datetimes = pd.Series(pd.date_range(
- '1995-01-01 00:00:00', periods=10000, freq='s'))
+ self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',
+ periods=N / 10,
+ freq='s'))
+ self.datetimes_with_nat = self.datetimes.copy()
+ self.datetimes_with_nat.iloc[-1] = pd.NaT
self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
self.values_all_nan = [np.nan] * len(self.values)
- def time_concat(self):
- concat([self.s, self.s])
-
- def time_union(self):
- union_categoricals([self.a, self.b])
+ def time_regular(self):
+ pd.Categorical(self.values, self.categories)
- def time_constructor_regular(self):
- Categorical(self.values, self.categories)
+ def time_fastpath(self):
+ pd.Categorical(self.codes, self.cat_idx, fastpath=True)
- def time_constructor_fastpath(self):
- Categorical(self.codes, self.cat_idx, fastpath=True)
+ def time_datetimes(self):
+ pd.Categorical(self.datetimes)
- def time_constructor_datetimes(self):
- Categorical(self.datetimes)
+ def time_datetimes_with_nat(self):
+ pd.Categorical(self.datetimes_with_nat)
- def time_constructor_datetimes_with_nat(self):
- t = self.datetimes
- t.iloc[-1] = pd.NaT
- Categorical(t)
+ def time_with_nan(self):
+ pd.Categorical(self.values_some_nan)
- def time_constructor_with_nan(self):
- Categorical(self.values_some_nan)
+ def time_all_nan(self):
+ pd.Categorical(self.values_all_nan)
- def time_constructor_all_nan(self):
- Categorical(self.values_all_nan)
+class ValueCounts(object):
-class Categoricals2(object):
goal_time = 0.2
- def setup(self):
- n = 500000
+ params = [True, False]
+ param_names = ['dropna']
+
+ def setup(self, dropna):
+ n = 5 * 10**5
np.random.seed(2718281)
arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
- self.ts = Series(arr).astype('category')
+ self.ts = pd.Series(arr).astype('category')
+
+ def time_value_counts(self, dropna):
+ self.ts.value_counts(dropna=dropna)
+
- self.sel = self.ts.loc[[0]]
+class Repr(object):
- def time_value_counts(self):
- self.ts.value_counts(dropna=False)
+ goal_time = 0.2
- def time_value_counts_dropna(self):
- self.ts.value_counts(dropna=True)
+ def setup(self):
+ self.sel = pd.Series(['s1234']).astype('category')
def time_rendering(self):
str(self.sel)
+
+class SetCategories(object):
+
+ goal_time = 0.2
+
+ def setup(self):
+ n = 5 * 10**5
+ np.random.seed(2718281)
+ arr = ['s%04d' % i for i in np.random.randint(0, n // 10, size=n)]
+ self.ts = pd.Series(arr).astype('category')
+
def time_set_categories(self):
self.ts.cat.set_categories(self.ts.cat.categories[::2])
-class Categoricals3(object):
+class Rank(object):
+
goal_time = 0.2
def setup(self):
- N = 100000
+ N = 10**5
ncats = 100
+ np.random.seed(1234)
- self.s1 = Series(np.array(tm.makeCategoricalIndex(N, ncats)))
- self.s1_cat = self.s1.astype('category')
- self.s1_cat_ordered = self.s1.astype('category', ordered=True)
+ self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
+ self.s_str_cat = self.s_str.astype('category')
+ self.s_str_cat_ordered = self.s_str.astype('category', ordered=True)
- self.s2 = Series(np.random.randint(0, ncats, size=N))
- self.s2_cat = self.s2.astype('category')
- self.s2_cat_ordered = self.s2.astype('category', ordered=True)
+ self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
+ self.s_int_cat = self.s_int.astype('category')
+ self.s_int_cat_ordered = self.s_int.astype('category', ordered=True)
def time_rank_string(self):
- self.s1.rank()
+ self.s_str.rank()
def time_rank_string_cat(self):
- self.s1_cat.rank()
+ self.s_str_cat.rank()
def time_rank_string_cat_ordered(self):
- self.s1_cat_ordered.rank()
+ self.s_str_cat_ordered.rank()
def time_rank_int(self):
- self.s2.rank()
+ self.s_int.rank()
def time_rank_int_cat(self):
- self.s2_cat.rank()
+ self.s_int_cat.rank()
def time_rank_int_cat_ordered(self):
- self.s2_cat_ordered.rank()
+ self.s_int_cat_ordered.rank()
| - Split classes from `Categoricals/2/3` --> `Concat`, `ValueCounts`, `Rank`, `Repr`, `SetCategories`, and `Constructor`
- Utilized `params` and `param_names` for `ValueCounts`
- Added `np.random.seed(1234)` in setup classes where random data is created xref #8144
- Ran flake8 and replaced star imports
```
$ asv run -q -b ^categoricals
[ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 5.88%] ··· Running categoricals.Concat.time_concat 14.2ms
[ 11.76%] ··· Running categoricals.Concat.time_union 12.2ms
[ 17.65%] ··· Running categoricals.Constructor.time_constructor_all_nan 54.7ms
[ 23.53%] ··· Running categoricals.Constructor.time_constructor_datetimes 2.22ms
[ 29.41%] ··· Running categoricals.Constructor.time_constructor_datetimes_with_nat 2.32ms
[ 35.29%] ··· Running categoricals.Constructor.time_constructor_fastpath 1.59ms
[ 41.18%] ··· Running categoricals.Constructor.time_constructor_regular 48.8ms
[ 47.06%] ··· Running categoricals.Constructor.time_constructor_with_nan 510ms
[ 52.94%] ··· Running categoricals.Rank.time_rank_int 11.7ms
[ 58.82%] ··· Running categoricals.Rank.time_rank_int_cat 12.9ms
[ 64.71%] ··· Running categoricals.Rank.time_rank_int_cat_ordered 12.2ms
[ 70.59%] ··· Running categoricals.Rank.time_rank_string 520ms
[ 76.47%] ··· Running categoricals.Rank.time_rank_string_cat 18.3ms
[ 82.35%] ··· Running categoricals.Rank.time_rank_string_cat_ordered 10.2ms
[ 88.24%] ··· Running categoricals.Repr.time_rendering 1.58ms
[ 94.12%] ··· Running categoricals.SetCategories.time_set_categories 70.7ms
[100.00%] ··· Running categoricals.ValueCounts.time_value_counts 81.2ms;...
``` | https://api.github.com/repos/pandas-dev/pandas/pulls/18465 | 2017-11-24T07:55:01Z | 2017-11-25T14:37:00Z | 2017-11-25T14:37:00Z | 2017-11-25T18:25:05Z |
Propogating NaN values when using str.split (#18450) | diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt
index 637ccf0603e0f..576b22fb990b1 100644
--- a/doc/source/whatsnew/v0.21.1.txt
+++ b/doc/source/whatsnew/v0.21.1.txt
@@ -138,9 +138,13 @@ Categorical
- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`)
- Bug in ``Categorical.unique()`` returning read-only ``codes`` array when all categories were ``NaN`` (:issue:`18051`)
+String
+^^^^^^
+
+- :meth:`Series.str.split()` will now propogate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`)
+
Other
^^^^^
-
-
--
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index abef6f6086dbd..9614641aa1abf 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -1423,6 +1423,10 @@ def cons_row(x):
return [x]
result = [cons_row(x) for x in result]
+ if result:
+ # propogate nan values to match longest sequence (GH 18450)
+ max_len = max(len(x) for x in result)
+ result = [x * max_len if x[0] is np.nan else x for x in result]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index f1b97081b6d93..8aa69bcbfdf7f 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -2086,6 +2086,18 @@ def test_rsplit_to_multiindex_expand(self):
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
+ def test_split_nan_expand(self):
+ # gh-18450
+ s = Series(["foo,bar,baz", NA])
+ result = s.str.split(",", expand=True)
+ exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
+ tm.assert_frame_equal(result, exp)
+
+ # check that these are actually np.nan and not None
+ # TODO see GH 18463
+ # tm.assert_frame_equal does not differentiate
+ assert all(np.isnan(x) for x in result.iloc[1])
+
def test_split_with_name(self):
# GH 12617
| - [X] closes #18450
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry | https://api.github.com/repos/pandas-dev/pandas/pulls/18462 | 2017-11-24T02:49:25Z | 2017-11-25T21:51:14Z | 2017-11-25T21:51:14Z | 2017-12-11T20:22:40Z |
Improve DatetimeIndex.time performance | diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py
index b3996739e33f7..fe282df25e9c5 100644
--- a/asv_bench/benchmarks/timeseries.py
+++ b/asv_bench/benchmarks/timeseries.py
@@ -85,7 +85,7 @@ def time_dti_tz_factorize(self):
self.dti_tz.factorize()
def time_dti_time(self):
- self.rng.time
+ self.dst_rng.time
def time_timestamp_tzinfo_cons(self):
self.rng5[0]
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt
index 32b548e5f32f1..9878814c2cc17 100644
--- a/doc/source/whatsnew/v0.22.0.txt
+++ b/doc/source/whatsnew/v0.22.0.txt
@@ -231,6 +231,7 @@ Performance Improvements
- Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`)
- Improved performance of :func:`IntervalIndex.symmetric_difference()` (:issue:`18475`)
- Improved performance of ``DatetimeIndex`` and ``Series`` arithmetic operations with Business-Month and Business-Quarter frequencies (:issue:`18489`)
+- Improved performance of :func:`Series.dt.time` and :func:`DatetimeIndex.time`
.. _whatsnew_0220.docs:
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 293e10d1934fa..7b0504388be22 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -15,7 +15,7 @@ from util cimport (is_integer_object, is_float_object, is_string_object,
from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyDateTime_CheckExact,
PyDateTime_IMPORT,
- timedelta, datetime, date)
+ timedelta, datetime, date, time)
# import datetime C API
PyDateTime_IMPORT
@@ -70,11 +70,17 @@ cdef inline object create_date_from_ts(
""" convenience routine to construct a datetime.date from its parts """
return date(dts.year, dts.month, dts.day)
+cdef inline object create_time_from_ts(
+ int64_t value, pandas_datetimestruct dts,
+ object tz, object freq):
+ """ convenience routine to construct a datetime.time from its parts """
+ return time(dts.hour, dts.min, dts.sec, dts.us, tz)
+
def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
box="datetime"):
"""
- Convert an i8 repr to an ndarray of datetimes, date or Timestamp
+ Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp
Parameters
----------
@@ -83,9 +89,10 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
convert to this timezone
freq : str/Offset, default None
freq to convert
- box : {'datetime', 'timestamp', 'date'}, default 'datetime'
+ box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime'
If datetime, convert to datetime.datetime
If date, convert to datetime.date
+ If time, convert to datetime.time
If Timestamp, convert to pandas.Timestamp
Returns
@@ -93,9 +100,6 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
result : array of dtype specified by box
"""
- assert ((box == "datetime") or (box == "date") or (box == "timestamp")), \
- "box must be one of 'datetime', 'date' or 'timestamp'"
-
cdef:
Py_ssize_t i, n = len(arr)
ndarray[int64_t] trans, deltas
@@ -115,8 +119,13 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None,
if is_string_object(freq):
from pandas.tseries.frequencies import to_offset
freq = to_offset(freq)
+ elif box == "time":
+ func_create = create_time_from_ts
elif box == "datetime":
func_create = create_datetime_from_ts
+ else:
+ raise ValueError("box must be one of 'datetime', 'date', 'time' or" +
+ " 'timestamp'")
if tz is not None:
if is_utc(tz):
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index 17b3a88cbf544..290c77dd7f040 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -53,8 +53,7 @@
import pandas.core.tools.datetimes as tools
from pandas._libs import (lib, index as libindex, tslib as libts,
- algos as libalgos, join as libjoin,
- Timestamp)
+ join as libjoin, Timestamp)
from pandas._libs.tslibs import (timezones, conversion, fields, parsing,
period as libperiod)
@@ -1677,9 +1676,7 @@ def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
- return self._maybe_mask_results(libalgos.arrmap_object(
- self.astype(object).values,
- lambda x: np.nan if x is libts.NaT else x.time()))
+ return libts.ints_to_pydatetime(self.asi8, self.tz, box="time")
@property
def date(self):
| xref #18058
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Speed up DatetimeIndex.time in a similar way to DatetimeIndex.date. Not sure whether the timezone information should be passed to `ints_to_pydatetime()` or not. | https://api.github.com/repos/pandas-dev/pandas/pulls/18461 | 2017-11-24T02:39:30Z | 2017-12-10T16:22:45Z | 2017-12-10T16:22:45Z | 2017-12-10T19:49:20Z |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.