Columns: title (string, 1-185 chars); diff (string, 0-32.2M chars); body (string, 0-123k chars); url (string, 57-58 chars); created_at, closed_at, merged_at, updated_at (strings, 20 chars each).
BUG: groupby.transform retains timezone information
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index f17c4974cd450..0c78cf01ad300 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -78,7 +78,7 @@ Bug Fixes **Reshaping** -- +- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`) - Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`) - diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 27e13e86a6e9e..52056a6842ed9 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -964,7 +964,7 @@ def _transform_fast(self, func, func_nm): ids, _, ngroup = self.grouper.group_info cast = self._transform_should_cast(func_nm) - out = algorithms.take_1d(func().values, ids) + out = algorithms.take_1d(func()._values, ids) if cast: out = self._try_cast(out, self.obj) return Series(out, index=self.obj.index, name=self.obj.name) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index f120402e6e8ca..b645073fcf72a 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -834,3 +834,14 @@ def demean_rename(x): tm.assert_frame_equal(result, expected) result_single = df.groupby('group').value.transform(demean_rename) tm.assert_series_equal(result_single, expected['value']) + + +@pytest.mark.parametrize('func', [min, max, np.min, np.max, 'first', 'last']) +def test_groupby_transform_timezone_column(func): + # GH 24198 + ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore') + result = pd.DataFrame({'end_time': [ts], 'id': [1]}) + result['max_end_time'] = result.groupby('id').end_time.transform(func) + expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id', + 'max_end_time']) + tm.assert_frame_equal(result, expected)
- [x] closes #24198 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
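A minimal sketch of the fixed behavior, condensed from the regression test in the diff; the printed dtype is my reading of the post-fix output, not taken verbatim from the PR:

```python
import pandas as pd

# Condensed from test_groupby_transform_timezone_column above.
ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore')
df = pd.DataFrame({'end_time': [ts], 'id': [1]})

# Before the fix, the fast transform path went through ``.values`` and
# dropped the timezone; with ``._values`` the tz-aware dtype survives.
out = df.groupby('id').end_time.transform('first')
print(out.dtype)  # datetime64[ns, Asia/Singapore]
```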
https://api.github.com/repos/pandas-dev/pandas/pulls/25264
2019-02-11T07:01:00Z
2019-02-16T16:32:04Z
2019-02-16T16:32:04Z
2019-02-16T22:33:36Z
BUG: Indexing with UTC offset string no longer ignored
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 23f1aabd69ff3..4e2c428415926 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -633,6 +633,16 @@ We are stopping on the included end-point as it is part of the index: dft2 = dft2.swaplevel(0, 1).sort_index() dft2.loc[idx[:, '2013-01-05'], :] +.. versionadded:: 0.25.0 + +Slicing with string indexing also honors UTC offset. + +.. ipython:: python + + df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df + df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] + .. _timeseries.slice_vs_exact_match: Slice vs. Exact Match diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 6e225185ecf84..5716bea7ce694 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -29,7 +29,37 @@ Other Enhancements Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- :meth:`Timestamp.strptime` will now raise a NotImplementedError (:issue:`25016`) +.. _whatsnew_0250.api_breaking.utc_offset_indexing: + +Indexing with date strings with UTC offsets +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Indexing a :class:`DataFrame` or :class:`Series` with a :class:`DatetimeIndex` with a +date string with a UTC offset would previously ignore the UTC offset. Now, the UTC offset +is respected in indexing. (:issue:`24076`, :issue:`16785`) + +*Previous Behavior*: + +.. code-block:: ipython + + In [1]: df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + + In [2]: df + Out[2]: + 0 + 2019-01-01 00:00:00-08:00 0 + + In [3]: df['2019-01-01 00:00:00+04:00':'2019-01-01 01:00:00+04:00'] + Out[3]: + 0 + 2019-01-01 00:00:00-08:00 0 + +*New Behavior*: + +.. ipython:: python + + df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] .. _whatsnew_0250.api.other: @@ -38,7 +68,7 @@ Other API Changes - :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`) - ``Timestamp`` and ``Timedelta`` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`) -- +- :meth:`Timestamp.strptime` will now raise a ``NotImplementedError`` (:issue:`25016`) - .. 
_whatsnew_0250.deprecations: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b5f3c929a7f36..1cdacc908b663 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6,9 +6,10 @@ import numpy as np from pandas._libs import ( - Timedelta, algos as libalgos, index as libindex, join as libjoin, lib, - tslibs) + algos as libalgos, index as libindex, join as libjoin, lib) from pandas._libs.lib import is_datetime_array +from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp +from pandas._libs.tslibs.timezones import tz_compare import pandas.compat as compat from pandas.compat import range, set_function_name, u from pandas.compat.numpy import function as nv @@ -447,7 +448,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, try: return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) - except tslibs.OutOfBoundsDatetime: + except OutOfBoundsDatetime: pass elif inferred.startswith('timedelta'): @@ -4867,6 +4868,20 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): # If it's a reverse slice, temporarily swap bounds. start, end = end, start + # GH 16785: If start and end happen to be date strings with UTC offsets + # attempt to parse and check that the offsets are the same + if (isinstance(start, (compat.string_types, datetime)) + and isinstance(end, (compat.string_types, datetime))): + try: + ts_start = Timestamp(start) + ts_end = Timestamp(end) + except (ValueError, TypeError): + pass + else: + if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): + raise ValueError("Both dates must have the " + "same UTC offset") + start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left', kind) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1037e2d9a3bd6..527991f15364b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -32,9 +32,8 @@ from pandas.core.ops import get_op_result_name import pandas.core.tools.datetimes as tools -from pandas.tseries import offsets from pandas.tseries.frequencies import Resolution, to_offset -from pandas.tseries.offsets import CDay, prefix_mapping +from pandas.tseries.offsets import CDay, Nano, prefix_mapping def _new_DatetimeIndex(cls, d): @@ -826,54 +825,57 @@ def _parsed_string_to_bounds(self, reso, parsed): lower, upper: pd.Timestamp """ + valid_resos = {'year', 'month', 'quarter', 'day', 'hour', 'minute', + 'second', 'minute', 'second', 'microsecond'} + if reso not in valid_resos: + raise KeyError if reso == 'year': - return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz), - Timestamp(datetime(parsed.year, 12, 31, 23, - 59, 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, 1, 1) + end = Timestamp(parsed.year, 12, 31, 23, 59, 59, 999999) elif reso == 'month': d = ccalendar.get_days_in_month(parsed.year, parsed.month) - return (Timestamp(datetime(parsed.year, parsed.month, 1), - tz=self.tz), - Timestamp(datetime(parsed.year, parsed.month, d, 23, - 59, 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, parsed.month, 1) + end = Timestamp(parsed.year, parsed.month, d, 23, 59, 59, 999999) elif reso == 'quarter': qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month - return (Timestamp(datetime(parsed.year, parsed.month, 1), - tz=self.tz), - Timestamp(datetime(parsed.year, qe, d, 23, 59, - 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, parsed.month, 1) + end = 
Timestamp(parsed.year, qe, d, 23, 59, 59, 999999) elif reso == 'day': - st = datetime(parsed.year, parsed.month, parsed.day) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Day(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day) + end = start + timedelta(days=1) - Nano(1) elif reso == 'hour': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Hour(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour) + end = start + timedelta(hours=1) - Nano(1) elif reso == 'minute': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour, minute=parsed.minute) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Minute(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute) + end = start + timedelta(minutes=1) - Nano(1) elif reso == 'second': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour, minute=parsed.minute, - second=parsed.second) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Second(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute, parsed.second) + end = start + timedelta(seconds=1) - Nano(1) elif reso == 'microsecond': - st = datetime(parsed.year, parsed.month, parsed.day, - parsed.hour, parsed.minute, parsed.second, - parsed.microsecond) - return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz)) - else: - raise KeyError + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute, parsed.second, + parsed.microsecond) + end = start + timedelta(microseconds=1) - Nano(1) + # GH 24076 + # If an incoming date string contained a UTC offset, need to localize + # the parsed date to this offset first before aligning with the index's + # timezone + if parsed.tzinfo is not None: + if self.tz is None: + raise ValueError("The index must be timezone aware " + "when indexing with a date string with a " + "UTC offset") + start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz) + end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz) + elif self.tz is not None: + start = start.tz_localize(self.tz) + end = end.tz_localize(self.tz) + return start, end def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): is_monotonic = self.is_monotonic diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index e1ba0e1708442..a3ee5fe39769f 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -102,7 +102,7 @@ def test_stringified_slice_with_tz(self): # GH#2658 import datetime start = datetime.datetime.now() - idx = date_range(start=start, freq="1d", periods=10) + idx = date_range(start=start, freq="1d", periods=10, tz='US/Eastern') df = DataFrame(lrange(10), index=idx) df["2013-01-14 23:44:34.437768-05:00":] # no exception here diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index a0c9d9f02385c..64693324521b3 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -396,3 +396,30 @@ def test_selection_by_datetimelike(self, datetimelike, op, expected): result = op(df.A, datetimelike) expected = 
Series(expected, name='A') tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('start', [ + '2018-12-02 21:50:00+00:00', pd.Timestamp('2018-12-02 21:50:00+00:00'), + pd.Timestamp('2018-12-02 21:50:00+00:00').to_pydatetime() + ]) + @pytest.mark.parametrize('end', [ + '2018-12-02 21:52:00+00:00', pd.Timestamp('2018-12-02 21:52:00+00:00'), + pd.Timestamp('2018-12-02 21:52:00+00:00').to_pydatetime() + ]) + def test_getitem_with_datestring_with_UTC_offset(self, start, end): + # GH 24076 + idx = pd.date_range(start='2018-12-02 14:50:00-07:00', + end='2018-12-02 14:50:00-07:00', freq='1min') + df = pd.DataFrame(1, index=idx, columns=['A']) + result = df[start:end] + expected = df.iloc[0:3, :] + tm.assert_frame_equal(result, expected) + + # GH 16785 + start = str(start) + end = str(end) + with pytest.raises(ValueError, match="Both dates must"): + df[start:end[:-4] + '1:00'] + + with pytest.raises(ValueError, match="The index must be timezone"): + df = df.tz_localize(None) + df[start:end]
- [x] closes #24076 - [x] closes #16785 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Additionally, date strings with UTC offsets that are used in indexing are now validated as follows: - An error is raised if two date strings have different UTC offsets. - An error is raised if the date strings have a UTC offset and the index is not timezone aware.
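A short sketch of the new behavior and the two validation errors described above, adapted from the whatsnew example and error messages in the diff (the outputs in comments are assumptions):

```python
import pandas as pd

df = pd.DataFrame([0],
                  index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific'))

# 12:00+04:00 equals 00:00-08:00, so the row is selected now that the
# offset is honored instead of ignored.
df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']

# Validation 1: mixed UTC offsets in one slice raise.
try:
    df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+05:00']
except ValueError as err:
    print(err)  # Both dates must have the same UTC offset

# Validation 2: offset-bearing strings against a tz-naive index raise.
naive = df.tz_localize(None)
try:
    naive['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00']
except ValueError as err:
    print(err)  # The index must be timezone aware ...
```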
https://api.github.com/repos/pandas-dev/pandas/pulls/25263
2019-02-11T03:16:48Z
2019-02-24T03:37:40Z
2019-02-24T03:37:39Z
2019-11-25T12:47:06Z
REF/TST: resample/test_base.py
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 48debfa2848e7..8f912ea5c524a 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -28,15 +28,10 @@ period_range, 'pi', datetime(2005, 1, 1), datetime(2005, 1, 10)) TIMEDELTA_RANGE = (timedelta_range, 'tdi', '1 day', '10 day') -ALL_TIMESERIES_INDEXES = [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE] - - -def pytest_generate_tests(metafunc): - # called once per each test function - if metafunc.function.__name__.endswith('_all_ts'): - metafunc.parametrize( - '_index_factory,_series_name,_index_start,_index_end', - ALL_TIMESERIES_INDEXES) +all_ts = pytest.mark.parametrize( + '_index_factory,_series_name,_index_start,_index_end', + [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE] +) @pytest.fixture @@ -84,7 +79,8 @@ def test_asfreq_fill_value(series, create_index): assert_frame_equal(result, expected) -def test_resample_interpolate_all_ts(frame): +@all_ts +def test_resample_interpolate(frame): # # 12925 df = frame assert_frame_equal( @@ -101,8 +97,9 @@ def test_raises_on_non_datetimelike_index(): xp.resample('A').mean() +@all_ts @pytest.mark.parametrize('freq', ['M', 'D', 'H']) -def test_resample_empty_series_all_ts(freq, empty_series, resample_method): +def test_resample_empty_series(freq, empty_series, resample_method): # GH12771 & GH12868 if resample_method == 'ohlc': @@ -121,8 +118,9 @@ def test_resample_empty_series_all_ts(freq, empty_series, resample_method): assert_series_equal(result, expected, check_dtype=False) +@all_ts @pytest.mark.parametrize('freq', ['M', 'D', 'H']) -def test_resample_empty_dataframe_all_ts(empty_frame, freq, resample_method): +def test_resample_empty_dataframe(empty_frame, freq, resample_method): # GH13212 df = empty_frame # count retains dimensions too @@ -162,7 +160,8 @@ def test_resample_empty_dtypes(index, dtype, resample_method): pass -def test_resample_loffset_arg_type_all_ts(frame, create_index): +@all_ts +def test_resample_loffset_arg_type(frame, create_index): # GH 13218, 15002 df = frame expected_means = [df.values[i:i + 2].mean() @@ -202,7 +201,8 @@ def test_resample_loffset_arg_type_all_ts(frame, create_index): assert_frame_equal(result_how, expected) -def test_apply_to_empty_series_all_ts(empty_series): +@all_ts +def test_apply_to_empty_series(empty_series): # GH 14313 s = empty_series for freq in ['M', 'D', 'H']: @@ -212,7 +212,8 @@ def test_apply_to_empty_series_all_ts(empty_series): assert_series_equal(result, expected, check_dtype=False) -def test_resampler_is_iterable_all_ts(series): +@all_ts +def test_resampler_is_iterable(series): # GH 15314 freq = 'H' tg = TimeGrouper(freq, convention='start') @@ -223,7 +224,8 @@ def test_resampler_is_iterable_all_ts(series): assert_series_equal(rv, gv) -def test_resample_quantile_all_ts(series): +@all_ts +def test_resample_quantile(series): # GH 15023 s = series q = 0.75
Follow-on from #24377; xref #25197. In #24377, the `pytest_generate_tests` hook was used to parametrize the base tests over date_range, period_range, and timedelta_range indexes. To make the code easier to understand and use less pytest magic, this PR takes the simpler approach of applying a parametrize marker to the tests instead of requiring the test name to end in `_all_ts`.
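In pure-pytest terms, the refactor swaps collection-time generation for an ordinary reusable marker, roughly like this (toy parameter names, not the real fixtures):

```python
import pytest

# A module-level marker that can decorate any test needing all index flavors.
all_ts = pytest.mark.parametrize('index_factory',
                                 ['date_range', 'period_range',
                                  'timedelta_range'])

@all_ts
def test_example(index_factory):
    # Runs three times, once per flavor, with no pytest_generate_tests hook
    # and no special suffix in the test name.
    assert index_factory in {'date_range', 'period_range', 'timedelta_range'}
```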
https://api.github.com/repos/pandas-dev/pandas/pulls/25262
2019-02-11T00:23:41Z
2019-02-11T12:55:05Z
2019-02-11T12:55:05Z
2019-03-13T21:11:32Z
BUG: DataFrame.join on tz-aware DatetimeIndex
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index b0f287cf0b9f6..3eb58798ab462 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -77,7 +77,7 @@ Bug Fixes **Reshaping** - -- +- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`) - **Visualization** diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4032dc20b2e19..0073f7ab11467 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -181,7 +181,6 @@ Reshaping - Bug in :func:`pandas.merge` adds a string of ``None`` if ``None`` is assigned in suffixes instead of remain the column name as-is (:issue:`24782`). - Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) - :func:`to_records` now accepts dtypes to its `column_dtypes` parameter (:issue:`24895`) -- Sparse diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ad3327e694b67..fb50a3c60f705 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -909,7 +909,7 @@ def _get_merge_keys(self): in zip(self.right.index.levels, self.right.index.codes)] else: - right_keys = [self.right.index.values] + right_keys = [self.right.index._values] elif _any(self.right_on): for k in self.right_on: if is_rkey(k): diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 5d7a9ab6f4cf0..62c9047b17f3d 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -682,6 +682,28 @@ def test_join_multi_to_multi(self, join_type): with pytest.raises(ValueError, match=msg): right.join(left, on=['abc', 'xy'], how=join_type) + def test_join_on_tz_aware_datetimeindex(self): + # GH 23931 + df1 = pd.DataFrame( + { + 'date': pd.date_range(start='2018-01-01', periods=5, + tz='America/Chicago'), + 'vals': list('abcde') + } + ) + + df2 = pd.DataFrame( + { + 'date': pd.date_range(start='2018-01-03', periods=5, + tz='America/Chicago'), + 'vals_2': list('tuvwx') + } + ) + result = df1.join(df2.set_index('date'), on='date') + expected = df1.copy() + expected['vals_2'] = pd.Series([np.nan] * len(expected), dtype=object) + assert_frame_equal(result, expected) + def _check_join(left, right, result, join_col, how='left', lsuffix='_x', rsuffix='_y'):
- [x] closes #23931 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
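A condensed sketch of the fixed operation, based on the test added in the diff (the result details are left unasserted here):

```python
import pandas as pd

df1 = pd.DataFrame({'date': pd.date_range('2018-01-01', periods=5,
                                          tz='America/Chicago'),
                    'vals': list('abcde')})
df2 = pd.DataFrame({'date': pd.date_range('2018-01-03', periods=5,
                                          tz='America/Chicago'),
                    'vals_2': list('tuvwx')})

# Joining on a timezone-aware DatetimeIndex previously misbehaved because
# the right index went through ``.values`` (dropping the tz); with
# ``._values`` the tz-aware keys are handled correctly.
result = df1.join(df2.set_index('date'), on='date')
```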
https://api.github.com/repos/pandas-dev/pandas/pulls/25260
2019-02-10T23:33:06Z
2019-02-11T18:53:43Z
2019-02-11T18:53:42Z
2019-03-11T19:17:27Z
Fix minor error in dynamic load function
diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index bb58449843096..09fb5a30cbc3b 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -4,6 +4,8 @@ import textwrap import pytest import numpy as np +import pandas as pd + import validate_docstrings validate_one = validate_docstrings.validate_one @@ -1004,6 +1006,32 @@ def test_item_subsection(self, idx, subsection): assert result[idx][3] == subsection +class TestDocstringClass(object): + @pytest.mark.parametrize('name, expected_obj', + [('pandas.isnull', pd.isnull), + ('pandas.DataFrame', pd.DataFrame), + ('pandas.Series.sum', pd.Series.sum)]) + def test_resolves_class_name(self, name, expected_obj): + d = validate_docstrings.Docstring(name) + assert d.obj is expected_obj + + @pytest.mark.parametrize('invalid_name', ['panda', 'panda.DataFrame']) + def test_raises_for_invalid_module_name(self, invalid_name): + msg = 'No module can be imported from "{}"'.format(invalid_name) + with pytest.raises(ImportError, match=msg): + validate_docstrings.Docstring(invalid_name) + + @pytest.mark.parametrize('invalid_name', + ['pandas.BadClassName', + 'pandas.Series.bad_method_name']) + def test_raises_for_invalid_attribute_name(self, invalid_name): + name_components = invalid_name.split('.') + obj_name, invalid_attr_name = name_components[-2], name_components[-1] + msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name) + with pytest.raises(AttributeError, match=msg): + validate_docstrings.Docstring(invalid_name) + + class TestMainFunction(object): def test_exit_status_for_validate_one(self, monkeypatch): monkeypatch.setattr( diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index bce33f7e78daa..20f32124a2532 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -267,7 +267,7 @@ def _load_obj(name): else: continue - if 'module' not in locals(): + if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name))
- [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry The `validate_docstrings.py` script dynamically loads modules/classes/functions from its command-line argument. Currently, if the user supplies an invalid argument, an `UnboundLocalError` is raised: ``` Traceback (most recent call last): ... File "./scripts/validate_docstrings.py", line 275, in _load_obj obj = getattr(obj, part) UnboundLocalError: local variable 'obj' referenced before assignment ``` This PR fixes a check within the relevant function, so that a more informative error is raised instead: ``` $ ./scripts/validate_docstrings.py tslibs.Timestamp Traceback (most recent call last): ... ImportError: No module can be imported from "tslibs.Timestamp" ```
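For orientation, a simplified sketch of the loading pattern and the corrected guard; this is an illustrative reimplementation, not the actual `validate_docstrings._load_obj`:

```python
import importlib

def _load_obj(name):
    # Try progressively shorter module prefixes of a dotted name such as
    # 'pandas.Series.sum' until one imports.
    for maxsplit in range(1, name.count('.') + 1):
        module_name = name.rsplit('.', maxsplit)[0]
        try:
            obj = importlib.import_module(module_name)
        except ImportError:
            pass
        else:
            break

    # The fixed check: 'obj' is only bound when an import succeeds. The old
    # code tested 'module', which is bound even when every import fails, so
    # the guard never fired and the getattr loop below raised
    # UnboundLocalError instead.
    if 'obj' not in locals():
        raise ImportError('No module can be imported '
                          'from "{}"'.format(name))

    for part in name[len(module_name) + 1:].split('.'):
        obj = getattr(obj, part)
    return obj
```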
https://api.github.com/repos/pandas-dev/pandas/pulls/25256
2019-02-10T01:08:56Z
2019-02-28T13:35:01Z
2019-02-28T13:35:01Z
2019-02-28T13:35:04Z
API: Ensure DatetimeTZDtype standardizes pytz timezones
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 5841125817d03..23f1aabd69ff3 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -321,6 +321,15 @@ which can be specified. These are computed from the starting point specified by pd.to_datetime([1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500], unit='ms') +Constructing a :class:`Timestamp` or :class:`DatetimeIndex` with an epoch timestamp +with the ``tz`` argument specified will localize the epoch timestamps to UTC +first then convert the result to the specified time zone. + +.. ipython:: python + + pd.Timestamp(1262347200000000000, tz='US/Pacific') + pd.DatetimeIndex([1262347200000000000], tz='US/Pacific') + .. note:: Epoch times will be rounded to the nearest nanosecond. @@ -2205,6 +2214,21 @@ you can use the ``tz_convert`` method. rng_pytz.tz_convert('US/Eastern') +.. note:: + + When using ``pytz`` time zones, :class:`DatetimeIndex` will construct a different + time zone object than a :class:`Timestamp` for the same time zone input. A :class:`DatetimeIndex` + can hold a collection of :class:`Timestamp` objects that may have different UTC offsets and cannot be + succinctly represented by one ``pytz`` time zone instance while one :class:`Timestamp` + represents one point in time with a specific UTC offset. + + .. ipython:: python + + dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific') + dti.tz + ts = pd.Timestamp('2019-01-01', tz='US/Pacific') + ts.tz + .. warning:: Be wary of conversions between libraries. For some time zones, ``pytz`` and ``dateutil`` have different diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4032dc20b2e19..70a1cc7dab08e 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -33,7 +33,7 @@ Backwards incompatible API changes Other API Changes ^^^^^^^^^^^^^^^^^ -- +- :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`) - - diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8b9ac680493a1..f187d786d9f61 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -639,6 +639,7 @@ def __init__(self, unit="ns", tz=None): if tz: tz = timezones.maybe_get_tz(tz) + tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) elif tz is None: diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 0fe0a845f5129..710f215686eab 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -3,6 +3,7 @@ import numpy as np import pytest +import pytz from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical, is_categorical_dtype, @@ -302,6 +303,15 @@ def test_empty(self): with pytest.raises(TypeError, match="A 'tz' is required."): DatetimeTZDtype() + def test_tz_standardize(self): + # GH 24713 + tz = pytz.timezone('US/Eastern') + dr = date_range('2013-01-01', periods=3, tz='US/Eastern') + dtype = DatetimeTZDtype('ns', dr.tz) + assert dtype.tz == tz + dtype = DatetimeTZDtype('ns', dr[0].tz) + assert dtype.tz == tz + class TestPeriodDtype(Base):
- [x] closes #24713 - [x] closes #23815 - [x] closes #20257 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Includes some related-ish timezone documentation issues.
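A sketch of the standardization, following the new test in the diff; the equality check relies on pytz caching one canonical instance per zone name:

```python
import pandas as pd
import pytz

dr = pd.date_range('2013-01-01', periods=3, tz='US/Eastern')

# dr[0].tz is a localized pytz instance (with a concrete UTC offset baked
# in); DatetimeTZDtype now standardizes it back to the canonical zone.
dtype = pd.DatetimeTZDtype('ns', dr[0].tz)
assert dtype.tz == pytz.timezone('US/Eastern')
```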
https://api.github.com/repos/pandas-dev/pandas/pulls/25254
2019-02-09T23:42:52Z
2019-02-11T14:32:54Z
2019-02-11T14:32:54Z
2019-02-11T16:22:08Z
Revert "BLD: prevent asv from calling sys.stdin.close() by using different launch method"
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c86d5c50705a8..f0567d76659b6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -104,7 +104,7 @@ jobs: if git diff upstream/master --name-only | grep -q "^asv_bench/"; then cd asv_bench asv machine --yes - ASV_OUTPUT="$(asv run --quick --show-stderr --python=same --launch-method=spawn)" + ASV_OUTPUT="$(asv dev)" if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then echo "##vso[task.logissue type=error]Benchmarks run with errors" echo "$ASV_OUTPUT"
Reverts pandas-dev/pandas#25237
https://api.github.com/repos/pandas-dev/pandas/pulls/25253
2019-02-09T23:39:43Z
2019-02-11T13:08:16Z
2019-02-11T13:08:16Z
2019-05-12T08:02:49Z
Refactor groupby group_prod, group_var, group_mean, group_ohlc
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 950ba3f89ffb7..e6b6e2c8a0055 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -382,6 +382,10 @@ def group_any_all(uint8_t[:] out, if values[i] == flag_val: out[lab] = flag_val +# ---------------------------------------------------------------------- +# group_add, group_prod, group_var, group_mean, group_ohlc +# ---------------------------------------------------------------------- + @cython.wraparound(False) @cython.boundscheck(False) @@ -396,9 +400,9 @@ def _group_add(floating[:, :] out, cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) floating val, count - ndarray[floating, ndim=2] sumx, nobs + floating[:, :] sumx, nobs - if not len(values) == len(labels): + if len(values) != len(labels): raise AssertionError("len(index) != len(labels)") nobs = np.zeros_like(out) @@ -407,7 +411,6 @@ def _group_add(floating[:, :] out, N, K = (<object>values).shape with nogil: - for i in range(N): lab = labels[i] if lab < 0: @@ -433,5 +436,213 @@ def _group_add(floating[:, :] out, group_add_float32 = _group_add['float'] group_add_float64 = _group_add['double'] + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_prod(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=0): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, count + floating[:, :] prodx, nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + prodx = np.ones_like(out) + + N, K = (<object>values).shape + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + prodx[lab, j] *= val + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] < min_count: + out[i, j] = NAN + else: + out[i, j] = prodx[i, j] + + +group_prod_float32 = _group_prod['float'] +group_prod_float64 = _group_prod['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +@cython.cdivision(True) +def _group_var(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, ct, oldmean + floating[:, :] nobs, mean + + assert min_count == -1, "'min_count' only used in add and prod" + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + mean = np.zeros_like(out) + + N, K = (<object>values).shape + + out[:, :] = 0.0 + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + oldmean = mean[lab, j] + mean[lab, j] += (val - oldmean) / nobs[lab, j] + out[lab, j] += (val - mean[lab, j]) * (val - oldmean) + + for i in range(ncounts): + for j in range(K): + ct = nobs[i, j] + if ct < 2: + out[i, j] = NAN + else: + out[i, j] /= (ct - 1) + + +group_var_float32 = _group_var['float'] +group_var_float64 = _group_var['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_mean(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, 
count + floating[:, :] sumx, nobs + + assert min_count == -1, "'min_count' only used in add and prod" + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + sumx = np.zeros_like(out) + + N, K = (<object>values).shape + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + # not nan + if val == val: + nobs[lab, j] += 1 + sumx[lab, j] += val + + for i in range(ncounts): + for j in range(K): + count = nobs[i, j] + if nobs[i, j] == 0: + out[i, j] = NAN + else: + out[i, j] = sumx[i, j] / count + + +group_mean_float32 = _group_mean['float'] +group_mean_float64 = _group_mean['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_ohlc(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, lab + floating val, count + Py_ssize_t ngroups = len(counts) + + assert min_count == -1, "'min_count' only used in add and prod" + + if len(labels) == 0: + return + + N, K = (<object>values).shape + + if out.shape[1] != 4: + raise ValueError('Output array must have 4 columns') + + if K > 1: + raise NotImplementedError("Argument 'values' must have only " + "one dimension") + out[:] = np.nan + + with nogil: + for i in range(N): + lab = labels[i] + if lab == -1: + continue + + counts[lab] += 1 + val = values[i, 0] + if val != val: + continue + + if out[lab, 0] != out[lab, 0]: + out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val + else: + out[lab, 1] = max(out[lab, 1], val) + out[lab, 2] = min(out[lab, 2], val) + out[lab, 3] = val + + +group_ohlc_float32 = _group_ohlc['float'] +group_ohlc_float64 = _group_ohlc['double'] + # generated from template include "groupby_helper.pxi" diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index db7018e1a7254..63cd4d6ac6ff2 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -8,219 +8,6 @@ cdef extern from "numpy/npy_math.h": float64_t NAN "NPY_NAN" _int64_max = np.iinfo(np.int64).max -# ---------------------------------------------------------------------- -# group_prod, group_var, group_mean, group_ohlc -# ---------------------------------------------------------------------- - -{{py: - -# name, c_type -dtypes = [('float64', 'float64_t'), - ('float32', 'float32_t')] - -def get_dispatch(dtypes): - - for name, c_type in dtypes: - yield name, c_type -}} - -{{for name, c_type in get_dispatch(dtypes)}} - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_prod_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=0): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, count - ndarray[{{c_type}}, ndim=2] prodx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - prodx = np.ones_like(out) - - N, K = (<object>values).shape - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - prodx[lab, j] *= val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] < min_count: - out[i, j] = NAN - else: - out[i, j] = prodx[i, 
j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -@cython.cdivision(True) -def group_var_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, ct, oldmean - ndarray[{{c_type}}, ndim=2] nobs, mean - - assert min_count == -1, "'min_count' only used in add and prod" - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - mean = np.zeros_like(out) - - N, K = (<object>values).shape - - out[:, :] = 0.0 - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - oldmean = mean[lab, j] - mean[lab, j] += (val - oldmean) / nobs[lab, j] - out[lab, j] += (val - mean[lab, j]) * (val - oldmean) - - for i in range(ncounts): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = NAN - else: - out[i, j] /= (ct - 1) -# add passing bin edges, instead of labels - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_mean_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, count - ndarray[{{c_type}}, ndim=2] sumx, nobs - - assert min_count == -1, "'min_count' only used in add and prod" - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object>values).shape - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - - for i in range(ncounts): - for j in range(K): - count = nobs[i, j] - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] / count - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_ohlc_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - {{c_type}} val, count - Py_ssize_t ngroups = len(counts) - - assert min_count == -1, "'min_count' only used in add and prod" - - if len(labels) == 0: - return - - N, K = (<object>values).shape - - if out.shape[1] != 4: - raise ValueError('Output array must have 4 columns') - - if K > 1: - raise NotImplementedError("Argument 'values' must have only " - "one dimension") - out[:] = np.nan - - with nogil: - for i in range(N): - lab = labels[i] - if lab == -1: - continue - - counts[lab] += 1 - val = values[i, 0] - if val != val: - continue - - if out[lab, 0] != out[lab, 0]: - out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val - else: - out[lab, 1] = max(out[lab, 1], val) - out[lab, 2] = min(out[lab, 2], val) - out[lab, 3] = val - -{{endfor}} - # ---------------------------------------------------------------------- # group_nth, group_last, group_rank # ---------------------------------------------------------------------- diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 3d28b17750540..888cf78a1c66a 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1222,7 +1222,7 @@ def test_group_var_constant(self): 
class TestGroupVarFloat64(GroupVarTestMixin): __test__ = True - algo = libgroupby.group_var_float64 + algo = staticmethod(libgroupby.group_var_float64) dtype = np.float64 rtol = 1e-5 @@ -1245,7 +1245,7 @@ def test_group_var_large_inputs(self): class TestGroupVarFloat32(GroupVarTestMixin): __test__ = True - algo = libgroupby.group_var_float32 + algo = staticmethod(libgroupby.group_var_float32) dtype = np.float32 rtol = 1e-2
Follow-up to a previous [PR](https://github.com/pandas-dev/pandas/pull/24954). Continues refactoring cython functions implemented in tempita into fused types.
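One side effect visible in the test diff: the fused-type kernels are now wrapped in `staticmethod(...)` when stored as class attributes. A pure-Python toy (the stand-in function below is not the real libgroupby kernel) shows why such wrapping matters, assuming the specialized functions bind like ordinary Python functions:

```python
def group_var(out, counts):          # stand-in, not libgroupby.group_var_float64
    return out, counts

class GoodMixin:
    algo = staticmethod(group_var)   # stays a plain function on access

class BadMixin:
    algo = group_var                 # binds as a method on access

assert GoodMixin().algo(1, 2) == (1, 2)
try:
    BadMixin().algo(1, 2)            # actually calls group_var(self, 1, 2)
except TypeError as err:
    print(err)                       # too many positional arguments
```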
https://api.github.com/repos/pandas-dev/pandas/pulls/25249
2019-02-09T20:17:01Z
2019-02-11T19:39:27Z
2019-02-11T19:39:27Z
2019-02-11T19:39:30Z
Backport PR #25171 on branch 0.24.x
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index c95ed818c9da0..b0f287cf0b9f6 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -52,6 +52,7 @@ Bug Fixes **I/O** - Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`) +- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`) - Bug where float indexes could have misaligned values when printing (:issue:`25061`) - diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 2bd93b19d4225..971386c91944e 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -314,12 +314,13 @@ def parse_table_schema(json, precise_float): df = df.astype(dtypes) - df = df.set_index(table['schema']['primaryKey']) - if len(df.index.names) == 1: - if df.index.name == 'index': - df.index.name = None - else: - df.index.names = [None if x.startswith('level_') else x for x in - df.index.names] + if 'primaryKey' in table['schema']: + df = df.set_index(table['schema']['primaryKey']) + if len(df.index.names) == 1: + if df.index.name == 'index': + df.index.name = None + else: + df.index.names = [None if x.startswith('level_') else x for x in + df.index.names] return df diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 23c40276072d6..51a1d5488b191 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1262,3 +1262,13 @@ def test_index_false_error_to_json(self, orient): "'orient' is 'split' or 'table'") with pytest.raises(ValueError, match=msg): df.to_json(orient=orient, index=False) + + @pytest.mark.parametrize('orient', ['split', 'table']) + @pytest.mark.parametrize('index', [True, False]) + def test_index_false_from_json_to_json(self, orient, index): + # GH25170 + # Test index=False in from_json to_json + expected = DataFrame({'a': [1, 2], 'b': [3, 4]}) + dfjson = expected.to_json(orient=orient, index=index) + result = read_json(dfjson, orient=orient) + assert_frame_equal(result, expected)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
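A round-trip sketch of the fix being backported, condensed from the test in the patch:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

# With index=False the table schema carries no 'primaryKey'; read_json
# previously assumed one was always present and raised.
payload = df.to_json(orient='table', index=False)
result = pd.read_json(payload, orient='table')  # now round-trips cleanly
```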
https://api.github.com/repos/pandas-dev/pandas/pulls/25248
2019-02-09T19:05:24Z
2019-02-11T12:51:51Z
2019-02-11T12:51:51Z
2019-02-11T12:51:53Z
BUG: pandas Timestamp tz_localize and tz_convert do not preserve `freq` attribute
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4032dc20b2e19..ecce54b88fb04 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -93,7 +93,7 @@ Timezones ^^^^^^^^^ - Bug in :func:`to_datetime` with ``utc=True`` and datetime strings that would apply previously parsed UTC offsets to subsequent arguments (:issue:`24992`) -- +- Bug in :func:`Timestamp.tz_localize` and :func:`Timestamp.tz_convert` does not propagate ``freq`` (:issue:`25241`) - Numeric diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 25b0b4069cf7c..8a95d2494dfa4 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1187,12 +1187,12 @@ class Timestamp(_Timestamp): value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, ambiguous=ambiguous, nonexistent=nonexistent)[0] - return Timestamp(value, tz=tz) + return Timestamp(value, tz=tz, freq=self.freq) else: if tz is None: # reset tz value = tz_convert_single(self.value, UTC, self.tz) - return Timestamp(value, tz=None) + return Timestamp(value, tz=tz, freq=self.freq) else: raise TypeError('Cannot localize tz-aware Timestamp, use ' 'tz_convert for conversions') @@ -1222,7 +1222,7 @@ class Timestamp(_Timestamp): 'tz_localize to localize') else: # Same UTC timestamp, different time zone - return Timestamp(self.value, tz=tz) + return Timestamp(self.value, tz=tz, freq=self.freq) astimezone = tz_convert diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 12c1b15733895..b25918417efcd 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -825,6 +825,13 @@ def test_dti_drop_dont_lose_tz(self): assert ind.tz is not None + def test_dti_tz_conversion_freq(self, tz_naive_fixture): + # GH25241 + t3 = DatetimeIndex(['2019-01-01 10:00'], freq='H') + assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq + t4 = DatetimeIndex(['2019-01-02 12:00'], tz='UTC', freq='T') + assert t4.tz_convert(tz='UTC').freq == t4.freq + def test_drop_dst_boundary(self): # see gh-18031 tz = "Europe/Brussels" diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index b2c05d1564a48..c27ef3d0662c8 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -780,6 +780,13 @@ def test_hash_equivalent(self): stamp = Timestamp(datetime(2011, 1, 1)) assert d[stamp] == 5 + def test_tz_conversion_freq(self, tz_naive_fixture): + # GH25241 + t1 = Timestamp('2019-01-01 10:00', freq='H') + assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq + t2 = Timestamp('2019-01-02 12:00', tz='UTC', freq='T') + assert t2.tz_convert(tz='UTC').freq == t2.freq + class TestTimestampNsOperations(object): diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index a5855f68127f4..dbe667a166d0a 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -114,7 +114,8 @@ def test_getitem_get(test_data): # missing d = test_data.ts.index[0] - BDay() - with pytest.raises(KeyError, match=r"Timestamp\('1999-12-31 00:00:00'\)"): + msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)" + with pytest.raises(KeyError, match=msg): test_data.ts[d] # None
- [ ] closes #25241 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
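A minimal sketch mirroring the new tests in the diff (note that `Timestamp(..., freq=...)` was still supported in this era of pandas):

```python
import pandas as pd

t1 = pd.Timestamp('2019-01-01 10:00', freq='H')
assert t1.tz_localize('UTC').freq == t1.freq   # freq now survives tz_localize

t2 = pd.Timestamp('2019-01-02 12:00', tz='UTC', freq='T')
assert t2.tz_convert('UTC').freq == t2.freq    # ...and tz_convert
```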
https://api.github.com/repos/pandas-dev/pandas/pulls/25247
2019-02-09T18:55:27Z
2019-02-11T13:09:37Z
2019-02-11T13:09:36Z
2019-02-11T13:10:05Z
BUG: Fix exceptions when Series.interpolate's `order` parameter is missing or invalid
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4032dc20b2e19..cd087c6429d6d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -139,7 +139,7 @@ Indexing Missing ^^^^^^^ -- +- Fixed misleading exception message in :meth:`Series.missing` if argument ``order`` is required, but omitted (:issue:`10633`, :issue:`24014`). - - diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ac7d21de442db..4e2c04dba8b04 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1115,24 +1115,18 @@ def check_int_bool(self, inplace): fill_value=fill_value, coerce=coerce, downcast=downcast) - # try an interp method - try: - m = missing.clean_interp_method(method, **kwargs) - except ValueError: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate(method=m, index=index, values=values, - axis=axis, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, inplace=inplace, - downcast=downcast, **kwargs) - - raise ValueError("invalid method '{0}' to interpolate.".format(method)) + # validate the interp method + m = missing.clean_interp_method(method, **kwargs) + + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate(method=m, index=index, values=values, + axis=axis, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, inplace=inplace, + downcast=downcast, **kwargs) def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, diff --git a/pandas/core/missing.py b/pandas/core/missing.py index cc7bdf95200d1..9acdb1a06b2d1 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -293,9 +293,10 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': - # GH #10633 - if not order: - raise ValueError("order needs to be specified and greater than 0") + # GH #10633, #24014 + if isna(order) or (order <= 0): + raise ValueError("order needs to be specified and greater than 0; " + "got order: {}".format(order)) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 985288c439917..f07dd1dfb5fda 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -854,8 +854,23 @@ def test_series_pad_backfill_limit(self): assert_series_equal(result, expected) -class TestSeriesInterpolateData(): +@pytest.fixture(params=['linear', 'index', 'values', 'nearest', 'slinear', + 'zero', 'quadratic', 'cubic', 'barycentric', 'krogh', + 'polynomial', 'spline', 'piecewise_polynomial', + 'from_derivatives', 'pchip', 'akima', ]) +def nontemporal_method(request): + """ Fixture that returns an (method name, required kwargs) pair. + + This fixture does not include method 'time' as a parameterization; that + method requires a Series with a DatetimeIndex, and is generally tested + separately from these non-temporal methods. 
+ """ + method = request.param + kwargs = dict(order=1) if method in ('spline', 'polynomial') else dict() + return method, kwargs + +class TestSeriesInterpolateData(): def test_interpolate(self, datetime_series, string_series): ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index) @@ -875,12 +890,12 @@ def test_interpolate(self, datetime_series, string_series): time_interp = ord_ts_copy.interpolate(method='time') tm.assert_series_equal(time_interp, ord_ts) - # try time interpolation on a non-TimeSeries - # Only raises ValueError if there are NaNs. - non_ts = string_series.copy() - non_ts[0] = np.NaN - msg = ("time-weighted interpolation only works on Series or DataFrames" - " with a DatetimeIndex") + def test_interpolate_time_raises_for_non_timeseries(self): + # When method='time' is used on a non-TimeSeries that contains a null + # value, a ValueError should be raised. + non_ts = Series([0, 1, 2, np.NaN]) + msg = ("time-weighted interpolation only works on Series.* " + "with a DatetimeIndex") with pytest.raises(ValueError, match=msg): non_ts.interpolate(method='time') @@ -1061,21 +1076,35 @@ def test_interp_limit(self): result = s.interpolate(method='linear', limit=2) assert_series_equal(result, expected) - # GH 9217, make sure limit is an int and greater than 0 - methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero', - 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', - 'polynomial', 'spline', 'piecewise_polynomial', None, - 'from_derivatives', 'pchip', 'akima'] - s = pd.Series([1, 2, np.nan, np.nan, 5]) - msg = (r"Limit must be greater than 0|" - "time-weighted interpolation only works on Series or" - r" DataFrames with a DatetimeIndex|" - r"invalid method '(polynomial|spline|None)' to interpolate|" - "Limit must be an integer") - for limit in [-1, 0, 1., 2.]: - for method in methods: - with pytest.raises(ValueError, match=msg): - s.interpolate(limit=limit, method=method) + @pytest.mark.parametrize("limit", [-1, 0]) + def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, + limit): + # GH 9217: make sure limit is greater than zero. + s = pd.Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + with pytest.raises(ValueError, match="Limit must be greater than 0"): + s.interpolate(limit=limit, method=method, **kwargs) + + def test_interpolate_invalid_float_limit(self, nontemporal_method): + # GH 9217: make sure limit is an integer. + s = pd.Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + limit = 2.0 + with pytest.raises(ValueError, match="Limit must be an integer"): + s.interpolate(limit=limit, method=method, **kwargs) + + @pytest.mark.parametrize("invalid_method", [None, 'nonexistent_method']) + def test_interp_invalid_method(self, invalid_method): + s = Series([1, 3, np.nan, 12, np.nan, 25]) + + msg = "method must be one of.* Got '{}' instead".format(invalid_method) + with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method) + + # When an invalid method and invalid limit (such as -1) are + # provided, the error message reflects the invalid method. 
+ with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method, limit=-1) def test_interp_limit_forward(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) @@ -1276,11 +1305,20 @@ def test_interp_limit_no_nans(self): @td.skip_if_no_scipy @pytest.mark.parametrize("method", ['polynomial', 'spline']) def test_no_order(self, method): + # see GH-10633, GH-24014 s = Series([0, 1, np.nan, 3]) - msg = "invalid method '{}' to interpolate".format(method) + msg = "You must specify the order of the spline or polynomial" with pytest.raises(ValueError, match=msg): s.interpolate(method=method) + @td.skip_if_no_scipy + @pytest.mark.parametrize('order', [-1, -1.0, 0, 0.0, np.nan]) + def test_interpolate_spline_invalid_order(self, order): + s = Series([0, 1, np.nan, 3]) + msg = "order needs to be specified and greater than 0" + with pytest.raises(ValueError, match=msg): + s.interpolate(method='spline', order=order) + @td.skip_if_no_scipy def test_spline(self): s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) @@ -1313,19 +1351,6 @@ def test_spline_interpolation(self): expected1 = s.interpolate(method='spline', order=1) assert_series_equal(result1, expected1) - @td.skip_if_no_scipy - def test_spline_error(self): - # see gh-10633 - s = pd.Series(np.arange(10) ** 2) - s[np.random.randint(0, 9, 3)] = np.nan - msg = "invalid method 'spline' to interpolate" - with pytest.raises(ValueError, match=msg): - s.interpolate(method='spline') - - msg = "order needs to be specified and greater than 0" - with pytest.raises(ValueError, match=msg): - s.interpolate(method='spline', order=0) - def test_interp_timedelta64(self): # GH 6424 df = Series([1, np.nan, 3],
closes #24014 xref #10633 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Before this change, when the `Series.interpolate()` method was called with invalid arguments, exceptions with informative messages would be raised internally, but then caught and suppressed. An exception with a potentially misleading message would be raised instead. For example, when interpolate is called with `method='spline'`, an `order` argument must also be supplied. This is checked internally, but when the `order` argument was missing, a confusing error would be raised: ``` >>> import pandas as pd >>> s = pd.Series([0, 1, pd.np.nan, 3, 4]) >>> s.interpolate(method='spline') Traceback (most recent call last): ... ValueError: invalid method 'spline' to interpolate. ``` This problem was reported as issues #10633 and #24014. After this change, a specific and correct exception (previously caught and discarded internally) is raised: ``` >>> import pandas as pd >>> s = pd.Series([0, 1, pd.np.nan, 3, 4]) >>> s.interpolate(method='spline') ... ValueError: You must specify the order of the spline or polynomial. ``` In addition, light validation is now performed on the `order` parameter before it is passed to a `scipy` class. Previously, `pandas` would check if `order` was truthy in the Python sense. If `order` was non-zero and invalid, a scipy error would propagate to the user: ``` >>> s.interpolate(method='spline', order=-1) Traceback (most recent call last): ... dfitpack.error: (1<=k && k<=5) failed for 3rd argument k: fpcurf0:k=-1 ``` After this change, a more understandable exception is raised in this case: ``` >>> s.interpolate(method='spline', order=-1) Traceback (most recent call last): ... ValueError: order needs to be specified and greater than 0 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/25246
2019-02-09T18:33:57Z
2019-02-11T16:08:48Z
2019-02-11T16:08:48Z
2019-02-11T16:08:55Z
Backport PR #25230 on branch 0.24.x (BUG: Fix regression in DataFrame.apply causing RecursionError)
diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index 73df504c89d5b..23d993923e697 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -21,8 +21,8 @@ Fixed Regressions ^^^^^^^^^^^^^^^^^ - Fixed regression in :meth:`DataFrame.all` and :meth:`DataFrame.any` where ``bool_only=True`` was ignored (:issue:`25101`) - - Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`) +- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`) .. _whatsnew_0242.enhancements: diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index b11542622451c..dd05e2022f066 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -397,12 +397,15 @@ def is_dict_like(obj): True >>> is_dict_like([1, 2, 3]) False + >>> is_dict_like(dict) + False + >>> is_dict_like(dict()) + True """ - for attr in ("__getitem__", "keys", "__contains__"): - if not hasattr(obj, attr): - return False - - return True + dict_like_attrs = ("__getitem__", "keys", "__contains__") + return (all(hasattr(obj, attr) for attr in dict_like_attrs) + # [GH 25196] exclude classes + and not isinstance(obj, type)) def is_named_tuple(obj): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 89662b70a39ad..49a66efaffc11 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -159,13 +159,15 @@ def test_is_nested_list_like_fails(obj): @pytest.mark.parametrize( - "ll", [{}, {'A': 1}, Series([1])]) + "ll", [{}, {'A': 1}, Series([1]), collections.defaultdict()]) def test_is_dict_like_passes(ll): assert inference.is_dict_like(ll) -@pytest.mark.parametrize( - "ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])]) +@pytest.mark.parametrize("ll", [ + '1', 1, [1, 2], (1, 2), range(2), Index([1]), + dict, collections.defaultdict, Series +]) def test_is_dict_like_fails(ll): assert not inference.is_dict_like(ll) diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ade527a16c902..a4cd1aa3bacb6 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -318,6 +318,13 @@ def test_apply_reduce_Series(self, float_frame): result = float_frame.apply(np.mean, axis=1) assert_series_equal(result, expected) + def test_apply_reduce_rows_to_dict(self): + # GH 25196 + data = pd.DataFrame([[1, 2], [3, 4]]) + expected = pd.Series([{0: 1, 1: 3}, {0: 2, 1: 4}]) + result = data.apply(dict) + assert_series_equal(result, expected) + def test_apply_differently_indexed(self): df = DataFrame(np.random.randn(20, 10))
Backport PR #25230: BUG: Fix regression in DataFrame.apply causing RecursionError
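For illustration, the behavior exercised by the doctests and tests in the diff above, summarized as a short sketch:

```
import pandas as pd
from pandas.api.types import is_dict_like

# Classes are no longer treated as dict-like; only instances are.
assert is_dict_like(dict())    # instance: has keys/__getitem__/__contains__
assert not is_dict_like(dict)  # class object: excluded via isinstance(obj, type)

# The regression itself: applying the `dict` class previously recursed
# forever; it now reduces each column to a dict keyed by the index.
df = pd.DataFrame([[1, 2], [3, 4]])
result = df.apply(dict)  # Series([{0: 1, 1: 3}, {0: 2, 1: 4}])
```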
https://api.github.com/repos/pandas-dev/pandas/pulls/25245
2019-02-09T16:54:17Z
2019-02-09T17:59:26Z
2019-02-09T17:59:26Z
2019-02-09T17:59:26Z
REF: simplify ensure_index
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index f91b96dc1b1dc..0c8f2baabc804 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -190,11 +190,7 @@ def maybe_indices_to_slice( max_len: int, ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] -def clean_index_list(obj: list) -> tuple[ - list | np.ndarray, # np.ndarray[object | np.int64 | np.uint64] - bool, -]: ... - +def is_all_arraylike(obj: list) -> bool: ... # ----------------------------------------------------------------- # Functions which in reality take memoryviews diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4b5ef3e909a00..1a07b76583fca 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -740,19 +740,15 @@ cpdef ndarray[object] ensure_string_array( return result -@cython.wraparound(False) -@cython.boundscheck(False) -def clean_index_list(obj: list): +def is_all_arraylike(obj: list) -> bool: """ - Utility used in ``pandas.core.indexes.api.ensure_index``. + Should we treat these as levels of a MultiIndex, as opposed to Index items? """ cdef: Py_ssize_t i, n = len(obj) object val bint all_arrays = True - # First check if we have a list of arraylikes, in which case we will - # pass them to MultiIndex.from_arrays for i in range(n): val = obj[i] if not (isinstance(val, list) or @@ -762,31 +758,7 @@ def clean_index_list(obj: list): all_arrays = False break - if all_arrays: - return obj, all_arrays - - # don't force numpy coerce with nan's - inferred = infer_dtype(obj, skipna=False) - if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']: - return np.asarray(obj, dtype=object), 0 - elif inferred in ['integer']: - # we infer an integer but it *could* be a uint64 - - arr = np.asarray(obj) - if arr.dtype.kind not in ["i", "u"]: - # eg [0, uint64max] gets cast to float64, - # but then we know we have either uint64 or object - if (arr < 0).any(): - # TODO: similar to maybe_cast_to_integer_array - return np.asarray(obj, dtype="object"), 0 - - # GH#35481 - guess = np.asarray(obj, dtype="uint64") - return guess, 0 - - return arr, 0 - - return np.asarray(obj), 0 + return all_arrays # ------------------------------------------------------------------------------ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 124903446220d..96278f5686b57 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,6 +1,5 @@ from __future__ import annotations -from copy import copy as copy_func from datetime import datetime import functools from itertools import zip_longest @@ -6312,21 +6311,15 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind # check in clean_index_list index_like = list(index_like) - converted, all_arrays = lib.clean_index_list(index_like) - - if len(converted) > 0 and all_arrays: + if len(index_like) and lib.is_all_arraylike(index_like): from pandas.core.indexes.multi import MultiIndex - return MultiIndex.from_arrays(converted) + return MultiIndex.from_arrays(index_like) else: - index_like = converted + return Index(index_like, copy=copy, tupleize_cols=False) else: - # clean_index_list does the equivalent of copying - # so only need to do this if not list instance - if copy: - index_like = copy_func(index_like) - return Index(index_like) + return Index(index_like, copy=copy) def ensure_has_len(seq): diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index c796a25faf0a6..9572aeaf41c91 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ 
b/pandas/tests/indexes/numeric/test_numeric.py @@ -531,6 +531,14 @@ def test_constructor(self, dtype): res = Index([1, 2 ** 63 + 1], dtype=dtype) tm.assert_index_equal(res, idx) + @pytest.mark.xfail(reason="https://github.com/numpy/numpy/issues/19146") + def test_constructor_does_not_cast_to_float(self): + # https://github.com/numpy/numpy/issues/19146 + values = [0, np.iinfo(np.uint64).max] + + result = UInt64Index(values) + assert list(result) == values + @pytest.mark.parametrize( "box", diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f75e4af888643..d7abaf0b5dfbe 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1622,6 +1622,18 @@ def test_ensure_index_mixed_closed_intervals(self): expected = Index(intervals, dtype=object) tm.assert_index_equal(result, expected) + def test_ensure_index_uint64(self): + # with both 0 and a large-uint64, np.array will infer to float64 + # https://github.com/numpy/numpy/issues/19146 + # but a more accurate choice would be uint64 + values = [0, np.iinfo(np.uint64).max] + + result = ensure_index(values) + assert list(result) == values + + expected = Index(values, dtype="uint64") + tm.assert_index_equal(result, expected) + def test_get_combined_index(self): result = _get_combined_index([]) expected = Index([]) diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 0b1f807f2da63..5b7e90fe16d8f 100644 --- a/pandas/tests/libs/test_lib.py +++ b/pandas/tests/libs/test_lib.py @@ -206,15 +206,3 @@ def test_no_default_pickle(): # GH#40397 obj = tm.round_trip_pickle(lib.no_default) assert obj is lib.no_default - - -def test_clean_index_list(): - # with both 0 and a large-uint64, np.array will infer to float64 - # https://github.com/numpy/numpy/issues/19146 - # but a more accurate choice would be uint64 - values = [0, np.iinfo(np.uint64).max] - - result, _ = lib.clean_index_list(values) - - expected = np.array(values, dtype="uint64") - tm.assert_numpy_array_equal(result, expected, check_dtype=True)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41793
2021-06-03T01:14:12Z
2021-06-03T17:15:18Z
2021-06-03T17:15:18Z
2021-06-03T18:34:17Z
REF: de-duplicate ExtensionIndex methods
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7c76a04a605e3..a8626158d48b2 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -346,6 +346,36 @@ def where( res_values = np.where(mask, self._ndarray, value) return self._from_backing_data(res_values) + # ------------------------------------------------------------------------ + # Index compat methods + + def insert( + self: NDArrayBackedExtensionArrayT, loc: int, item + ) -> NDArrayBackedExtensionArrayT: + """ + Make new ExtensionArray inserting new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + type(self) + """ + code = self._validate_scalar(item) + + new_vals = np.concatenate( + ( + self._ndarray[:loc], + np.asarray([code], dtype=self._ndarray.dtype), + self._ndarray[loc:], + ) + ) + return self._from_backing_data(new_vals) + # ------------------------------------------------------------------------ # Additional array methods # These are not part of the EA API, but we implement them because diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 8c2cff21c114e..2cbf1a8063a92 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -37,7 +37,11 @@ is_string_dtype, needs_i8_conversion, ) -from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, @@ -630,7 +634,13 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: # This is needed for Categorical, but is kind of weird return True - # must be PeriodDType + elif isinstance(dtype, PeriodDtype): + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + elif isinstance(dtype, IntervalDtype): + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + # fallback, default to allowing NaN, None, NA, NaT return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 066fa1f547328..b1cabf92bf985 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -13,17 +13,12 @@ from pandas._typing import ArrayLike from pandas.compat.numpy import function as nv -from pandas.errors import AbstractMethodError from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.util._exceptions import rewrite_exception -from pandas.core.dtypes.cast import ( - find_common_type, - infer_dtype_from, -) from pandas.core.dtypes.common import ( is_dtype_equal, is_object_dtype, @@ -34,6 +29,7 @@ ABCSeries, ) +from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import ( Categorical, DatetimeArray, @@ -297,6 +293,21 @@ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray: # overriding IndexOpsMixin improves performance GH#38083 return self._data.searchsorted(value, side=side, sorter=sorter) + def putmask(self, mask, value) -> Index: + mask, noop = validate_putmask(self._data, mask) + if noop: + return self.copy() + + try: + self._validate_fill_value(value) + except (ValueError, TypeError): + dtype = self._find_common_type_compat(value) + return self.astype(dtype).putmask(mask, value) + + arr = self._data.copy() + arr.putmask(mask, value) + return type(self)._simple_new(arr, name=self.name) + # 
--------------------------------------------------------------------- def _get_engine_target(self) -> np.ndarray: @@ -323,9 +334,30 @@ def repeat(self, repeats, axis=None): result = self._data.repeat(repeats, axis=axis) return type(self)._simple_new(result, name=self.name) - def insert(self, loc: int, item): - # ExtensionIndex subclasses must override Index.insert - raise AbstractMethodError(self) + def insert(self, loc: int, item) -> Index: + """ + Make new Index inserting new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + new_index : Index + """ + try: + result = self._data.insert(loc, item) + except (ValueError, TypeError): + # e.g. trying to insert an integer into a DatetimeIndex + # We cannot keep the same dtype, so cast to the (often object) + # minimal shared dtype before doing the insert. + dtype = self._find_common_type_compat(item) + return self.astype(dtype).insert(loc, item) + else: + return type(self)._simple_new(result, name=self.name) def _validate_fill_value(self, value): """ @@ -426,60 +458,3 @@ def _get_engine_target(self) -> np.ndarray: def _from_join_target(self, result: np.ndarray) -> ArrayLike: assert result.dtype == self._data._ndarray.dtype return self._data._from_backing_data(result) - - def insert(self: _T, loc: int, item) -> Index: - """ - Make new Index inserting new item at location. Follows - Python list.append semantics for negative values. - - Parameters - ---------- - loc : int - item : object - - Returns - ------- - new_index : Index - - Raises - ------ - ValueError if the item is not valid for this dtype. - """ - arr = self._data - try: - code = arr._validate_scalar(item) - except (ValueError, TypeError): - # e.g. trying to insert an integer into a DatetimeIndex - # We cannot keep the same dtype, so cast to the (often object) - # minimal shared dtype before doing the insert. 
- dtype, _ = infer_dtype_from(item, pandas_dtype=True) - dtype = find_common_type([self.dtype, dtype]) - return self.astype(dtype).insert(loc, item) - else: - new_vals = np.concatenate( - ( - arr._ndarray[:loc], - np.asarray([code], dtype=arr._ndarray.dtype), - arr._ndarray[loc:], - ) - ) - new_arr = arr._from_backing_data(new_vals) - return type(self)._simple_new(new_arr, name=self.name) - - def putmask(self, mask, value) -> Index: - res_values = self._data.copy() - try: - res_values.putmask(mask, value) - except (TypeError, ValueError): - return self.astype(object).putmask(mask, value) - - return type(self)._simple_new(res_values, name=self.name) - - # error: Argument 1 of "_wrap_joined_index" is incompatible with supertype - # "Index"; supertype defines the argument type as "Union[ExtensionArray, ndarray]" - def _wrap_joined_index( # type: ignore[override] - self: _T, joined: NDArrayBackedExtensionArray, other: _T - ) -> _T: - name = get_op_result_name(self, other) - - return type(self)._simple_new(joined, name=name) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6dcb2a44e7d3d..06ab7fdbcf872 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -67,7 +67,6 @@ take_nd, unique, ) -from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, @@ -854,46 +853,6 @@ def mid(self) -> Index: def length(self) -> Index: return Index(self._data.length, copy=False) - def putmask(self, mask, value) -> Index: - mask, noop = validate_putmask(self._data, mask) - if noop: - return self.copy() - - try: - self._validate_fill_value(value) - except (ValueError, TypeError): - dtype = self._find_common_type_compat(value) - return self.astype(dtype).putmask(mask, value) - - arr = self._data.copy() - arr.putmask(mask, value) - return type(self)._simple_new(arr, name=self.name) - - def insert(self, loc: int, item): - """ - Return a new IntervalIndex inserting new item at location. Follows - Python list.append semantics for negative values. 
Only Interval - objects and NA can be inserted into an IntervalIndex - - Parameters - ---------- - loc : int - item : object - - Returns - ------- - IntervalIndex - """ - try: - result = self._data.insert(loc, item) - except (ValueError, TypeError): - # e.g trying to insert a string - dtype, _ = infer_dtype_from_scalar(item, pandas_dtype=True) - dtype = find_common_type([self.dtype, dtype]) - return self.astype(dtype).insert(loc, item) - - return type(self)._simple_new(result, name=self.name) - # -------------------------------------------------------------------- # Rendering Methods # __repr__ associated methods are based on MultiIndex diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 778373fc7f0df..92ef388d73fde 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -24,6 +24,7 @@ ) from pandas.core.dtypes.missing import ( array_equivalent, + is_valid_na_for_dtype, isna, isnull, na_value_for_dtype, @@ -729,3 +730,12 @@ def test_is_matching_na_nan_matches_none(self): assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True) assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True) + + +class TestIsValidNAForDtype: + def test_is_valid_na_for_dtype_interval(self): + dtype = IntervalDtype("int64", "left") + assert not is_valid_na_for_dtype(NaT, dtype) + + dtype = IntervalDtype("datetime64[ns]", "both") + assert not is_valid_na_for_dtype(NaT, dtype)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41791
2021-06-02T23:34:38Z
2021-06-03T00:51:15Z
2021-06-03T00:51:15Z
2021-06-03T00:54:56Z
REF: de-duplicate _validate_fill_value/_validate_scalar
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7c76a04a605e3..0bb6b2fa4b703 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -94,7 +94,7 @@ def take( axis: int = 0, ) -> NDArrayBackedExtensionArrayT: if allow_fill: - fill_value = self._validate_fill_value(fill_value) + fill_value = self._validate_scalar(fill_value) new_data = take( self._ndarray, @@ -107,25 +107,6 @@ def take( ) return self._from_backing_data(new_data) - def _validate_fill_value(self, fill_value): - """ - If a fill_value is passed to `take` convert it to a representation - suitable for self._ndarray, raising TypeError if this is not possible. - - Parameters - ---------- - fill_value : object - - Returns - ------- - fill_value : native representation - - Raises - ------ - TypeError - """ - raise AbstractMethodError(self) - # ------------------------------------------------------------------------ def equals(self, other) -> bool: @@ -194,7 +175,7 @@ def shift(self, periods=1, fill_value=None, axis=0): def _validate_shift_value(self, fill_value): # TODO: after deprecation in datetimelikearraymixin is enforced, # we can remove this and ust validate_fill_value directly - return self._validate_fill_value(fill_value) + return self._validate_scalar(fill_value) def __setitem__(self, key, value): key = check_array_indexer(self, key) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 47779dd6dba25..068f5703649fa 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1407,7 +1407,7 @@ def _validate_searchsorted_value(self, value): codes = np.array(locs, dtype=self.codes.dtype) # type: ignore[assignment] return codes - def _validate_fill_value(self, fill_value): + def _validate_scalar(self, fill_value): """ Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. @@ -1436,8 +1436,6 @@ def _validate_fill_value(self, fill_value): ) return fill_value - _validate_scalar = _validate_fill_value - # ------------------------------------------------------------- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ff46715d0a527..ba5be03b93490 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -557,27 +557,8 @@ def _validate_comparison_value(self, other): return other - def _validate_fill_value(self, fill_value): - """ - If a fill_value is passed to `take` convert it to an i8 representation, - raising TypeError if this is not possible. 
- - Parameters - ---------- - fill_value : object - - Returns - ------- - fill_value : np.int64, np.datetime64, or np.timedelta64 - - Raises - ------ - TypeError - """ - return self._validate_scalar(fill_value) - def _validate_shift_value(self, fill_value): - # TODO(2.0): once this deprecation is enforced, use _validate_fill_value + # TODO(2.0): once this deprecation is enforced, use _validate_scalar if is_valid_na_for_dtype(fill_value, self.dtype): fill_value = NaT elif isinstance(fill_value, self._recognized_scalars): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4aa3bab168ac6..8836695efcbcb 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -801,7 +801,7 @@ def fillna( if limit is not None: raise TypeError("limit is not supported for IntervalArray.") - value_left, value_right = self._validate_fill_value(value) + value_left, value_right = self._validate_scalar(value) left = self.left.fillna(value=value_left) right = self.right.fillna(value=value_right) @@ -1000,7 +1000,7 @@ def take( fill_left = fill_right = fill_value if allow_fill: - fill_left, fill_right = self._validate_fill_value(fill_value) + fill_left, fill_right = self._validate_scalar(fill_value) left_take = take( self._left, indices, allow_fill=allow_fill, fill_value=fill_left @@ -1037,6 +1037,7 @@ def _validate_scalar(self, value): if isinstance(value, Interval): self._check_closed_matches(value, name="value") left, right = value.left, value.right + # TODO: check subdtype match like _validate_setitem_value? elif is_valid_na_for_dtype(value, self.left.dtype): # GH#18295 left = right = value @@ -1046,9 +1047,6 @@ def _validate_scalar(self, value): ) return left, right - def _validate_fill_value(self, value): - return self._validate_scalar(value) - def _validate_setitem_value(self, value): needs_float_conversion = False diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index e9d554200805e..dc592f205b3ea 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -190,7 +190,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): def isna(self) -> np.ndarray: return isna(self._ndarray) - def _validate_fill_value(self, fill_value): + def _validate_scalar(self, fill_value): if fill_value is None: # Primarily for subclasses fill_value = self.dtype.na_value
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41790
2021-06-02T23:28:12Z
2021-06-03T00:51:45Z
2021-06-03T00:51:45Z
2021-06-03T00:56:17Z
Bug in xs raising KeyError for MultiIndex columns with droplevel False and list indexer
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 500030e1304c6..42c71acdee7e0 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -21,6 +21,13 @@ Fixed regressions .. --------------------------------------------------------------------------- +.. _whatsnew_125.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- Deprecated passing lists as ``key`` to :meth:`DataFrame.xs` and :meth:`Series.xs` (:issue:`41760`) + .. _whatsnew_125.bug_fixes: Bug fixes diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 99816237155b3..35e10b27e978c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3756,6 +3756,15 @@ class animal locomotion """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) + + if isinstance(key, list): + warnings.warn( + "Passing lists as key for xs is deprecated and will be removed in a " + "future version. Pass key as a tuple instead.", + FutureWarning, + stacklevel=2, + ) + if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index 57ea6a63f86e8..ccd989e2de411 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -106,7 +106,8 @@ def test_xs_keep_level(self): expected = df[:1] tm.assert_frame_equal(result, expected) - result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False) + with tm.assert_produces_warning(FutureWarning): + result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False) tm.assert_frame_equal(result, expected) def test_xs_view(self, using_array_manager): @@ -187,7 +188,11 @@ def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data): assert df.index.is_unique is False expected = concat([frame.xs("one", level="second")] * 2) - result = df.xs(key, level=level) + if isinstance(key, list): + with tm.assert_produces_warning(FutureWarning): + result = df.xs(key, level=level) + else: + result = df.xs(key, level=level) tm.assert_frame_equal(result, expected) def test_xs_missing_values_in_index(self): @@ -358,3 +363,11 @@ def test_xs_droplevel_false_view(self, using_array_manager): df.iloc[0, 0] = 2 expected = DataFrame({"a": [1]}) tm.assert_frame_equal(result, expected) + + def test_xs_list_indexer_droplevel_false(self): + # GH#41760 + mi = MultiIndex.from_tuples([("x", "m", "a"), ("x", "n", "b"), ("y", "o", "c")]) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi) + with tm.assert_produces_warning(FutureWarning): + with pytest.raises(KeyError, match="y"): + df.xs(["x", "y"], drop_level=False, axis=1) diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index 932295c28c8cf..a99f09143e282 100644 --- a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -69,7 +69,8 @@ def test_xs_partial( ) df = DataFrame(np.random.randn(8, 4), index=index, columns=list("abcd")) - result = df.xs(["foo", "one"]) + with tm.assert_produces_warning(FutureWarning): + result = df.xs(["foo", "one"]) expected = df.loc["foo", "one"] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_xs.py b/pandas/tests/series/indexing/test_xs.py index b6351e970222f..9a277783a1b3d 100644 --- a/pandas/tests/series/indexing/test_xs.py +++ b/pandas/tests/series/indexing/test_xs.py @@ -69,3 +69,13 @@ def 
test_series_xs_droplevel_false(self): ), ) tm.assert_series_equal(result, expected) + + def test_xs_key_as_list(self): + # GH#41760 + mi = MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"]) + ser = Series([1], index=mi) + with tm.assert_produces_warning(FutureWarning): + ser.xs(["a", "x"], axis=0, drop_level=False) + + with tm.assert_produces_warning(FutureWarning): + ser.xs(["a"], axis=0, drop_level=False)
- [x] closes #41760
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry

cc @simonjayhawkins Should we include in 1.2.5 or push to 1.3?
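For illustration, a sketch of the deprecated versus preferred usage exercised by the new tests:

```
import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"])
ser = pd.Series([1], index=mi)

ser.xs(("a", "x"), drop_level=False)  # preferred: pass the key as a tuple
ser.xs(["a", "x"], drop_level=False)  # deprecated: now emits a FutureWarning
```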
https://api.github.com/repos/pandas-dev/pandas/pulls/41789
2021-06-02T20:36:06Z
2021-06-04T00:20:06Z
2021-06-04T00:20:05Z
2021-06-04T10:59:48Z
BLD: Adjust Numpy Minimum Versions for aarch64/arm64 compatibility
diff --git a/pyproject.toml b/pyproject.toml index 3947856d94d01..86b255ab6bf58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,9 +5,17 @@ requires = [ "setuptools>=38.6.0", "wheel", "Cython>=0.29.21,<3", # Note: sync with setup.py - "numpy==1.17.3; python_version=='3.7'", - "numpy==1.18.3; python_version=='3.8'", - "numpy; python_version>='3.9'", + # Numpy requirements for different OS/architectures + # Copied from https://github.com/scipy/scipy/blob/master/pyproject.toml (which is also licensed under BSD) + "numpy==1.17.3; python_version=='3.7' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + "numpy==1.18.3; python_version=='3.8' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + "numpy==1.19.3; python_version>='3.9' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + # Aarch64(Python 3.9 requirements are the same as AMD64) + "numpy==1.19.2; python_version=='3.7' and platform_machine=='aarch64'", + "numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'", + # Darwin Arm64 + "numpy>=1.20.0; python_version=='3.8' and platform_machine=='arm64' and platform_system=='Darwin'", + "numpy>=1.20.0; python_version=='3.9' and platform_machine=='arm64' and platform_system=='Darwin'" ] # uncomment to enable pep517 after versioneer problem is fixed. # https://github.com/python-versioneer/python-versioneer/issues/193
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41788
2021-06-02T20:29:46Z
2021-06-21T13:07:35Z
2021-06-21T13:07:35Z
2021-09-23T18:52:57Z
REF: de-duplicate nested-dict handling in DataFrame construction
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..c1caf474b2020 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -52,8 +52,6 @@ def is_float_array(values: np.ndarray, skipna: bool = False): ... def is_integer_array(values: np.ndarray, skipna: bool = False): ... def is_bool_array(values: np.ndarray, skipna: bool = False): ... -def fast_multiget(mapping: dict, keys: np.ndarray, default=np.nan) -> np.ndarray: ... - def fast_unique_multiple_list_gen(gen: Generator, sort: bool = True) -> list: ... def fast_unique_multiple_list(lists: list, sort: bool = True) -> list: ... def fast_unique_multiple(arrays: list, sort: bool = True) -> list: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..352f50df01dc9 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2841,25 +2841,3 @@ def to_object_array_tuples(rows: object) -> np.ndarray: result[i, j] = row[j] return result - - -@cython.wraparound(False) -@cython.boundscheck(False) -def fast_multiget(dict mapping, ndarray keys, default=np.nan) -> np.ndarray: - cdef: - Py_ssize_t i, n = len(keys) - object val - ndarray[object] output = np.empty(n, dtype='O') - - if n == 0: - # kludge, for Series - return np.empty(0, dtype='f8') - - for i in range(n): - val = keys[i] - if val in mapping: - output[i] = mapping[val] - else: - output[i] = default - - return maybe_convert_objects(output) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5c7211a5d1852..161572f3f1ac3 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -780,22 +780,6 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, return dtype, val -def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: - """ - Convert datetimelike-keyed dicts to a Timestamp-keyed dict. 
- - Parameters - ---------- - d: dict-like object - - Returns - ------- - dict - - """ - return {maybe_box_datetimelike(key): value for key, value in d.items()} - - def infer_dtype_from_array( arr, pandas_dtype: bool = False ) -> tuple[DtypeObj, ArrayLike]: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 46eb138dc74d1..270eddf2bd3a5 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -27,7 +27,6 @@ from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na, - dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, @@ -61,6 +60,7 @@ TimedeltaArray, ) from pandas.core.construction import ( + create_series_with_explicit_dtype, ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, @@ -68,9 +68,7 @@ ) from pandas.core.indexes import base as ibase from pandas.core.indexes.api import ( - DatetimeIndex, Index, - TimedeltaIndex, ensure_index, get_objs_combined_axis, union_indexes, @@ -566,7 +564,6 @@ def convert(v): def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: - oindex = None homogenized = [] for val in data: @@ -581,16 +578,10 @@ def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: val = val._values else: if isinstance(val, dict): - if oindex is None: - oindex = index.astype("O") - - if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - # see test_constructor_dict_datetime64_index - val = dict_compat(val) - else: - # see test_constructor_subclass_dict - val = dict(val) - val = lib.fast_multiget(val, oindex._values, default=np.nan) + # see test_constructor_subclass_dict + # test_constructor_dict_datetime64_index + val = create_series_with_explicit_dtype(val, index=index)._values + val = sanitize_array( val, index, dtype=dtype, copy=False, raise_cast_failure=False ) diff --git a/pandas/tests/dtypes/cast/test_dict_compat.py b/pandas/tests/dtypes/cast/test_dict_compat.py deleted file mode 100644 index 13dc82d779f95..0000000000000 --- a/pandas/tests/dtypes/cast/test_dict_compat.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np - -from pandas.core.dtypes.cast import dict_compat - -from pandas import Timestamp - - -def test_dict_compat(): - data_datetime64 = {np.datetime64("1990-03-15"): 1, np.datetime64("2015-03-15"): 2} - data_unchanged = {1: 2, 3: 4, 5: 6} - expected = {Timestamp("1990-3-15"): 1, Timestamp("2015-03-15"): 2} - assert dict_compat(data_datetime64) == expected - assert dict_compat(expected) == expected - assert dict_compat(data_unchanged) == data_unchanged
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry

This was the motivation for #41707. This will introduce some overhead, but will also prevent the behaviors from getting out of sync again.
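A sketch of the construction path affected, based on the tests referenced in the diff (`test_constructor_dict_datetime64_index`); the values here are illustrative:

```
import numpy as np
import pandas as pd

# A dict-valued column keyed by np.datetime64 is realigned against the
# DatetimeIndex; this now goes through ordinary Series construction
# instead of the removed dict_compat + fast_multiget fast path.
idx = pd.to_datetime(["2019-01-01", "2019-01-02"])
df = pd.DataFrame({"a": {np.datetime64("2019-01-01"): 1.0}}, index=idx)
# df["a"] -> [1.0, NaN]
```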
https://api.github.com/repos/pandas-dev/pandas/pulls/41785
2021-06-02T14:33:22Z
2021-06-02T18:40:04Z
2021-06-02T18:40:04Z
2021-06-26T07:54:47Z
BUG: clean_index_list handle uint64 case
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..92daad2d6a5d7 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -185,7 +185,7 @@ def maybe_indices_to_slice( ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] def clean_index_list(obj: list) -> tuple[ - list | np.ndarray, # np.ndarray[object] | np.ndarray[np.int64] + list | np.ndarray, # np.ndarray[object | np.int64 | np.uint64] bool, ]: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..cbe5a556d55b0 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -747,10 +747,14 @@ def clean_index_list(obj: list): object val bint all_arrays = True + # First check if we have a list of arraylikes, in which case we will + # pass them to MultiIndex.from_arrays for i in range(n): val = obj[i] if not (isinstance(val, list) or util.is_array(val) or hasattr(val, '_data')): + # TODO: EA? + # exclude tuples, frozensets as they may be contained in an Index all_arrays = False break @@ -762,11 +766,21 @@ def clean_index_list(obj: list): if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']: return np.asarray(obj, dtype=object), 0 elif inferred in ['integer']: - # TODO: we infer an integer but it *could* be a uint64 - try: - return np.asarray(obj, dtype='int64'), 0 - except OverflowError: - return np.asarray(obj, dtype='object'), 0 + # we infer an integer but it *could* be a uint64 + + arr = np.asarray(obj) + if arr.dtype.kind not in ["i", "u"]: + # eg [0, uint64max] gets cast to float64, + # but then we know we have either uint64 or object + if (arr < 0).any(): + # TODO: similar to maybe_cast_to_integer_array + return np.asarray(obj, dtype="object"), 0 + + # GH#35481 + guess = np.asarray(obj, dtype="uint64") + return guess, 0 + + return arr, 0 return np.asarray(obj), 0 @@ -1552,9 +1566,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: for i in range(n): val = values[i] - if (util.is_integer_object(val) and - not util.is_timedelta64_object(val) and - not util.is_datetime64_object(val)): + if util.is_integer_object(val): return "mixed-integer" return "mixed" diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 02fd680775141..14ec3d6009b61 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6299,27 +6299,18 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind if copy: index_like = index_like.copy() return index_like - if hasattr(index_like, "name"): - # https://github.com/python/mypy/issues/1424 - # error: Item "ExtensionArray" of "Union[ExtensionArray, - # Sequence[Any]]" has no attribute "name" - # error: Item "Sequence[Any]" of "Union[ExtensionArray, Sequence[Any]]" - # has no attribute "name" - # error: "Sequence[Any]" has no attribute "name" - # error: Item "Sequence[Any]" of "Union[Series, Sequence[Any]]" has no - # attribute "name" - # error: Item "Sequence[Any]" of "Union[Any, Sequence[Any]]" has no - # attribute "name" - name = index_like.name # type: ignore[union-attr, attr-defined] + + if isinstance(index_like, ABCSeries): + name = index_like.name return Index(index_like, name=name, copy=copy) if is_iterator(index_like): index_like = list(index_like) - # must check for exactly list here because of strict type - # check in clean_index_list if isinstance(index_like, list): - if type(index_like) != list: + if type(index_like) is not list: + # must check for exactly list here because of strict type + # check in clean_index_list index_like = list(index_like) converted, 
all_arrays = lib.clean_index_list(index_like) @@ -6329,13 +6320,6 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind return MultiIndex.from_arrays(converted) else: - if isinstance(converted, np.ndarray) and converted.dtype == np.int64: - # Check for overflows if we should actually be uint64 - # xref GH#35481 - alt = np.asarray(index_like) - if alt.dtype == np.uint64: - converted = alt - index_like = converted else: # clean_index_list does the equivalent of copying diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index be5b89f08b5ca..d5555561088eb 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1934,7 +1934,9 @@ def _setitem_with_indexer_missing(self, indexer, value): # e.g. 0.0 -> 0 # GH#12246 if index.is_unique: - new_indexer = index.get_indexer([new_index[-1]]) + # pass new_index[-1:] instead if [new_index[-1]] + # so that we retain dtype + new_indexer = index.get_indexer(new_index[-1:]) if (new_indexer != -1).any(): # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 5b7e90fe16d8f..0b1f807f2da63 100644 --- a/pandas/tests/libs/test_lib.py +++ b/pandas/tests/libs/test_lib.py @@ -206,3 +206,15 @@ def test_no_default_pickle(): # GH#40397 obj = tm.round_trip_pickle(lib.no_default) assert obj is lib.no_default + + +def test_clean_index_list(): + # with both 0 and a large-uint64, np.array will infer to float64 + # https://github.com/numpy/numpy/issues/19146 + # but a more accurate choice would be uint64 + values = [0, np.iinfo(np.uint64).max] + + result, _ = lib.clean_index_list(values) + + expected = np.array(values, dtype="uint64") + tm.assert_numpy_array_equal(result, expected, check_dtype=True)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
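The fixed inference, mirroring the test added in the diff (this exercises the internal `lib` module as it exists at this commit):

```
import numpy as np
from pandas._libs import lib

# np.array([0, uint64max]) infers float64 (numpy#19146), losing precision;
# clean_index_list now returns the exact integers as a uint64 array.
values = [0, np.iinfo(np.uint64).max]
result, _ = lib.clean_index_list(values)
assert result.dtype == np.dtype("uint64")
```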
https://api.github.com/repos/pandas-dev/pandas/pulls/41784
2021-06-02T13:47:10Z
2021-06-02T15:16:26Z
2021-06-02T15:16:26Z
2021-06-02T16:58:01Z
TYP: remove future import from pyi file
diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi index 346cc34576184..a631191f8b005 100644 --- a/pandas/_libs/tslibs/timezones.pyi +++ b/pandas/_libs/tslibs/timezones.pyi @@ -1,5 +1,3 @@ -from __future__ import annotations - from datetime import ( datetime, tzinfo, diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index e3d5acdfe2577..2a23289cdf61b 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -2,8 +2,6 @@ For cython types that cannot be represented precisely, closest-available python equivalents are used, and the precise types kept as adjacent comments. """ -from __future__ import annotations - from datetime import tzinfo import numpy as np
Follow-up to #41769 and #41774; xref #41771.
https://api.github.com/repos/pandas-dev/pandas/pulls/41783
2021-06-02T12:12:11Z
2021-06-02T15:14:32Z
2021-06-02T15:14:32Z
2021-06-18T02:24:25Z
Backport PR #41739: TST: Make ARM build work (not in the CI)
diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/circle-37-arm64.yaml similarity index 100% rename from ci/deps/travis-37-arm64.yaml rename to ci/deps/circle-37-arm64.yaml diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..e6bd9950331ca 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -12,41 +12,30 @@ if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then echo fi -MINICONDA_DIR="$HOME/miniconda3" - - -if [ -d "$MINICONDA_DIR" ]; then - echo - echo "rm -rf "$MINICONDA_DIR"" - rm -rf "$MINICONDA_DIR" -fi echo "Install Miniconda" -UNAME_OS=$(uname) -if [[ "$UNAME_OS" == 'Linux' ]]; then +DEFAULT_CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest" +if [[ "$(uname -m)" == 'aarch64' ]]; then + CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.10.1-4/Miniforge3-4.10.1-4-Linux-aarch64.sh" +elif [[ "$(uname)" == 'Linux' ]]; then if [[ "$BITS32" == "yes" ]]; then - CONDA_OS="Linux-x86" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86.sh" else - CONDA_OS="Linux-x86_64" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86_64.sh" fi -elif [[ "$UNAME_OS" == 'Darwin' ]]; then - CONDA_OS="MacOSX-x86_64" +elif [[ "$(uname)" == 'Darwin' ]]; then + CONDA_URL="$DEFAULT_CONDA_URL-MacOSX-x86_64.sh" else - echo "OS $UNAME_OS not supported" + echo "OS $(uname) not supported" exit 1 fi - -if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh" -else - CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" -fi +echo "Downloading $CONDA_URL" wget -q $CONDA_URL -O miniconda.sh chmod +x miniconda.sh -# Installation path is required for ARM64 platform as miniforge script installs in path $HOME/miniforge3. +MINICONDA_DIR="$HOME/miniconda3" +rm -rf $MINICONDA_DIR ./miniconda.sh -b -p $MINICONDA_DIR - export PATH=$MINICONDA_DIR/bin:$PATH echo diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 2ac9b9e2c875c..9aa261fd745d5 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -77,6 +77,18 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" +def is_platform_arm() -> bool: + """ + Checking if he running platform use ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("arm64", "aarch64") + + def import_lzma(): """ Importing the `lzma` module. diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index e2cdf76d038ec..218d247a25380 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -3,6 +3,7 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm from pandas.errors import UnsupportedFunctionCall from pandas import ( @@ -891,6 +892,7 @@ def test_rolling_sem(frame_or_series): tm.assert_series_equal(result, expected) +@pytest.mark.xfail(is_platform_arm(), reason="GH 41740") @pytest.mark.parametrize( ("func", "third_value", "values"), [
Backport PR #41739
https://api.github.com/repos/pandas-dev/pandas/pulls/41781
2021-06-02T09:08:31Z
2021-06-02T10:26:55Z
2021-06-02T10:26:54Z
2021-06-02T10:26:59Z
DOC: 1.2.5 release date
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 5b8b5eb9e651c..d3ceb2b919b5d 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -1,7 +1,7 @@ .. _whatsnew_125: -What's new in 1.2.5 (May ??, 2021) ----------------------------------- +What's new in 1.2.5 (June 22, 2021) +----------------------------------- These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog including other versions of pandas. @@ -14,36 +14,15 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :func:`concat` between two :class:`DataFrame` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) -- Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) -- Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) -- Regression in :func:`ExcelFile` when a corrupt file is opened but not closed (:issue:`41778`) +- Fixed regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) +- Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) +- Fixed regression in :func:`ExcelFile` when a corrupt file is opened but not closed (:issue:`41778`) - Fixed regression in :meth:`DataFrame.astype` with ``dtype=str`` failing to convert ``NaN`` in categorical columns (:issue:`41797`) .. --------------------------------------------------------------------------- - -.. _whatsnew_125.bug_fixes: - -Bug fixes -~~~~~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - -.. _whatsnew_125.other: - -Other -~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - .. _whatsnew_125.contributors: Contributors
https://api.github.com/repos/pandas-dev/pandas/pulls/41780
2021-06-02T07:57:22Z
2021-06-21T18:07:01Z
2021-06-21T18:07:01Z
2021-06-22T08:33:28Z
BUG: Series.loc[-1] with UInt64Index
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b36499c340fd9..cac7b9d8677b0 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -945,6 +945,7 @@ Indexing - Bug in :meth:`DataFrame.loc` returning :class:`MultiIndex` in wrong order if indexer has duplicates (:issue:`40978`) - Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) - Bug in :meth:`PeriodIndex.get_loc` failing to raise ``KeyError`` when given a :class:`Period` with a mismatched ``freq`` (:issue:`41670`) +- Bug ``.loc.__getitem__`` with a :class:`UInt64Index` and negative-integer keys raising ``OverflowError`` instead of ``KeyError`` in some cases, wrapping around to positive integers in others (:issue:`41777`) Missing ^^^^^^^ diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f7cec262ca302..3351bb7cac7d6 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -106,7 +106,8 @@ cdef class IndexEngine: try: return self.mapping.get_item(val) - except (TypeError, ValueError): + except (TypeError, ValueError, OverflowError): + # GH#41775 OverflowError e.g. if we are uint64 and val is -1 raise KeyError(val) cdef inline _get_loc_duplicates(self, object val): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 124903446220d..db718916d7fd7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5410,6 +5410,7 @@ def _find_common_type_compat(self, target) -> DtypeObj: return np.dtype("object") dtype = find_common_type([self.dtype, target_dtype]) + if dtype.kind in ["i", "u"]: # TODO: what about reversed with self being categorical? if ( diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5f24eb0cfaad6..3dc46f04d1d45 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -49,7 +49,6 @@ TimedeltaArray, ) from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin -import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, @@ -599,7 +598,7 @@ def _convert_arr_indexer(self, keyarr): try: return self._data._validate_listlike(keyarr, allow_object=True) except (ValueError, TypeError): - return com.asarray_tuplesafe(keyarr) + return super()._convert_arr_indexer(keyarr) class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index de7c522b4fbec..e6526bd0eaf2f 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -37,7 +37,6 @@ ) from pandas.core.dtypes.generic import ABCSeries -import pandas.core.common as com from pandas.core.indexes.base import ( Index, maybe_extract_name, @@ -250,21 +249,6 @@ def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): # we will try to coerce to integers return self._maybe_cast_indexer(label) - @doc(Index._convert_arr_indexer) - def _convert_arr_indexer(self, keyarr) -> np.ndarray: - if not is_unsigned_integer_dtype(self.dtype): - return super()._convert_arr_indexer(keyarr) - - # Cast the indexer to uint64 if possible so that the values returned - # from indexing are also uint64. 
- dtype = None - if is_integer_dtype(keyarr) or ( - lib.infer_dtype(keyarr, skipna=False) == "integer" - ): - dtype = np.dtype(np.uint64) - - return com.asarray_tuplesafe(keyarr, dtype=dtype) - # ---------------------------------------------------------------- @doc(Index._shallow_copy) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index ab868a3d3713d..dcccd42c52c8c 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1010,18 +1010,32 @@ def test_loc_copy_vs_view(self): def test_loc_uint64(self): # GH20722 # Test whether loc accept uint64 max value as index. - s = Series([1, 2], index=[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]) + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) - result = s.loc[np.iinfo("uint64").max - 1] - expected = s.iloc[0] + result = ser.loc[umax - 1] + expected = ser.iloc[0] assert result == expected - result = s.loc[[np.iinfo("uint64").max - 1]] - expected = s.iloc[[0]] + result = ser.loc[[umax - 1]] + expected = ser.iloc[[0]] tm.assert_series_equal(result, expected) - result = s.loc[[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]] - tm.assert_series_equal(result, s) + result = ser.loc[[umax - 1, umax]] + tm.assert_series_equal(result, ser) + + def test_loc_uint64_disallow_negative(self): + # GH#41775 + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[-1] + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[[-1]] def test_loc_setitem_empty_append_expands_rows(self): # GH6173, various appends to an empty dataframe
- [x] closes #41775
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
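The corrected behavior, mirroring the new `test_loc_uint64_disallow_negative` above:

```
import numpy as np
import pandas as pd

umax = np.iinfo("uint64").max
ser = pd.Series([1, 2], index=[umax - 1, umax])

try:
    ser.loc[-1]         # previously OverflowError, or wrap-around for lists
except KeyError as err:
    print(err)          # KeyError: -1; ser.loc[[-1]] now raises the same way
```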
https://api.github.com/repos/pandas-dev/pandas/pulls/41777
2021-06-02T05:09:17Z
2021-06-03T17:15:56Z
2021-06-03T17:15:56Z
2021-06-03T18:35:02Z
ENH: maybe_convert_objects handle IntervalArray
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1ebcdb347c428..4d184ee13e3db 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1187,6 +1187,7 @@ cdef class Seen: bint timedelta_ # seen_timedelta bint datetimetz_ # seen_datetimetz bint period_ # seen_period + bint interval_ # seen_interval def __cinit__(self, bint coerce_numeric=False): """ @@ -1212,6 +1213,7 @@ cdef class Seen: self.timedelta_ = False self.datetimetz_ = False self.period_ = False + self.interval_ = False self.coerce_numeric = coerce_numeric cdef inline bint check_uint64_conflict(self) except -1: @@ -2035,7 +2037,6 @@ cpdef bint is_interval_array(ndarray values): """ Is this an ndarray of Interval (or np.nan) with a single dtype? """ - cdef: Py_ssize_t i, n = len(values) str closed = None @@ -2320,6 +2321,7 @@ def maybe_convert_objects(ndarray[object] objects, bint convert_datetime=False, bint convert_timedelta=False, bint convert_period=False, + bint convert_interval=False, bint convert_to_nullable_integer=False) -> "ArrayLike": """ Type inference function-- convert object array to proper dtype @@ -2343,6 +2345,9 @@ def maybe_convert_objects(ndarray[object] objects, convert_period : bool, default False If an array-like object contains only (homogeneous-freq) Period values or NaT, whether to convert and return a PeriodArray. + convert_interval : bool, default False + If an array-like object contains only Interval objects (with matching + dtypes and closedness) or NaN, whether to convert to IntervalArray. convert_to_nullable_integer : bool, default False If an array-like object contains only integer values (and NaN) is encountered, whether to convert and return an IntegerArray. @@ -2473,6 +2478,13 @@ def maybe_convert_objects(ndarray[object] objects, except (ValueError, TypeError): seen.object_ = True break + elif is_interval(val): + if convert_interval: + seen.interval_ = True + break + else: + seen.object_ = True + break else: seen.object_ = True break @@ -2494,6 +2506,17 @@ def maybe_convert_objects(ndarray[object] objects, # unbox to PeriodArray return pi._data + seen.object_ = True + + if seen.interval_: + if is_interval_array(objects): + from pandas import IntervalIndex + ii = IntervalIndex(objects) + + # unbox to IntervalArray + return ii._data + + seen.object_ = True if not seen.object_: result = None diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index cd5e28baef16b..7e0b26391e132 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -813,6 +813,22 @@ def test_mixed_dtypes_remain_object_array(self): result = lib.maybe_convert_objects(arr, convert_datetime=True) tm.assert_numpy_array_equal(result, arr) + @pytest.mark.parametrize( + "idx", + [ + pd.IntervalIndex.from_breaks(range(5), closed="both"), + pd.period_range("2016-01-01", periods=3, freq="D"), + ], + ) + def test_maybe_convert_objects_ea(self, idx): + + result = lib.maybe_convert_objects( + np.array(idx, dtype=object), + convert_period=True, + convert_interval=True, + ) + tm.assert_extension_array_equal(result, idx._data) + class TestTypeInference:
Inching towards getting rid of special-casing interval+period in sanitize_array
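For illustration, the new keyword as exercised by the test in the diff above; a minimal sketch using the internal `lib` module:

```
import numpy as np
import pandas as pd
from pandas._libs import lib

idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
result = lib.maybe_convert_objects(
    np.array(idx, dtype=object), convert_interval=True
)
# result is an IntervalArray rather than an object-dtype ndarray
```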
https://api.github.com/repos/pandas-dev/pandas/pulls/41776
2021-06-02T04:40:03Z
2021-06-02T13:05:19Z
2021-06-02T13:05:19Z
2021-06-02T13:51:52Z
TYP: use type annotations in vectorized.pyi
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index 6ed1e10ef2353..e3d5acdfe2577 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -2,11 +2,9 @@ For cython types that cannot be represented precisely, closest-available python equivalents are used, and the precise types kept as adjacent comments. """ +from __future__ import annotations + from datetime import tzinfo -from typing import ( - Optional, - Union, -) import numpy as np @@ -16,32 +14,24 @@ from pandas._libs.tslibs.offsets import BaseOffset def dt64arr_to_periodarr( stamps: np.ndarray, # const int64_t[:] freq: int, - tz: Optional[tzinfo], + tz: tzinfo | None, ) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] - - def is_date_array_normalized( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo] = None, + tz: tzinfo | None = None, ) -> bool: ... - - def normalize_i8_timestamps( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo], + tz: tzinfo | None, ) -> np.ndarray: ... # np.ndarray[np.int64] - - def get_resolution( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo] = None, + tz: tzinfo | None = None, ) -> Resolution: ... - - def ints_to_pydatetime( arr: np.ndarray, # const int64_t[:}] - tz: Optional[tzinfo] = None, - freq: Optional[Union[str, BaseOffset]] = None, + tz: tzinfo | None = None, + freq: str | BaseOffset | None = None, fold: bool = False, box: str = "datetime", ) -> np.ndarray: ... # np.ndarray[object]
https://api.github.com/repos/pandas-dev/pandas/pulls/41774
2021-06-02T00:38:40Z
2021-06-02T01:42:10Z
2021-06-02T01:42:10Z
2021-06-18T02:24:32Z
REF: Simplify Index.union
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 14ec3d6009b61..124903446220d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -77,7 +77,6 @@ is_float_dtype, is_hashable, is_integer, - is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, @@ -2963,20 +2962,7 @@ def union(self, other, sort=None): stacklevel=2, ) - dtype = find_common_type([self.dtype, other.dtype]) - if self._is_numeric_dtype and other._is_numeric_dtype: - # Right now, we treat union(int, float) a bit special. - # See https://github.com/pandas-dev/pandas/issues/26778 for discussion - # We may change union(int, float) to go to object. - # float | [u]int -> float (the special case) - # <T> | <T> -> T - # <T> | <U> -> object - if not (is_integer_dtype(self.dtype) and is_integer_dtype(other.dtype)): - dtype = np.dtype("float64") - else: - # one is int64 other is uint64 - dtype = np.dtype("object") - + dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) @@ -5410,6 +5396,19 @@ def _find_common_type_compat(self, target) -> DtypeObj: return IntervalDtype(np.float64, closed=self.closed) target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) + + # special case: if one dtype is uint64 and the other a signed int, return object + # See https://github.com/pandas-dev/pandas/issues/26778 for discussion + # Now it's: + # * float | [u]int -> float + # * uint64 | signed int -> object + # We may change union(float | [u]int) to go to object. + if self.dtype == "uint64" or target_dtype == "uint64": + if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( + target_dtype + ): + return np.dtype("object") + dtype = find_common_type([self.dtype, target_dtype]) if dtype.kind in ["i", "u"]: # TODO: what about reversed with self being categorical? diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 62c07f4306a96..087ccbef7b778 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pandas.core.dtypes.common import is_dtype_equal +from pandas.core.dtypes.cast import find_common_type from pandas import ( CategoricalIndex, @@ -25,6 +25,7 @@ import pandas._testing as tm from pandas.api.types import ( is_datetime64tz_dtype, + is_signed_integer_dtype, pandas_dtype, ) @@ -48,7 +49,11 @@ def test_union_different_types(index_flat, index_flat2): idx1 = index_flat idx2 = index_flat2 - type_pair = tuple(sorted([idx1.dtype.type, idx2.dtype.type], key=lambda x: str(x))) + common_dtype = find_common_type([idx1.dtype, idx2.dtype]) + + any_uint64 = idx1.dtype == np.uint64 or idx2.dtype == np.uint64 + idx1_signed = is_signed_integer_dtype(idx1.dtype) + idx2_signed = is_signed_integer_dtype(idx2.dtype) # Union with a non-unique, non-monotonic index raises error # This applies to the boolean index @@ -58,23 +63,12 @@ def test_union_different_types(index_flat, index_flat2): res1 = idx1.union(idx2) res2 = idx2.union(idx1) - if is_dtype_equal(idx1.dtype, idx2.dtype): - assert res1.dtype == idx1.dtype - assert res2.dtype == idx1.dtype - - elif type_pair not in COMPATIBLE_INCONSISTENT_PAIRS: - # A union with a CategoricalIndex (even as dtype('O')) and a - # non-CategoricalIndex can only be made if both indices are monotonic. - # This is true before this PR as well. 
+ if any_uint64 and (idx1_signed or idx2_signed): assert res1.dtype == np.dtype("O") assert res2.dtype == np.dtype("O") - - elif idx1.dtype.kind in ["f", "i", "u"] and idx2.dtype.kind in ["f", "i", "u"]: - assert res1.dtype == np.dtype("f8") - assert res2.dtype == np.dtype("f8") - else: - raise NotImplementedError + assert res1.dtype == common_dtype + assert res2.dtype == common_dtype @pytest.mark.parametrize(
This is the `Index.union` part of #41153 and helps simplify that PR. The special casing in `Index.union` is currently active whenever both indexes are numeric. After #41153 it should only apply when one dtype is uint64 and the other is a signed integer. So after this and #41153: * int8 & uint32 -> int64 * [u]int64 & float64 -> float64 * int64 & uint64 -> object * int8 & uint64 -> object The first and second cases are handled correctly by `find_common_type`, but the others currently aren't. This PR changes no functionality itself, but prepares for the changes in #41153, where we want e.g. `NumericIndex[int8].union(NumericIndex[uint32])` to give `NumericIndex[int64]` and not `Index[object]`, as shown in the sketch below.
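A minimal sketch of the dtype rules above, using plain `pd.Index` objects (the `NumericIndex` cases assume #41153; this is illustrative, not copied from the test suite):

```python
import numpy as np
import pandas as pd

# uint64 | signed int: no numpy integer dtype can hold both losslessly,
# so the union falls back to object dtype
left = pd.Index(np.array([1, 2], dtype=np.int64))
right = pd.Index(np.array([2 ** 63], dtype=np.uint64))
print(left.union(right).dtype)  # object

# float | [u]int: the union stays floating
flt = pd.Index([1.5, 2.5])
print(left.union(flt).dtype)  # float64
```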
https://api.github.com/repos/pandas-dev/pandas/pulls/41773
2021-06-02T00:07:30Z
2021-06-02T21:40:04Z
2021-06-02T21:40:04Z
2022-02-23T00:28:12Z
CLN: assorted follow-ups
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 409125b6d6691..1556c88aaecc6 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -643,6 +643,7 @@ Other API changes - Partially initialized :class:`CategoricalDtype` (i.e. those with ``categories=None`` objects will no longer compare as equal to fully initialized dtype objects. - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) - Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) +- Removed redundant ``freq`` from :class:`PeriodIndex` string representation (:issue:`41653`) Build ===== diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 6a270c0a55638..e2883dbf4c76b 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1461,7 +1461,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: for i in range(n): val = values[i] - # do not use is_nul_datetimelike to keep + # do not use is_null_datetimelike to keep # np.datetime64('nat') and np.timedelta64('nat') if val is None or util.is_nan(val): pass diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f8f5e5e05bc35..30f42435ad177 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1266,14 +1266,14 @@ def compute(self, method: str) -> Series: return dropped.sort_values(ascending=ascending).head(n) # fast method - arr, pandas_dtype = _ensure_data(dropped.values) + arr, new_dtype = _ensure_data(dropped.values) if method == "nlargest": arr = -arr - if is_integer_dtype(pandas_dtype): + if is_integer_dtype(new_dtype): # GH 21426: ensure reverse ordering at boundaries arr -= 1 - elif is_bool_dtype(pandas_dtype): + elif is_bool_dtype(new_dtype): # GH 26154: ensure False is smaller than True arr = 1 - (-arr) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ec69d9ccbdd90..020f708606353 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2104,7 +2104,6 @@ def sequence_to_dt64ns( result = data.view(DT64NS_DTYPE) if copy: - # TODO: should this be deepcopy? result = result.copy() assert isinstance(result, np.ndarray), type(result) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index c2323c8697eee..d8c1b9cef468a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -866,7 +866,7 @@ def start_time(self) -> DatetimeArray: def end_time(self) -> DatetimeArray: return self.to_timestamp(how="end") - def _require_matching_freq(self, other, base=False): + def _require_matching_freq(self, other, base: bool = False) -> None: # See also arrays.period.raise_on_incompatible if isinstance(other, BaseOffset): other_freq = other @@ -1057,7 +1057,7 @@ def dt64arr_to_periodarr(data, freq, tz=None): Returns ------- - ordinals : ndarray[int] + ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 30215b40593d3..eb203d349b4e7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -776,6 +776,7 @@ def _engine(self) -> libindex.IndexEngine: target_values = self._get_engine_target() return self._engine_type(lambda: target_values, len(self)) + @final @cache_readonly def _dir_additions_for_owner(self) -> set[str_t]: """ @@ -6209,6 +6210,7 @@ def shape(self) -> Shape: # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) + @final def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None: """ Issue a FutureWarning if the arg/kwarg is not no_default.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41772
2021-06-01T21:15:53Z
2021-06-02T01:38:46Z
2021-06-02T01:38:46Z
2021-06-02T02:31:33Z
TYP: use type annotations in tzconversion.pyi
diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi
index f47885a2e3306..1cbe55320099b 100644
--- a/pandas/_libs/tslibs/tzconversion.pyi
+++ b/pandas/_libs/tslibs/tzconversion.pyi
@@ -2,11 +2,7 @@ from datetime import (
     timedelta,
     tzinfo,
 )
-from typing import (
-    Iterable,
-    Optional,
-    Union,
-)
+from typing import Iterable
 
 import numpy as np
 
@@ -14,12 +10,10 @@ def tz_convert_from_utc(
     vals: np.ndarray,  # const int64_t[:]
     tz: tzinfo,
 ) -> np.ndarray: ...  # np.ndarray[np.int64]
-
 def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ...
-
 def tz_localize_to_utc(
     vals: np.ndarray,  # np.ndarray[np.int64]
-    tz: Optional[tzinfo],
-    ambiguous: Optional[Union[str, bool, Iterable[bool]]] = None,
-    nonexistent: Optional[Union[str, timedelta, np.timedelta64]] = None,
+    tz: tzinfo | None,
+    ambiguous: str | bool | Iterable[bool] | None = None,
+    nonexistent: str | timedelta | np.timedelta64 | None = None,
 ) -> np.ndarray: ...  # np.ndarray[np.int64]
https://api.github.com/repos/pandas-dev/pandas/pulls/41771
2021-06-01T19:18:35Z
2021-06-02T13:05:51Z
2021-06-02T13:05:51Z
2021-06-18T02:25:40Z
DEPR: DataFrame(floaty, dtype=inty) match Series
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 760da36a30075..c32eda4928da7 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -652,7 +652,9 @@ class Rank: ] def setup(self, dtype): - self.df = DataFrame(np.random.randn(10000, 10), columns=range(10), dtype=dtype) + self.df = DataFrame( + np.random.randn(10000, 10).astype(dtype), columns=range(10), dtype=dtype + ) def time_rank(self, dtype): self.df.rank() diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b36499c340fd9..0bca312c0bdce 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -700,6 +700,7 @@ Deprecations - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) - Deprecated behavior of :class:`Series` construction with large-integer values and small-integer dtype silently overflowing; use ``Series(data).astype(dtype)`` instead (:issue:`41734`) +- Deprecated behavior of :class:`DataFrame` construction with floating data and integer dtype casting even when lossy; in a future version this will remain floating, matching :class:`Series` behavior (:issue:`41770`) - Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index edaa53cd55042..c877d27fd2392 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -24,6 +24,7 @@ Dtype, DtypeObj, ) +from pandas.errors import IntCastingNaNError from pandas.core.dtypes.base import ( ExtensionDtype, @@ -511,7 +512,24 @@ def sanitize_array( # possibility of nan -> garbage try: subarr = _try_cast(data, dtype, copy, True) + except IntCastingNaNError: + subarr = np.array(data, copy=copy) except ValueError: + if not raise_cast_failure: + # i.e. called via DataFrame constructor + warnings.warn( + "In a future version, passing float-dtype values and an " + "integer dtype to DataFrame will retain floating dtype " + "if they cannot be cast losslessly (matching Series behavior). " + "To retain the old behavior, use DataFrame(data).astype(dtype)", + FutureWarning, + stacklevel=4, + ) + # GH#40110 until the deprecation is enforced, we _dont_ + # ignore the dtype for DataFrame, and _do_ cast even though + # it is lossy. 
+ dtype = cast(np.dtype, dtype) + return np.array(data, dtype=dtype, copy=copy) subarr = np.array(data, copy=copy) else: # we will try to copy by-definition here diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 161572f3f1ac3..177b1ccd166cb 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2088,7 +2088,13 @@ def maybe_cast_to_integer_array( if is_unsigned_integer_dtype(dtype) and (arr < 0).any(): raise OverflowError("Trying to coerce negative values to unsigned integers") - if is_float_dtype(arr.dtype) or is_object_dtype(arr.dtype): + if is_float_dtype(arr.dtype): + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if is_object_dtype(arr.dtype): raise ValueError("Trying to coerce float values to integers") if casted.dtype < arr.dtype: @@ -2102,6 +2108,17 @@ def maybe_cast_to_integer_array( ) return casted + if arr.dtype.kind in ["m", "M"]: + # test_constructor_maskedarray_nonfloat + warnings.warn( + f"Constructing Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is deprecated and will raise in a future version. " + "Use values.view(dtype) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return casted + # No known cases that get here, but raising explicitly to cover our bases. raise ValueError(f"values cannot be losslessly cast to {dtype}") diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 270eddf2bd3a5..81bf3ca4ba07a 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -22,11 +22,9 @@ DtypeObj, Manager, ) -from pandas.errors import IntCastingNaNError from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, - construct_1d_ndarray_preserving_na, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, @@ -303,22 +301,12 @@ def ndarray_to_mgr( shape = values.shape flat = values.ravel() - if not is_integer_dtype(dtype): - # TODO: skipping integer_dtype is needed to keep the tests passing, - # not clear it is correct - # Note: we really only need _try_cast, but keeping to exposed funcs - values = sanitize_array( - flat, None, dtype=dtype, copy=copy, raise_cast_failure=True - ) - else: - try: - values = construct_1d_ndarray_preserving_na( - flat, dtype=dtype, copy=False - ) - except IntCastingNaNError: - # following Series, we ignore the dtype and retain floating - # values instead of casting nans to meaningless ints - pass + # GH#40110 see similar check inside sanitize_array + rcf = not (is_integer_dtype(dtype) and values.dtype.kind == "f") + + values = sanitize_array( + flat, None, dtype=dtype, copy=copy, raise_cast_failure=rcf + ) values = values.reshape(shape) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 6e176310da6b4..dac3c0382df01 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -603,7 +603,7 @@ def test_sort_index_level_large_cardinality(self): # GH#2684 (int64) index = MultiIndex.from_arrays([np.arange(4000)] * 3) - df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64) + df = DataFrame(np.random.randn(4000).astype("int64"), index=index) # it works! 
result = df.sort_index(level=0) @@ -611,7 +611,7 @@ def test_sort_index_level_large_cardinality(self): # GH#2684 (int32) index = MultiIndex.from_arrays([np.arange(4000)] * 3) - df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32) + df = DataFrame(np.random.randn(4000).astype("int32"), index=index) # it works! result = df.sort_index(level=0) diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 769b08373b890..5156d0371e9b7 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -714,7 +714,9 @@ def create_cols(name): np.random.randn(100, 5), dtype="float64", columns=create_cols("float") ) df_int = DataFrame( - np.random.randn(100, 5), dtype="int64", columns=create_cols("int") + np.random.randn(100, 5).astype("int64"), + dtype="int64", + columns=create_cols("int"), ) df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool")) df_object = DataFrame( @@ -765,7 +767,7 @@ def test_to_csv_dups_cols(self): tm.assert_frame_equal(result, df) df_float = DataFrame(np.random.randn(1000, 3), dtype="float64") - df_int = DataFrame(np.random.randn(1000, 3), dtype="int64") + df_int = DataFrame(np.random.randn(1000, 3)).astype("int64") df_bool = DataFrame(True, index=df_float.index, columns=range(3)) df_object = DataFrame("foo", index=df_float.index, columns=range(3)) df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3)) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 784969c199c9f..6e0013c196760 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -10,6 +10,7 @@ import functools import itertools import re +import warnings import numpy as np import numpy.ma as ma @@ -999,7 +1000,17 @@ def test_constructor_maskedarray_nonfloat(self): assert isna(frame).values.all() # cast type - frame = DataFrame(mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64) + msg = r"datetime64\[ns\] values and dtype=int64" + with tm.assert_produces_warning(FutureWarning, match=msg): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="elementwise comparison failed", + ) + frame = DataFrame( + mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64 + ) assert frame.values.dtype == np.int64 # Check non-masked values @@ -2484,6 +2495,27 @@ def test_nested_list_columns(self): tm.assert_frame_equal(result, expected) +class TestDataFrameConstructorWithDtypeCoercion: + def test_floating_values_integer_dtype(self): + # GH#40110 make DataFrame behavior with arraylike floating data and + # inty dtype match Series behavior + + arr = np.random.randn(10, 5) + + msg = "if they cannot be cast losslessly" + with tm.assert_produces_warning(FutureWarning, match=msg): + DataFrame(arr, dtype="i8") + + with tm.assert_produces_warning(None): + # if they can be cast losslessly, no warning + DataFrame(arr.round(), dtype="i8") + + # with NaNs, we already have the correct behavior, so no warning + arr[0, 0] = np.nan + with tm.assert_produces_warning(None): + DataFrame(arr, dtype="i8") + + class TestDataFrameConstructorWithDatetimeTZ: @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_construction_preserves_tzaware_dtypes(self, tz): diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index c9a39eb460cf4..d010426bee53e 100644 --- 
a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -294,7 +294,7 @@ def test_multi_dtype2(self): def test_dups_across_blocks(self, using_array_manager): # dups across blocks df_float = DataFrame(np.random.randn(10, 3), dtype="float64") - df_int = DataFrame(np.random.randn(10, 3), dtype="int64") + df_int = DataFrame(np.random.randn(10, 3).astype("int64")) df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns) df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns) df_dt = DataFrame( diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 6f4949267c00c..26f2ba577d184 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -134,7 +134,10 @@ def test_setitem_series_int8(self, val, exp_dtype, request): ) request.node.add_marker(mark) - exp = pd.Series([1, val, 3, 4], dtype=np.int8) + warn = None if exp_dtype is np.int8 else FutureWarning + msg = "Values are too large to be losslessly cast to int8" + with tm.assert_produces_warning(warn, match=msg): + exp = pd.Series([1, val, 3, 4], dtype=np.int8) self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) @pytest.mark.parametrize(
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry xref #40110, though we may still deprecate the Series behavior too, in which case we'll need to update the deprecation message here
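To make the deprecation concrete, a sketch based on the new `test_floating_values_integer_dtype` in this diff (behavior as of this change; the warning text may evolve if the Series behavior is deprecated too):

```python
import numpy as np
import pandas as pd

arr = np.random.randn(10, 5)

# Lossy float -> int casting in the DataFrame constructor now emits a
# FutureWarning; in a future version the result will stay float64,
# matching the Series behavior
df = pd.DataFrame(arr, dtype="i8")

# If the values can be cast losslessly, no warning is emitted
df_ok = pd.DataFrame(arr.round(), dtype="i8")

# With NaNs present the dtype is already ignored (floats are retained),
# so no warning is needed
arr[0, 0] = np.nan
df_nan = pd.DataFrame(arr, dtype="i8")
```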
https://api.github.com/repos/pandas-dev/pandas/pulls/41770
2021-06-01T18:37:02Z
2021-06-03T17:40:20Z
2021-06-03T17:40:20Z
2021-06-03T18:35:29Z
TYP: use type annotations in timezones.pyi
diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi
index 04a1b391dc30a..346cc34576184 100644
--- a/pandas/_libs/tslibs/timezones.pyi
+++ b/pandas/_libs/tslibs/timezones.pyi
@@ -1,32 +1,25 @@
+from __future__ import annotations
+
 from datetime import (
     datetime,
     tzinfo,
 )
-from typing import (
-    Callable,
-    Optional,
-    Union,
-)
+from typing import Callable
 
 import numpy as np
 
 # imported from dateutil.tz
 dateutil_gettz: Callable[[str], tzinfo]
 
-def tz_standardize(tz: tzinfo) -> tzinfo: ...
-
-def tz_compare(start: Optional[tzinfo], end: Optional[tzinfo]) -> bool: ...
-
+def tz_standardize(tz: tzinfo) -> tzinfo: ...
+def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ...
 def infer_tzinfo(
-    start: Optional[datetime], end: Optional[datetime],
-) -> Optional[tzinfo]: ...
+    start: datetime | None,
+    end: datetime | None,
+) -> tzinfo | None: ...
 
 # ndarrays returned are both int64_t
 def get_dst_info(tz: tzinfo) -> tuple[np.ndarray, np.ndarray, str]: ...
-
-def maybe_get_tz(tz: Optional[Union[str, int, np.int64, tzinfo]]) -> Optional[tzinfo]: ...
-
-def get_timezone(tz: tzinfo) -> Union[tzinfo, str]: ...
-
-def is_utc(tz: Optional[tzinfo]) -> bool: ...
+def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ...
+def get_timezone(tz: tzinfo) -> tzinfo | str: ...
+def is_utc(tz: tzinfo | None) -> bool: ...
https://api.github.com/repos/pandas-dev/pandas/pulls/41769
2021-06-01T18:32:29Z
2021-06-01T23:16:21Z
2021-06-01T23:16:21Z
2022-11-18T02:20:24Z
Backport PR #41711: REGR: DataFrame reduction with min_count
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index e936519383520..500030e1304c6 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4c156d7470364..92892ac0f26e0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8786,7 +8786,6 @@ def _reduce( **kwds, ): - min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -8831,7 +8830,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if (numeric_only is not None or axis == 0) and min_count == 0: + if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. # For numeric_only=None only the case with axis==0 and no object diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 523e19f6043da..a38b7a19dc80a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -391,7 +391,7 @@ def reduce(self, func, ignore_failures: bool = False) -> List["Block"]: return [] raise - if np.ndim(result) == 0: + if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index edc1b1e96509e..20adcee924a15 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -231,8 +231,7 @@ def _maybe_get_mask( """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): - # Boolean data cannot contain nulls, so signal via mask being None - return None + return np.broadcast_to(False, values.shape) if skipna or needs_i8_conversion(values.dtype): mask = isna(values) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index cb481613eb97f..b6eccc6999dec 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -783,34 +784,35 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method) + result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") # min_count=1 - result = getattr(df, method)(min_count=1) + result = getattr(df, 
method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1491,3 +1493,16 @@ def test_minmax_extensionarray(method, numeric_only): [getattr(int64_info, method)], index=Index(["Int64"], dtype="object") ) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
Backport PR #41711
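For context, a sketch of the regression this fixes, adapted from `test_prod_sum_min_count_mixed_object` in the diff:

```python
import pandas as pd

df = pd.DataFrame([1, "a", True])

# With the fix, prod works again on mixed object data when min_count
# and numeric_only are both given
result = df.prod(axis=0, min_count=1, numeric_only=False)
print(result)  # 0    a

# sum on the same frame raises TypeError, since int + str is undefined
# df.sum(axis=0, min_count=1, numeric_only=False)
```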
https://api.github.com/repos/pandas-dev/pandas/pulls/41766
2021-06-01T15:14:20Z
2021-06-01T16:58:47Z
2021-06-01T16:58:47Z
2021-06-01T16:58:52Z
Backport PR #41370 on branch 1.2.x (Pin fastparquet to leq 0.5.0)
diff --git a/ci/deps/actions-37-cov.yaml b/ci/deps/actions-37-cov.yaml
index 5381caaa242cf..6bdbfa769f772 100644
--- a/ci/deps/actions-37-cov.yaml
+++ b/ci/deps/actions-37-cov.yaml
@@ -15,7 +15,7 @@ dependencies:
   - beautifulsoup4
   - botocore>=1.11
   - dask
-  - fastparquet>=0.4.0
+  - fastparquet>=0.4.0, <=0.5.0
   - fsspec>=0.7.4
   - gcsfs>=0.6.0
   - geopandas
diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml
index 661d8813d32d2..fdea34d573340 100644
--- a/ci/deps/azure-windows-38.yaml
+++ b/ci/deps/azure-windows-38.yaml
@@ -15,7 +15,7 @@ dependencies:
   # pandas dependencies
   - blosc
   - bottleneck
-  - fastparquet>=0.4.0
+  - fastparquet>=0.4.0, <=0.5.0
   - flask
   - fsspec>=0.8.0
   - matplotlib=3.1.3
diff --git a/environment.yml b/environment.yml
index 72826124bc35d..5c47d9c5fa484 100644
--- a/environment.yml
+++ b/environment.yml
@@ -97,7 +97,7 @@ dependencies:
   - xlwt
   - odfpy
 
-  - fastparquet>=0.3.2  # pandas.read_parquet, DataFrame.to_parquet
+  - fastparquet>=0.3.2, <=0.5.0  # pandas.read_parquet, DataFrame.to_parquet
   - pyarrow>=0.15.0  # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather
   - python-snappy  # required by pyarrow
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 5a64156fe997f..33073cf953729 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -62,7 +62,7 @@ xlrd
 xlsxwriter
 xlwt
 odfpy
-fastparquet>=0.3.2
+fastparquet>=0.3.2, <=0.5.0
 pyarrow>=0.15.0
 python-snappy
 pyqt5>=5.9.2
Backport PR #41370: Pin fastparquet to leq 0.5.0
https://api.github.com/repos/pandas-dev/pandas/pulls/41765
2021-06-01T14:58:38Z
2021-06-01T16:56:41Z
2021-06-01T16:56:41Z
2021-06-01T16:56:41Z
Backport PR #41730: CI: suppress npdev warnings
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index 4cb4eaf95f6f5..2bb348a11655c 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -62,7 +62,7 @@ jobs: ENV_FILE: ci/deps/azure-38-numpydev.yaml CONDA_PY: "38" PATTERN: "not slow and not network" - TEST_ARGS: "-W error" + TEST_ARGS: "-W error -W \"ignore:Promotion of numbers and bools:FutureWarning\"" PANDAS_TESTING_MODE: "deprecate" EXTRA_APT: "xsel" diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index e26bb513838a5..d42da39ec8ff0 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -83,6 +83,7 @@ def xbox2(x): "Invalid comparison between", "Cannot compare type", "not supported between", + "could not be promoted", "invalid type promotion", ( # GH#36706 npdev 1.20.0 2020-09-28 diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 6dc3b3b13dd0c..4473d86fa04a1 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -235,7 +235,7 @@ def test_compare_list_like_nan(self, op, array, nulls_fixture, request): Categorical(list("abab")), Categorical(date_range("2017-01-01", periods=4)), pd.array(list("abcd")), - pd.array(["foo", 3.14, None, object()]), + pd.array(["foo", 3.14, None, object()], dtype=object), ], ids=lambda x: str(x.dtype), ) diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index e83882be9c680..0b710d7ebf7d7 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat.numpy import is_numpy_dev + from pandas import ( CategoricalDtype, DataFrame, @@ -162,20 +164,28 @@ def test_to_records_with_categorical(self): ), ), # Pass in a type instance. - ( + pytest.param( {"column_dtypes": str}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dtype instance. - ( + pytest.param( {"column_dtypes": np.dtype("unicode")}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dictionary (name-only). ( diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index e5ec3c5641bd2..51420859dc1bd 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -79,6 +79,7 @@ def check(df, df2): msgs = [ r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", "invalid type promotion", + "could not be promoted", ( # npdev 1.20.0 r"The DTypes <class 'numpy.dtype\[.*\]'> and "
Backport PR #41730
https://api.github.com/repos/pandas-dev/pandas/pulls/41762
2021-06-01T13:50:51Z
2021-06-01T21:39:13Z
2021-06-01T21:39:13Z
2021-06-02T07:38:01Z
Backport PR #40555: BUG: Fix behavior of replace_list with mixed types.
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst
index 60e146b2212eb..e936519383520 100644
--- a/doc/source/whatsnew/v1.2.5.rst
+++ b/doc/source/whatsnew/v1.2.5.rst
@@ -16,7 +16,7 @@ Fixed regressions
 ~~~~~~~~~~~~~~~~~
 - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`)
 - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`)
--
+- Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`)
 
 .. ---------------------------------------------------------------------------
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index b6bca855a9f05..523e19f6043da 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -860,6 +860,15 @@ def _replace_list(
         """
         See BlockManager._replace_list docstring.
         """
+
+        # https://github.com/pandas-dev/pandas/issues/40371
+        # the following pairs check code caused a regression so we catch that case here
+        # until the issue is fixed properly in can_hold_element
+
+        # error: "Iterable[Any]" has no attribute "tolist"
+        if hasattr(src_list, "tolist"):
+            src_list = src_list.tolist()  # type: ignore[attr-defined]
+
         # Exclude anything that we know we won't contain
         pairs = [
             (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x)
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index c4f2e09911b34..0f85af6b26aa3 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1665,3 +1665,22 @@ def test_replace_bytes(self, frame_or_series):
         expected = obj.copy()
         obj = obj.replace({None: np.nan})
         tm.assert_equal(obj, expected)
+
+    @pytest.mark.parametrize(
+        "data, to_replace, value, expected",
+        [
+            ([1], [1.0], [0], [0]),
+            ([1], [1], [0], [0]),
+            ([1.0], [1.0], [0], [0.0]),
+            ([1.0], [1], [0], [0.0]),
+        ],
+    )
+    @pytest.mark.parametrize("box", [list, tuple, np.array])
+    def test_replace_list_with_mixed_type(
+        self, data, to_replace, value, expected, box, frame_or_series
+    ):
+        # GH#40371
+        obj = frame_or_series(data)
+        expected = frame_or_series(expected)
+        result = obj.replace(box(to_replace), value)
+        tm.assert_equal(result, expected)
Backport PR #40555 (typing changes removed; the changes related to handling Categorical only apply on master)
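A sketch of the fixed behavior, following the parametrized test added here:

```python
import numpy as np
import pandas as pd

ser = pd.Series([1])

# Replacing with a NumPy float array now behaves like the equivalent
# list/tuple, instead of misbehaving on integer data
result = ser.replace(np.array([1.0]), [0])
print(result)  # 0    0
```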
https://api.github.com/repos/pandas-dev/pandas/pulls/41761
2021-06-01T13:03:14Z
2021-06-01T13:54:19Z
2021-06-01T13:54:19Z
2021-06-01T13:54:31Z
CI: Activating CircleCI
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000000..5ff2f783e6a96
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,18 @@
+version: 2.1
+
+jobs:
+  test-arm:
+    machine:
+      image: ubuntu-2004:202101-01
+    resource_class: arm.medium
+    environment:
+      ENV_FILE: ci/deps/circle-37-arm64.yaml
+      PYTEST_WORKERS: auto
+      PATTERN: "not slow and not network and not clipboard and not arm_slow"
+    steps:
+      - run: echo "CircleCI is working"
+
+workflows:
+  test:
+    jobs:
+      - test-arm
We'll start running ARM tests in #41739. In order to see the result of the execution in that PR, we need `.circleci/config.yml` to exist on master, so that CircleCI starts showing up in GitHub. This adds the config file, but only runs an echo (no checkout, no environment set-up, and no running of the tests). That way we can add CircleCI safely, and see results before merging #41739.
https://api.github.com/repos/pandas-dev/pandas/pulls/41752
2021-06-01T00:36:21Z
2021-06-01T01:55:44Z
2021-06-01T01:55:44Z
2021-06-01T01:55:44Z
CLN: datetimelike setops
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py
index 5f24eb0cfaad6..484b581a898d8 100644
--- a/pandas/core/indexes/datetimelike.py
+++ b/pandas/core/indexes/datetimelike.py
@@ -632,10 +632,6 @@ def is_type_compatible(self, kind: str) -> bool:
     # --------------------------------------------------------------------
     # Set Operation Methods
 
-    def _difference(self, other, sort=None):
-        new_idx = super()._difference(other, sort=sort)._with_freq(None)
-        return new_idx
-
     def _intersection(self, other: Index, sort=False) -> Index:
         """
         intersection specialized to the case with matching dtypes.
@@ -781,13 +777,8 @@ def _union(self, other, sort):
         if self._can_fast_union(other):
             result = self._fast_union(other, sort=sort)
-            if sort is None:
-                # In the case where sort is None, _can_fast_union
-                # implies that result.freq should match self.freq
-                assert result.freq == self.freq, (result.freq, self.freq)
-            elif result.freq is None:
-                # TODO: no tests rely on this; needed?
-                result = result._with_freq("infer")
+            # in the case with sort=None, the _can_fast_union check ensures
+            # that result.freq == self.freq
             return result
         else:
             i8self = Int64Index._simple_new(self.asi8)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
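Not user-facing; for context, a small sketch of the invariant the removed assertion was checking (illustrative, assuming a fast-union-eligible pair of ranges):

```python
import pandas as pd

left = pd.date_range("2021-01-01", periods=3, freq="D")
right = pd.date_range("2021-01-04", periods=3, freq="D")

# Same freq and adjacent ranges, so _can_fast_union applies and the
# union retains the original freq
result = left.union(right)
print(result.freq)  # <Day>
```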
https://api.github.com/repos/pandas-dev/pandas/pulls/41750
2021-05-31T23:50:26Z
2021-06-04T13:21:05Z
2021-06-04T13:21:05Z
2021-06-04T16:09:01Z
BUG: lib.infer_dtype with incompatible intervals
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d7e15bb2ad197..ba82a7840c4f9 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2029,16 +2029,59 @@ cdef bint is_period_array(ndarray[object] values): return True -cdef class IntervalValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: - return is_interval(value) - - cpdef bint is_interval_array(ndarray values): + """ + Is this an ndarray of Interval (or np.nan) with a single dtype? + """ + cdef: - IntervalValidator validator = IntervalValidator(len(values), - skipna=True) - return validator.validate(values) + Py_ssize_t i, n = len(values) + str closed = None + bint numeric = False + bint dt64 = False + bint td64 = False + object val + + if len(values) == 0: + return False + + for val in values: + if is_interval(val): + if closed is None: + closed = val.closed + numeric = ( + util.is_float_object(val.left) + or util.is_integer_object(val.left) + ) + td64 = is_timedelta(val.left) + dt64 = PyDateTime_Check(val.left) + elif val.closed != closed: + # mismatched closedness + return False + elif numeric: + if not ( + util.is_float_object(val.left) + or util.is_integer_object(val.left) + ): + # i.e. datetime64 or timedelta64 + return False + elif td64: + if not is_timedelta(val.left): + return False + elif dt64: + if not PyDateTime_Check(val.left): + return False + else: + raise ValueError(val) + elif util.is_nan(val) or val is None: + pass + else: + return False + + if closed is None: + # we saw all-NAs, no actual Intervals + return False + return True @cython.boundscheck(False) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0c299056075c1..d34ae6179fe76 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -317,12 +317,7 @@ def array( return PeriodArray._from_sequence(data, copy=copy) elif inferred_dtype == "interval": - try: - return IntervalArray(data, copy=copy) - except ValueError: - # We may have a mixture of `closed` here. - # We choose to return an ndarray, rather than raising. 
- pass + return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 30215b40593d3..aa8be070df312 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6441,12 +6441,8 @@ def _maybe_cast_data_without_dtype(subarr: np.ndarray) -> ArrayLike: return data elif inferred == "interval": - try: - ia_data = IntervalArray._from_sequence(subarr, copy=False) - return ia_data - except (ValueError, TypeError): - # GH27172: mixed closed Intervals --> object dtype - pass + ia_data = IntervalArray._from_sequence(subarr, copy=False) + return ia_data elif inferred == "boolean": # don't support boolean explicitly ATM pass diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 09efa97871fae..073a1ff28815b 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1458,17 +1458,54 @@ def test_categorical(self): result = lib.infer_dtype(Series(arr), skipna=True) assert result == "categorical" - def test_interval(self): + @pytest.mark.parametrize("asobject", [True, False]) + def test_interval(self, asobject): idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + if asobject: + idx = idx.astype(object) + inferred = lib.infer_dtype(idx, skipna=False) assert inferred == "interval" inferred = lib.infer_dtype(idx._data, skipna=False) assert inferred == "interval" - inferred = lib.infer_dtype(Series(idx), skipna=False) + inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False) assert inferred == "interval" + @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0]) + def test_interval_mismatched_closed(self, value): + + first = Interval(value, value, closed="left") + second = Interval(value, value, closed="right") + + # if closed match, we should infer "interval" + arr = np.array([first, first], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + + # if closed dont match, we should _not_ get "interval" + arr2 = np.array([first, second], dtype=object) + assert lib.infer_dtype(arr2, skipna=False) == "mixed" + + def test_interval_mismatched_subtype(self): + first = Interval(0, 1, closed="left") + second = Interval(Timestamp(0), Timestamp(1), closed="left") + third = Interval(Timedelta(0), Timedelta(1), closed="left") + + arr = np.array([first, second]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([second, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([first, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + # float vs int subdtype are compatible + flt_interval = Interval(1.5, 2.5, closed="left") + arr = np.array([first, flt_interval], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + @pytest.mark.parametrize("klass", [pd.array, Series]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
Nothing user-facing.
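A sketch of the new inference behavior, drawn from the tests added in this diff:

```python
import numpy as np
import pandas as pd
from pandas._libs import lib

first = pd.Interval(0, 1, closed="left")
second = pd.Interval(0, 1, closed="right")

# Matching closedness still infers "interval"
arr = np.array([first, first], dtype=object)
print(lib.infer_dtype(arr, skipna=False))  # interval

# Mismatched closedness (or mismatched subtypes) is now "mixed"
arr2 = np.array([first, second], dtype=object)
print(lib.infer_dtype(arr2, skipna=False))  # mixed
```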
https://api.github.com/repos/pandas-dev/pandas/pulls/41749
2021-05-31T20:36:32Z
2021-06-02T02:08:32Z
2021-06-02T02:08:32Z
2021-06-02T02:34:12Z
whatsnew 1.3.0
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 849b9d45da5ad..2945fc760e01a 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -28,7 +28,7 @@ Enhancements Custom HTTP(s) headers when reading csv or json files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When reading from a remote URL that is not handled by fsspec (ie. HTTP and +When reading from a remote URL that is not handled by fsspec (e.g. HTTP and HTTPS) the dictionary passed to ``storage_options`` will be used to create the headers included in the request. This can be used to control the User-Agent header or send other custom headers (:issue:`36688`). @@ -110,42 +110,32 @@ both XPath 1.0 and XSLT 1.0 are available. (:issue:`27554`) For more, see :ref:`io.xml` in the user guide on IO tools. -Styler Upgrades -^^^^^^^^^^^^^^^ - -We provided some focused development on :class:`.Styler`, including altering methods -to accept more universal CSS language for arguments, such as ``'color:red;'`` instead of -``[('color', 'red')]`` (:issue:`39564`). This is also added to the built-in methods -to allow custom CSS highlighting instead of default background coloring (:issue:`40242`). -Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient` -method to shade elements based on a given gradient map and not be restricted only to -values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). Additional -built-in methods such as :meth:`.Styler.highlight_between`, :meth:`.Styler.highlight_quantile` -and :math:`.Styler.text_gradient` have been added (:issue:`39821`, :issue:`40926`, :issue:`41098`). - -The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to -allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`). - -:meth:`.Styler.set_tooltips` is a new method that allows adding on hover tooltips to -enhance interactive displays (:issue:`35643`). :meth:`.Styler.set_td_classes`, which was recently -introduced in v1.2.0 (:issue:`36159`) to allow adding specific CSS classes to data cells, has -been made as performant as :meth:`.Styler.apply` and :meth:`.Styler.applymap` (:issue:`40453`), -if not more performant in some cases. The overall performance of HTML -render times has been considerably improved to -match :meth:`DataFrame.to_html` (:issue:`39952` :issue:`37792` :issue:`40425`). - -The :meth:`.Styler.format` has had upgrades to easily format missing data, -precision, and perform HTML escaping (:issue:`40437` :issue:`40134`). There have been numerous other bug fixes to -properly format HTML and eliminate some inconsistencies (:issue:`39942` :issue:`40356` :issue:`39807` :issue:`39889` :issue:`39627`) - -:class:`.Styler` has also been compatible with non-unique index or columns, at least for as many features as are fully compatible, others made only partially compatible (:issue:`41269`). -One also has greater control of the display through separate sparsification of the index or columns, using the new 'styler' options context (:issue:`41142`). -Render trimming has also been added for large numbers of data elements to avoid browser overload (:issue:`40712`). - -We have added an extension to allow LaTeX styling as an alternative to CSS styling and a method :meth:`.Styler.to_latex` -which renders the necessary LaTeX format including built-up styles (:issue:`21673`, :issue:`41659`). 
An additional file io function :meth:`Styler.to_html` has been added for convenience (:issue:`40312`). - -Documentation has also seen major revisions in light of new features (:issue:`39720` :issue:`39317` :issue:`40493`) +.. _whatsnew_130.styler_enhancements: + +Styler enhancements +^^^^^^^^^^^^^^^^^^^ + +We provided some focused development on :class:`.Styler`. See also the `Styler documentation <../user_guide/style.ipynb>`_ +which has been revised and improved (:issue:`39720`, :issue:`39317`, :issue:`40493`). + + - The method :meth:`.Styler.set_table_styles` can now accept more natural CSS language for arguments, such as ``'color:red;'`` instead of ``[('color', 'red')]`` (:issue:`39563`) + - The methods :meth:`.Styler.highlight_null`, :meth:`.Styler.highlight_min`, and :meth:`.Styler.highlight_max` now allow custom CSS highlighting instead of the default background coloring (:issue:`40242`) + - :meth:`.Styler.apply` now accepts functions that return an ``ndarray`` when ``axis=None``, making it now consistent with the ``axis=0`` and ``axis=1`` behavior (:issue:`39359`) + - When incorrectly formatted CSS is given via :meth:`.Styler.apply` or :meth:`.Styler.applymap`, an error is now raised upon rendering (:issue:`39660`) + - :meth:`.Styler.format` now accepts the keyword argument ``escape`` for optional HTML and LaTex escaping (:issue:`40388`, :issue:`41619`) + - :meth:`.Styler.background_gradient` has gained the argument ``gmap`` to supply a specific gradient map for shading (:issue:`22727`) + - :meth:`.Styler.clear` now clears :attr:`Styler.hidden_index` and :attr:`Styler.hidden_columns` as well (:issue:`40484`) + - Added the method :meth:`.Styler.highlight_between` (:issue:`39821`) + - Added the method :meth:`.Styler.highlight_quantile` (:issue:`40926`) + - Added the method :meth:`.Styler.text_gradient` (:issue:`41098`) + - Added the method :meth:`.Styler.set_tooltips` to allow hover tooltips; this can be used enhance interactive displays (:issue:`21266`, :issue:`40284`) + - Added the parameter ``precision`` to the method :meth:`.Styler.format` to control the display of floating point numbers (:issue:`40134`) + - :class:`.Styler` rendered HTML output now follows the `w3 HTML Style Guide <https://www.w3schools.com/html/html5_syntax.asp>`_ (:issue:`39626`) + - Many features of the :class:`.Styler` class are now either partially or fully usable on a DataFrame with a non-unique indexes or columns (:issue:`41143`) + - One has greater control of the display through separate sparsification of the index or columns using the :ref:`new styler options <options.available>`, which are also usable via :func:`option_context` (:issue:`41142`) + - Added the option ``styler.render.max_elements`` to avoid browser overload when styling large DataFrames (:issue:`40712`) + - Added the method :meth:`.Styler.to_latex` (:issue:`21673`) + - Added the method :meth:`.Styler.to_html` (:issue:`13379`) .. _whatsnew_130.dataframe_honors_copy_with_dict: @@ -153,7 +143,7 @@ DataFrame constructor honors ``copy=False`` with dict ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When passing a dictionary to :class:`DataFrame` with ``copy=False``, -a copy will no longer be made (:issue:`32960`) +a copy will no longer be made (:issue:`32960`). .. ipython:: python @@ -223,10 +213,12 @@ String accessor methods returning integers will return a value with :class:`Int6 s.str.count("a") -Centered Datetime-Like Rolling Windows +.. 
_whatsnew_130.centered_datetimelike_rolling_window: + +Centered datetime-like rolling windows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When performing rolling calculations on :class:`DataFrame` and :class:`Series` +When performing rolling calculations on DataFrame and Series objects with a datetime-like index, a centered datetime-like window can now be used (:issue:`38780`). For example: @@ -245,37 +237,28 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ -- :meth:`DataFrame.rolling`, :meth:`Series.rolling`, :meth:`DataFrame.expanding`, and :meth:`Series.expanding` now support a ``method`` argument with a ``'table'`` option that performs the windowing operation over an entire :class:`DataFrame`. See :ref:`Window Overview <window.overview>` for performance and functional benefits (:issue:`15095`, :issue:`38995`) +- :meth:`DataFrame.rolling`, :meth:`Series.rolling`, :meth:`DataFrame.expanding`, and :meth:`Series.expanding` now support a ``method`` argument with a ``'table'`` option that performs the windowing operation over an entire DataFrame. See :ref:`Window Overview <window.overview>` for performance and functional benefits (:issue:`15095`, :issue:`38995`) - Added :meth:`MultiIndex.dtypes` (:issue:`37062`) - Added ``end`` and ``end_day`` options for the ``origin`` argument in :meth:`DataFrame.resample` (:issue:`37804`) -- Improve error message when ``usecols`` and ``names`` do not match for :func:`read_csv` and ``engine="c"`` (:issue:`29042`) -- Improved consistency of error messages when passing an invalid ``win_type`` argument in :class:`Window` (:issue:`15969`) +- Improved error message when ``usecols`` and ``names`` do not match for :func:`read_csv` and ``engine="c"`` (:issue:`29042`) +- Improved consistency of error messages when passing an invalid ``win_type`` argument in :ref:`Window methods <api.window>` (:issue:`15969`) - :func:`read_sql_query` now accepts a ``dtype`` argument to cast the columnar data from the SQL database based on user input (:issue:`10285`) - Improved integer type mapping from pandas to SQLAlchemy when using :meth:`DataFrame.to_sql` (:issue:`35076`) - :func:`to_numeric` now supports downcasting of nullable ``ExtensionDtype`` objects (:issue:`33013`) -- Add support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`) -- :func:`read_excel` can now auto detect .xlsb files and older .xls files (:issue:`35416`, :issue:`41225`) +- Added support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`) +- :func:`read_excel` can now auto-detect .xlsb files and older .xls files (:issue:`35416`, :issue:`41225`) - :class:`ExcelWriter` now accepts an ``if_sheet_exists`` parameter to control the behaviour of append mode when writing to existing sheets (:issue:`40230`) -- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.ExponentialMovingWindow.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`, :issue:`41267`) +- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.ExponentialMovingWindow.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support `Numba <http://numba.pydata.org/>`_ execution with the 
``engine`` keyword (:issue:`38895`, :issue:`41267`) - :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) - :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) -- :meth:`DataFrame.applymap` can now accept kwargs to pass on to func (:issue:`39987`) +- :meth:`DataFrame.applymap` can now accept kwargs to pass on to the user-provided ``func`` (:issue:`39987`) - Passing a :class:`DataFrame` indexer to ``iloc`` is now disallowed for :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__` (:issue:`39004`) - :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. ``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`) - :meth:`DataFrame.plot.scatter` can now accept a categorical column for the argument ``c`` (:issue:`12380`, :issue:`31357`) -- :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes (:issue:`35643`, :issue:`21266`, :issue:`39317`, :issue:`39708`, :issue:`40284`) -- :meth:`.Styler.set_table_styles` amended to optionally allow certain css-string input arguments (:issue:`39564`) -- :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None`` (:issue:`39359`) -- :meth:`.Styler.apply` and :meth:`.Styler.applymap` now raise errors if incorrectly formatted CSS is passed on render(:issue:`39660`) -- :meth:`.Styler.format` now accepts the keyword argument ``escape`` for optional HTML and LaTeX escaping (:issue:`40437`) -- :meth:`.Styler.background_gradient` now allows the ability to supply a specific gradient map (:issue:`22727`) -- :meth:`.Styler.clear` now clears :attr:`Styler.hidden_index` and :attr:`Styler.hidden_columns` as well (:issue:`40484`) -- Builtin highlighting methods in :class:`.Styler` have a more consistent signature and css customisability (:issue:`40242`) -- :meth:`.Styler.highlight_between` added to list of builtin styling methods (:issue:`39821`) - :meth:`Series.loc` now raises a helpful error message when the Series has a :class:`MultiIndex` and the indexer has too many dimensions (:issue:`35349`) - :func:`read_stata` now supports reading data from compressed files (:issue:`26599`) -- Add support for parsing ``ISO 8601``-like timestamps with negative signs to :class:`Timedelta` (:issue:`37172`) -- Add support for unary operators in :class:`FloatingArray` (:issue:`38749`) +- Added support for parsing ``ISO 8601``-like timestamps with negative signs to :class:`Timedelta` (:issue:`37172`) +- Added support for unary operators in :class:`FloatingArray` (:issue:`38749`) - :class:`RangeIndex` can now be constructed by passing a ``range`` object directly e.g. ``pd.RangeIndex(range(3))`` (:issue:`12067`) - :meth:`Series.round` and :meth:`DataFrame.round` now work with nullable integer and floating dtypes (:issue:`38844`) - :meth:`read_csv` and :meth:`read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`) @@ -301,8 +284,8 @@ These are bug fixes that might have notable behavior changes. 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Previously, when calling :meth:`Categorical.unique` with categorical data, unused categories in the new array -would be removed, meaning that the dtype of the new array would be different than the -original, if some categories are not present in the unique array (:issue:`18291`) +would be removed, making the dtype of the new array different than the +original (:issue:`18291`) As an example of this, given: @@ -458,7 +441,7 @@ In pandas 1.3.0, ``df`` continues to share data with ``values`` .. _whatsnew_130.notable_bug_fixes.setitem_never_inplace: -Never Operate Inplace When Setting ``frame[keys] = values`` +Never operate inplace when setting ``frame[keys] = values`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When setting multiple columns using ``frame[keys] = values`` new arrays will @@ -493,7 +476,7 @@ In the new behavior, we get a new array, and retain an integer-dtyped ``5``: .. _whatsnew_130.notable_bug_fixes.setitem_with_bool_casting: -Consistent Casting With Setting Into Boolean Series +Consistent casting with setting into Boolean Series ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setting non-boolean values into a :class:`Series` with ``dtype=bool`` now consistently @@ -695,12 +678,12 @@ Other API changes ^^^^^^^^^^^^^^^^^ - Partially initialized :class:`CategoricalDtype` objects (i.e. those with ``categories=None``) will no longer compare as equal to fully initialized dtype objects (:issue:`38516`) - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) -- Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) +- Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as `turbodbc <https://turbodbc.readthedocs.io/en/latest/>`_ (:issue:`36893`) - Removed redundant ``freq`` from :class:`PeriodIndex` string representation (:issue:`41653`) - :meth:`ExtensionDtype.construct_array_type` is now a required method instead of an optional one for :class:`ExtensionDtype` subclasses (:issue:`24860`) Build -===== +^^^^^ - Documentation in ``.pptx`` and ``.pdf`` formats are no longer included in wheels or source distributions. (:issue:`30741`) @@ -770,7 +753,7 @@ Deprecations .. _whatsnew_130.deprecations.nuisance_columns: -Deprecated Dropping Nuisance Columns in DataFrame Reductions and DataFrameGroupBy Operations +Deprecated dropping nuisance columns in DataFrame reductions and DataFrameGroupBy operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Calling a reduction (e.g. 
``.min``, ``.max``, ``.sum``) on a :class:`DataFrame` with ``numeric_only=None`` (the default), columns where the reduction raises a ``TypeError`` @@ -857,7 +840,7 @@ Performance improvements - Performance improvement in :meth:`IntervalIndex.isin` (:issue:`38353`) - Performance improvement in :meth:`Series.mean` for nullable data types (:issue:`34814`) - Performance improvement in :meth:`Series.isin` for nullable data types (:issue:`38340`) -- Performance improvement in :meth:`DataFrame.fillna` with ``method="pad|backfill"`` for nullable floating and nullable integer dtypes (:issue:`39953`) +- Performance improvement in :meth:`DataFrame.fillna` with ``method="pad"`` or ``method="backfill"`` for nullable floating and nullable integer dtypes (:issue:`39953`) - Performance improvement in :meth:`DataFrame.corr` for ``method=kendall`` (:issue:`28329`) - Performance improvement in :meth:`DataFrame.corr` for ``method=spearman`` (:issue:`40956`) - Performance improvement in :meth:`.Rolling.corr` and :meth:`.Rolling.cov` (:issue:`39388`) @@ -865,7 +848,8 @@ Performance improvements - Performance improvement in :func:`unique` for object data type (:issue:`37615`) - Performance improvement in :func:`json_normalize` for basic cases (including separators) (:issue:`40035` :issue:`15621`) - Performance improvement in :class:`.ExpandingGroupby` aggregation methods (:issue:`39664`) -- Performance improvement in :class:`.Styler` where render times are more than 50% reduced (:issue:`39972` :issue:`39952`) +- Performance improvement in :class:`.Styler` where render times are more than 50% reduced and now matches :meth:`DataFrame.to_html` (:issue:`39972` :issue:`39952`, :issue:`40425`) +- The method :meth:`.Styler.set_td_classes` is now as performant as :meth:`.Styler.apply` and :meth:`.Styler.applymap`, and even more so in some cases (:issue:`40453`) - Performance improvement in :meth:`.ExponentialMovingWindow.mean` with ``times`` (:issue:`39784`) - Performance improvement in :meth:`.GroupBy.apply` when requiring the python fallback implementation (:issue:`40176`) - Performance improvement in the conversion of a PyArrow Boolean array to a pandas nullable Boolean array (:issue:`41051`) @@ -978,7 +962,7 @@ Indexing - Bug in :meth:`DataFrame.reindex` and :meth:`Series.reindex` with timezone aware indexes raising a ``TypeError`` for ``method="ffill"`` and ``method="bfill"`` and specified ``tolerance`` (:issue:`38566`) - Bug in :meth:`DataFrame.reindex` with ``datetime64[ns]`` or ``timedelta64[ns]`` incorrectly casting to integers when the ``fill_value`` requires casting to object dtype (:issue:`39755`) - Bug in :meth:`DataFrame.__setitem__` raising a ``ValueError`` when setting on an empty :class:`DataFrame` using specified columns and a nonempty :class:`DataFrame` value (:issue:`38831`) -- Bug in :meth:`DataFrame.loc.__setitem__` raising ValueError when expanding unique column for :class:`DataFrame` with duplicate columns (:issue:`38521`) +- Bug in :meth:`DataFrame.loc.__setitem__` raising a ``ValueError`` when operating on a unique column when the :class:`DataFrame` has duplicate columns (:issue:`38521`) - Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`) - Bug in :meth:`Series.loc.__setitem__` and :meth:`DataFrame.loc.__setitem__` raising ``KeyError`` when provided a Boolean generator (:issue:`39614`) - Bug in :meth:`Series.iloc` and :meth:`DataFrame.iloc` raising a ``KeyError`` when provided a generator 
(:issue:`39614`) @@ -1041,7 +1025,7 @@ I/O - Allow custom error values for the ``parse_dates`` argument of :func:`read_sql`, :func:`read_sql_query` and :func:`read_sql_table` (:issue:`35185`) - Bug in :meth:`DataFrame.to_hdf` and :meth:`Series.to_hdf` raising a ``KeyError`` when trying to apply for subclasses of ``DataFrame`` or ``Series`` (:issue:`33748`) - Bug in :meth:`.HDFStore.put` raising a wrong ``TypeError`` when saving a DataFrame with non-string dtype (:issue:`34274`) -- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`) +- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned DataFrame (:issue:`35923`) - Bug in :func:`read_csv` applying the thousands separator to date columns when the column should be parsed for dates and ``usecols`` is specified for ``engine="python"`` (:issue:`39365`) - Bug in :func:`read_excel` forward filling :class:`MultiIndex` names when multiple header and index columns are specified (:issue:`34673`) - Bug in :func:`read_excel` not respecting :func:`set_option` (:issue:`34252`) @@ -1062,7 +1046,7 @@ I/O - Bug in :func:`read_csv` silently ignoring ``sep`` if ``delimiter`` and ``sep`` are defined, now raising a ``ValueError`` (:issue:`39823`) - Bug in :func:`read_csv` and :func:`read_table` misinterpreting arguments when ``sys.setprofile`` had been previously called (:issue:`41069`) - Bug in the conversion from PyArrow to pandas (e.g. for reading Parquet) with nullable dtypes and a PyArrow array whose data buffer size is not a multiple of the dtype size (:issue:`40896`) -- Bug in :func:`read_excel` would raise an error when pandas could not determine the file type, even when user specified the ``engine`` argument (:issue:`41225`) +- Bug in :func:`read_excel` would raise an error when pandas could not determine the file type even though the user specified the ``engine`` argument (:issue:`41225`) - Bug in :func:`read_clipboard` copying from an excel file shifts values into the wrong column if there are null values in first column (:issue:`41108`) Period @@ -1087,7 +1071,7 @@ Groupby/resample/rolling - Bug in :meth:`.SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical Series were not tallied (:issue:`38672`) - Bug in :meth:`.SeriesGroupBy.value_counts` where an error was raised on an empty Series (:issue:`39172`) - Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`) -- Fixed bug in :meth:`.GroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`) +- Fixed bug in :meth:`.GroupBy.sum` causing a loss of precision by now using Kahan summation (:issue:`38778`) - Fixed bug in :meth:`.GroupBy.cumsum` and :meth:`.GroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`) - Bug in :meth:`.Resampler.aggregate` and :meth:`DataFrame.transform` raising a ``TypeError`` instead of ``SpecificationError`` when missing keys had mixed dtypes (:issue:`39025`) - Bug in :meth:`.DataFrameGroupBy.idxmin` and :meth:`.DataFrameGroupBy.idxmax` with ``ExtensionDtype`` columns (:issue:`38733`) @@ -1160,10 +1144,10 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ -- Bug in :meth:`DataFrame.where` when ``other`` is a :class:`Series` with :class:`ExtensionArray` dtype (:issue:`38729`) +- Bug in :meth:`DataFrame.where` when ``other`` is a Series with an :class:`ExtensionDtype` 
(:issue:`38729`) - Fixed bug where :meth:`Series.idxmax`, :meth:`Series.idxmin`, :meth:`Series.argmax`, and :meth:`Series.argmin` would fail when the underlying data is an :class:`ExtensionArray` (:issue:`32749`, :issue:`33719`, :issue:`36566`) - Fixed bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`) -- Bug in :meth:`DataFrame.mask` where masking a :class:`Dataframe` with an :class:`ExtensionArray` dtype raises ``ValueError`` (:issue:`40941`) +- Bug in :meth:`DataFrame.mask` where masking a DataFrame with an :class:`ExtensionDtype` raises a ``ValueError`` (:issue:`40941`) Styler ^^^^^^ @@ -1172,10 +1156,10 @@ Styler - :class:`.Styler` rendered HTML output has seen minor alterations to support w3 good code standards (:issue:`39626`) - Bug in :class:`.Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`) - Bug in :meth:`.Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`) -- Bug in :class:`.Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`) +- Bug in :meth:`.Styler.set_table_styles` where multiple elements in CSS-selectors of the ``table_styles`` argument were not correctly added (:issue:`34061`) - Bug in :class:`.Styler` where copying from Jupyter dropped the top left cell and misaligned headers (:issue:`12147`) - Bug in :class:`Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`) -- Bug in :class:`.Styler` caused CSS to duplicate on multiple renders (:issue:`39395`, :issue:`40334`) +- Bug in :class:`.Styler` causing CSS to duplicate on multiple renders (:issue:`39395`, :issue:`40334`) Other
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Minor fixups. After I get feedback here, I plan to make a PR with detailed instructions about adding to the whatsnew. cc @attack68 - the Styler sections had major revisions, wanted to get any feedback here. cc @jorisvandenbossche @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/41747
2021-05-31T18:21:33Z
2021-06-09T11:56:59Z
2021-06-09T11:56:59Z
2021-06-09T15:50:27Z
Fix 32bit test; follow-up to #41709
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index 65ae904d1083a..b4012c6a842a6 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -412,8 +412,8 @@ def test_constructor_errors(self, constructor): with pytest.raises(TypeError, match=msg): constructor(5) - # not an interval - msg = "type <class 'numpy.int64'> with value 0 is not an interval" + # not an interval; dtype depends on 32bit/windows builds + msg = "type <class 'numpy.int(32|64)'> with value 0 is not an interval" with pytest.raises(TypeError, match=msg): constructor([0, 1])
xref #41709
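As a side illustration (mine, not part of the PR): the dtype named in the error message varies because NumPy's default integer follows the platform's C `long`, so matching both widths with `int(32|64)` covers 32-bit and Windows builds as well as 64-bit Linux/macOS.

```python
import numpy as np

# NumPy's default integer follows the platform's C "long" (at the time of
# this PR): int64 on 64-bit Linux/macOS, int32 on 32-bit and Windows builds.
# This is why the test regex accepts both numpy.int32 and numpy.int64.
print(np.array([0, 1]).dtype)
```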
https://api.github.com/repos/pandas-dev/pandas/pulls/41746
2021-05-31T16:21:26Z
2021-05-31T21:30:13Z
2021-05-31T21:30:13Z
2021-05-31T21:36:53Z
REF: remove unnecessary try/excepts
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 95d9409b265ce..47779dd6dba25 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -439,12 +439,6 @@ def __init__( "explicitly specify the categories order " "by passing in a categories argument." ) from err - except ValueError as err: - - # TODO(EA2D) - raise NotImplementedError( - "> 1 ndim Categorical are not supported at this time" - ) from err # we're inferring from values dtype = CategoricalDtype(categories, dtype.ordered) diff --git a/pandas/core/common.py b/pandas/core/common.py index 04ff2d2c4618f..c0e44a437f59e 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -142,11 +142,8 @@ def is_bool_indexer(key: Any) -> bool: elif is_bool_dtype(key.dtype): return True elif isinstance(key, list): - try: - arr = np.asarray(key) - return arr.dtype == np.bool_ and len(arr) == len(key) - except TypeError: # pragma: no cover - return False + arr = np.asarray(key) + return arr.dtype == np.bool_ and len(arr) == len(key) return False diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0267116cdfb99..62b75dd90c79b 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -17,7 +17,6 @@ import numpy.ma as ma from pandas._libs import lib -from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( AnyArrayLike, ArrayLike, @@ -289,9 +288,9 @@ def array( IntegerArray, IntervalArray, PandasArray, + PeriodArray, StringArray, TimedeltaArray, - period_array, ) if lib.is_scalar(data): @@ -315,12 +314,8 @@ def array( if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": - try: - return period_array(data, copy=copy) - except IncompatibleFrequency: - # We may have a mixture of frequencies. - # We choose to return an ndarray, rather than raising. - pass + return PeriodArray._from_sequence(data, copy=copy) + elif inferred_dtype == "interval": try: return IntervalArray(data, copy=copy) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a50ebd959ace..11f4da02c0d2f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6468,11 +6468,8 @@ def _maybe_cast_data_without_dtype(subarr: np.ndarray) -> ArrayLike: tda = TimedeltaArray._from_sequence(subarr, copy=False) return tda elif inferred == "period": - try: - parr = PeriodArray._from_sequence(subarr) - return parr - except IncompatibleFrequency: - pass + parr = PeriodArray._from_sequence(subarr) + return parr return subarr
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
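One user-visible consequence worth noting (a sketch inferred from the `pandas/core/construction.py` hunk above, not from PR discussion): `pd.array` no longer swallows `IncompatibleFrequency` for mixed-frequency Period data.

```python
import pandas as pd

# Previously pd.array caught IncompatibleFrequency and silently fell back
# to an object-dtype array; after this change the error propagates.
mixed = [pd.Period("2020", freq="A"), pd.Period("2020-01", freq="M")]
pd.array(mixed)  # expected to raise IncompatibleFrequency
```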
https://api.github.com/repos/pandas-dev/pandas/pulls/41744
2021-05-31T15:11:35Z
2021-05-31T20:18:52Z
2021-05-31T20:18:52Z
2021-05-31T20:20:19Z
ENH: Improve error message in corr/cov for Rolling/Expanding/EWM when other isn't a DataFrame/Series
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..4655968eb07b5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,6 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) +- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index d85aa20de5ab4..e0720c5d86df1 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,7 +1,6 @@ """Common utility functions for rolling operations""" from collections import defaultdict from typing import cast -import warnings import numpy as np @@ -15,17 +14,7 @@ def flex_binary_moment(arg1, arg2, f, pairwise=False): - if not ( - isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) - and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) - ): - raise TypeError( - "arguments to moment function must be of type np.ndarray/Series/DataFrame" - ) - - if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( - arg2, (np.ndarray, ABCSeries) - ): + if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): X, Y = prep_binary(arg1, arg2) return f(X, Y) @@ -43,7 +32,7 @@ def dataframe_from_int_dict(data, frame_template): if pairwise is False: if arg1 is arg2: # special case in order to handle duplicate column names - for i, col in enumerate(arg1.columns): + for i in range(len(arg1.columns)): results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) return dataframe_from_int_dict(results, arg1) else: @@ -51,23 +40,17 @@ def dataframe_from_int_dict(data, frame_template): raise ValueError("'arg1' columns are not unique") if not arg2.columns.is_unique: raise ValueError("'arg2' columns are not unique") - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - X, Y = arg1.align(arg2, join="outer") - X = X + 0 * Y - Y = Y + 0 * X - - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - res_columns = arg1.columns.union(arg2.columns) + X, Y = arg1.align(arg2, join="outer") + X, Y = prep_binary(X, Y) + res_columns = arg1.columns.union(arg2.columns) for col in res_columns: if col in X and col in Y: results[col] = f(X[col], Y[col]) return DataFrame(results, index=X.index, columns=res_columns) elif pairwise is True: results = defaultdict(dict) - for i, k1 in enumerate(arg1.columns): - for j, k2 in enumerate(arg2.columns): + for i in range(len(arg1.columns)): + for j in range(len(arg2.columns)): if j < i and arg2 is arg1: # Symmetric case results[i][j] = results[j][i] @@ -85,10 +68,10 @@ def dataframe_from_int_dict(data, frame_template): result = concat( [ concat( - [results[i][j] for j, c in enumerate(arg2.columns)], + [results[i][j] for j in range(len(arg2.columns))], ignore_index=True, ) - for i, c in enumerate(arg1.columns) + for i in range(len(arg1.columns)) ], ignore_index=True, axis=1, @@ -135,13 +118,10 @@ def dataframe_from_int_dict(data, 
frame_template): ) return result - - else: - raise ValueError("'pairwise' is not True/False") else: results = { i: f(*prep_binary(arg1.iloc[:, i], arg2)) - for i, col in enumerate(arg1.columns) + for i in range(len(arg1.columns)) } return dataframe_from_int_dict(results, arg1) @@ -165,11 +145,7 @@ def zsqrt(x): def prep_binary(arg1, arg2): - if not isinstance(arg2, type(arg1)): - raise Exception("Input arrays must be of the same type!") - # mask out values, this also makes a common index... X = arg1 + 0 * arg2 Y = arg2 + 0 * arg1 - return X, Y diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index dfb74b38cd9cf..2d5f148a6437a 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -472,6 +472,8 @@ def _apply_pairwise( other = target # only default unset pairwise = True if pairwise is None else pairwise + elif not isinstance(other, (ABCDataFrame, ABCSeries)): + raise ValueError("other must be a DataFrame or Series") return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py index a36091ab8934e..c79d02fd3237e 100644 --- a/pandas/tests/window/moments/test_moments_consistency_ewm.py +++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py @@ -64,9 +64,9 @@ def test_different_input_array_raise_exception(name): A = Series(np.random.randn(50), index=np.arange(50)) A[:10] = np.NaN - msg = "Input arrays must be of the same type!" + msg = "other must be a DataFrame or Series" # exception raised is Exception - with pytest.raises(Exception, match=msg): + with pytest.raises(ValueError, match=msg): getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50)) diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index 28fd5633de02e..7ec5846ef4acf 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -13,7 +13,6 @@ Series, ) import pandas._testing as tm -from pandas.core.window.common import flex_binary_moment def _rolling_consistency_cases(): @@ -133,14 +132,6 @@ def test_rolling_corr_with_zero_variance(window): assert s.rolling(window=window).corr(other=other).isna().all() -def test_flex_binary_moment(): - # GH3155 - # don't blow the stack - msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame" - with pytest.raises(TypeError, match=msg): - flex_binary_moment(5, 6, None) - - def test_corr_sanity(): # GH 3155 df = DataFrame(
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Also some internal method cleanups
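For illustration, here is the new failure mode as exercised by the updated test (error wording taken from the diff):

```python
import numpy as np
import pandas as pd

ser = pd.Series(np.random.randn(50))
# Passing a plain ndarray as `other` now raises a clearer error:
#   ValueError: other must be a DataFrame or Series
ser.ewm(com=20, min_periods=5).corr(np.random.randn(50))
```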
https://api.github.com/repos/pandas-dev/pandas/pulls/41741
2021-05-31T03:56:59Z
2021-05-31T14:48:46Z
2021-05-31T14:48:46Z
2021-05-31T16:30:49Z
TST: Make ARM build work (not in the CI)
diff --git a/.circleci/config.yml deleted file mode 100644 index 5ff2f783e6a96..0000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2.1 - -jobs: - test-arm: - machine: - image: ubuntu-2004:202101-01 - resource_class: arm.medium - environment: - ENV_FILE: ci/deps/circle-37-arm64.yaml - PYTEST_WORKERS: auto - PATTERN: "not slow and not network and not clipboard and not arm_slow" - steps: - - run: echo "CircleCI is working" - -workflows: - test: - jobs: - - test-arm diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/circle-37-arm64.yaml similarity index 100% rename from ci/deps/travis-37-arm64.yaml rename to ci/deps/circle-37-arm64.yaml diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..e6bd9950331ca 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -12,41 +12,30 @@ if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then echo fi -MINICONDA_DIR="$HOME/miniconda3" - - -if [ -d "$MINICONDA_DIR" ]; then - echo - echo "rm -rf "$MINICONDA_DIR"" - rm -rf "$MINICONDA_DIR" -fi echo "Install Miniconda" -UNAME_OS=$(uname) -if [[ "$UNAME_OS" == 'Linux' ]]; then +DEFAULT_CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest" +if [[ "$(uname -m)" == 'aarch64' ]]; then + CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.10.1-4/Miniforge3-4.10.1-4-Linux-aarch64.sh" +elif [[ "$(uname)" == 'Linux' ]]; then if [[ "$BITS32" == "yes" ]]; then - CONDA_OS="Linux-x86" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86.sh" else - CONDA_OS="Linux-x86_64" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86_64.sh" fi -elif [[ "$UNAME_OS" == 'Darwin' ]]; then - CONDA_OS="MacOSX-x86_64" +elif [[ "$(uname)" == 'Darwin' ]]; then + CONDA_URL="$DEFAULT_CONDA_URL-MacOSX-x86_64.sh" else - echo "OS $UNAME_OS not supported" + echo "OS $(uname) not supported" exit 1 fi - -if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh" -else - CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" -fi +echo "Downloading $CONDA_URL" wget -q $CONDA_URL -O miniconda.sh chmod +x miniconda.sh -# Installation path is required for ARM64 platform as miniforge script installs in path $HOME/miniforge3. +MINICONDA_DIR="$HOME/miniconda3" +rm -rf $MINICONDA_DIR ./miniconda.sh -b -p $MINICONDA_DIR - export PATH=$MINICONDA_DIR/bin:$PATH echo diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 8d64bf8852946..369832e9bc05c 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -92,6 +92,18 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" +def is_platform_arm() -> bool: + """ + Checking if the running platform uses ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("arm64", "aarch64") + + def import_lzma(): """ Importing the `lzma` module.
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index cac145aa30fd0..bdb9c3f97e798 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm + from pandas.core.dtypes.dtypes import ( CategoricalDtype, IntervalDtype, @@ -168,6 +170,7 @@ def test_subtype_integer_with_non_integer_borders(self, subtype): ) tm.assert_index_equal(result, expected) + @pytest.mark.xfail(is_platform_arm(), reason="GH 41740") def test_subtype_integer_errors(self): # float64 -> uint64 fails with negative values index = interval_range(-10.0, 10.0) diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index eecb9492f29e3..643a5617abbeb 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -4,6 +4,8 @@ from numpy import iinfo import pytest +from pandas.compat import is_platform_arm + import pandas as pd from pandas import ( DataFrame, @@ -750,7 +752,7 @@ def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected "UInt64", "signed", "UInt64", - marks=pytest.mark.xfail(reason="GH38798"), + marks=pytest.mark.xfail(not is_platform_arm(), reason="GH38798"), ), ([1, 1], "Int64", "unsigned", "UInt8"), ([1.0, 1.0], "Float32", "unsigned", "UInt8"), diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c28d54dd9fbfb..17a6d9216ca92 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -6,6 +6,7 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm from pandas.errors import UnsupportedFunctionCall from pandas import ( @@ -1072,6 +1073,7 @@ def test_rolling_sem(frame_or_series): tm.assert_series_equal(result, expected) +@pytest.mark.xfail(is_platform_arm(), reason="GH 41740") @pytest.mark.parametrize( ("func", "third_value", "values"), [
- [X] closes #41737 This seems to be working, see: https://app.circleci.com/pipelines/github/datapythonista/pandas/11/workflows/2ca0487f-5a63-4f01-936c-e83827db714f/jobs/11 But tests are failing. I opened #41740 for the failures.
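The compat helper added above is small enough to restate; a minimal sketch (assuming pytest) of how it gates the platform-specific xfails:

```python
import platform

import pytest


def is_platform_arm() -> bool:
    # Mirrors pandas.compat.is_platform_arm from the diff above.
    return platform.machine() in ("arm64", "aarch64")


@pytest.mark.xfail(is_platform_arm(), reason="GH 41740")
def test_known_arm_failure():
    ...
```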
https://api.github.com/repos/pandas-dev/pandas/pulls/41739
2021-05-31T01:04:02Z
2021-06-01T22:29:33Z
2021-06-01T22:29:33Z
2021-06-04T01:22:15Z
CLN: Remove travis build
diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 52fadca6b7846..0000000000000 --- a/.travis.yml +++ /dev/null @@ -1,72 +0,0 @@ -language: python -python: 3.7 - -addons: - apt: - update: true - packages: - - xvfb - -services: - - xvfb - -# To turn off cached cython files and compiler cache -# set NOCACHE-true -# To delete caches go to https://travis-ci.org/OWNER/REPOSITORY/caches or run -# travis cache --delete inside the project directory from the travis command line client -# The cache directories will be deleted if anything in ci/ changes in a commit -cache: - apt: true - ccache: true - directories: - - $HOME/.cache # cython cache - -env: - global: - # create a github personal access token - # cd pandas-dev/pandas - # travis encrypt 'PANDAS_GH_TOKEN=personal_access_token' -r pandas-dev/pandas - - secure: "EkWLZhbrp/mXJOx38CHjs7BnjXafsqHtwxPQrqWy457VDFWhIY1DMnIR/lOWG+a20Qv52sCsFtiZEmMfUjf0pLGXOqurdxbYBGJ7/ikFLk9yV2rDwiArUlVM9bWFnFxHvdz9zewBH55WurrY4ShZWyV+x2dWjjceWG5VpWeI6sA=" - -git: - depth: false - -matrix: - fast_finish: true - - include: - - arch: arm64-graviton2 - virt: lxd - group: edge - env: - - JOB="3.7, arm64" PYTEST_WORKERS="auto" ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)" - -before_install: - - echo "before_install" - # Use blocking IO on travis. Ref: https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 - - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' - - export PATH="$HOME/miniconda3/bin:$PATH" - - df -h - - pwd - - uname -a - - git --version - - ./ci/check_git_tags.sh - -install: - - echo "install start" - - ci/prep_cython_cache.sh - - ci/setup_env.sh - - ci/submit_cython_cache.sh - - echo "install done" - -script: - - echo "script start" - - echo "$JOB" - - source activate pandas-dev - - ci/run_tests.sh - -after_script: - - echo "after_script start" - - source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - - ci/print_skipped.py - - echo "after_script done" diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh deleted file mode 100755 index 9dbcd4f98683e..0000000000000 --- a/ci/check_git_tags.sh +++ /dev/null @@ -1,28 +0,0 @@ -set -e - -if [[ ! 
$(git tag) ]]; then - echo "No git tags in clone, please sync your git tags with upstream using:" - echo " git fetch --tags upstream" - echo " git push --tags origin" - echo "" - echo "If the issue persists, the clone depth needs to be increased in .travis.yml" - exit 1 -fi - -# This will error if there are no tags and we omit --always -DESCRIPTION=$(git describe --long --tags) -echo "$DESCRIPTION" - -if [[ "$DESCRIPTION" == *"untagged"* ]]; then - echo "Unable to determine most recent tag, aborting build" - exit 1 -else - if [[ "$DESCRIPTION" != *"g"* ]]; then - # A good description will have the hash prefixed by g, a bad one will be - # just the hash - echo "Unable to determine most recent tag, aborting build" - exit 1 - else - echo "$(git tag)" - fi -fi diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh deleted file mode 100755 index 18d9388327ddc..0000000000000 --- a/ci/prep_cython_cache.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -ls "$HOME/.cache/" - -PYX_CACHE_DIR="$HOME/.cache/pyxfiles" -pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` -pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` - -CACHE_File="$HOME/.cache/cython_files.tar" - -# Clear the cython cache 0 = NO, 1 = YES -clear_cache=0 - -pyx_files=`echo "$pyx_file_list" | wc -l` -pyx_cache_files=`echo "$pyx_cache_file_list" | wc -l` - -if [[ pyx_files -ne pyx_cache_files ]] -then - echo "Different number of pyx files" - clear_cache=1 -fi - -home_dir=$(pwd) - -if [ -f "$CACHE_File" ] && [ -z "$NOCACHE" ] && [ -d "$PYX_CACHE_DIR" ]; then - - echo "Cache available - checking pyx diff" - - for i in ${pyx_file_list} - do - diff=`diff -u $i $PYX_CACHE_DIR${i}` - if [[ $? -eq 2 ]] - then - echo "${i##*/} can't be diffed; probably not in cache" - clear_cache=1 - fi - if [[ ! 
-z $diff ]] - then - echo "${i##*/} has changed:" - echo $diff - clear_cache=1 - fi - done - - if [ "$TRAVIS_PULL_REQUEST" == "false" ] - then - echo "Not a PR" - # Uncomment next 2 lines to turn off cython caching not in a PR - # echo "Non PR cython caching is disabled" - # clear_cache=1 - else - echo "In a PR" - # Uncomment next 2 lines to turn off cython caching in a PR - # echo "PR cython caching is disabled" - # clear_cache=1 - fi - -fi - -if [ $clear_cache -eq 0 ] && [ -z "$NOCACHE" ] -then - # No and nocache is not set - echo "Will reuse cached cython file" - cd / - tar xvmf $CACHE_File - cd $home_dir -else - echo "Rebuilding cythonized files" - echo "No cache = $NOCACHE" - echo "Clear cache (1=YES) = $clear_cache" -fi - - -exit 0 diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..70a28d4467dfe 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -63,29 +63,6 @@ conda update -n base conda echo "conda info -a" conda info -a -echo -echo "set the compiler cache to work" -if [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then - echo "Using ccache" - export PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH - GCC=$(which gcc) - echo "gcc: $GCC" - CCACHE=$(which ccache) - echo "ccache: $CCACHE" - export CC='ccache gcc' -elif [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then - echo "Install ccache" - brew install ccache > /dev/null 2>&1 - echo "Using ccache" - export PATH=/usr/local/opt/ccache/libexec:$PATH - gcc=$(which gcc) - echo "gcc: $gcc" - CCACHE=$(which ccache) - echo "ccache: $CCACHE" -else - echo "Not using ccache" -fi - echo "source deactivate" source deactivate diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh deleted file mode 100755 index b87acef0ba11c..0000000000000 --- a/ci/submit_cython_cache.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -CACHE_File="$HOME/.cache/cython_files.tar" -PYX_CACHE_DIR="$HOME/.cache/pyxfiles" -pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` - -rm -rf $CACHE_File -rm -rf $PYX_CACHE_DIR - -home_dir=$(pwd) - -mkdir -p $PYX_CACHE_DIR -rsync -Rv $pyx_file_list $PYX_CACHE_DIR - -echo "pyx files:" -echo $pyx_file_list - -tar cf ${CACHE_File} --files-from /dev/null - -for i in ${pyx_file_list} -do - f=${i%.pyx} - ls $f.{c,cpp} | tar rf ${CACHE_File} -T - -done - -echo "Cython files in cache tar:" -tar tvf ${CACHE_File} - -exit 0
- [X] closes #38943 Once we can assume that Travis is not an option for ARM builds (or any other builds, due to the lack of free credits), this is the PR that removes all Travis-specific stuff.
https://api.github.com/repos/pandas-dev/pandas/pulls/41738
2021-05-30T23:06:30Z
2021-06-01T13:51:18Z
2021-06-01T13:51:18Z
2021-06-01T13:51:18Z
CLN: Remove old docs README
diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index 5423e7419d03b..0000000000000 --- a/doc/README.rst +++ /dev/null @@ -1 +0,0 @@ -See `contributing.rst <https://pandas-docs.github.io/pandas-docs-travis/contributing.html>`_ in this repo.
Not sure why a README was created under `docs` in the first place. But having a docs/README that, instead of containing docs-related information, just points to the contributing page doesn't seem very useful, since the main README already provides better information on how to contribute. And in any case, the link in that README file is very old.
https://api.github.com/repos/pandas-dev/pandas/pulls/41735
2021-05-30T21:09:50Z
2021-05-31T15:46:23Z
2021-05-31T15:46:23Z
2021-05-31T15:46:27Z
DEPR: silent overflow on Series construction
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 3c4b38a93b8ee..18ab118c4bf16 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -698,6 +698,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- Deprecated behavior of :class:`Series` construction with large-integer values and small-integer dtype silently overflowing; use ``Series(data).astype(dtype)`` instead (:issue:`41734`) - Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8a230e5da01dc..5c7211a5d1852 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2036,7 +2036,7 @@ def construct_1d_ndarray_preserving_na( def maybe_cast_to_integer_array( arr: list | np.ndarray, dtype: np.dtype, copy: bool = False -): +) -> np.ndarray: """ Takes any dtype and returns the casted version, raising for when data is incompatible with integer/unsigned integer dtypes. @@ -2107,6 +2107,20 @@ def maybe_cast_to_integer_array( if is_float_dtype(arr.dtype) or is_object_dtype(arr.dtype): raise ValueError("Trying to coerce float values to integers") + if casted.dtype < arr.dtype: + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + warnings.warn( + f"Values are too large to be losslessly cast to {dtype}. " + "In a future version this will raise OverflowError. To retain the " + f"old behavior, use pd.Series(values).astype({dtype})", + FutureWarning, + stacklevel=find_stack_level(), + ) + return casted + + # No known cases that get here, but raising explicitly to cover our bases. 
+ raise ValueError(f"values cannot be losslessly cast to {dtype}") + def convert_scalar_for_putitemlike(scalar: Scalar, dtype: np.dtype) -> Scalar: """ diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 4a7c4faade00d..b617514f383af 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -358,7 +358,7 @@ def test_unstack_preserve_dtypes(self): "E": Series([1.0, 50.0, 100.0]).astype("float32"), "F": Series([3.0, 4.0, 5.0]).astype("float64"), "G": False, - "H": Series([1, 200, 923442], dtype="int8"), + "H": Series([1, 200, 923442]).astype("int8"), } ) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f03322f9b0d6c..9376bd5f025b3 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -711,6 +711,21 @@ def test_constructor_cast(self): with pytest.raises(ValueError, match=msg): Series(["a", "b", "c"], dtype=float) + def test_constructor_signed_int_overflow_deprecation(self): + # GH#41734 disallow silent overflow + msg = "Values are too large to be losslessly cast" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([1, 200, 923442], dtype="int8") + + expected = Series([1, -56, 50], dtype="int8") + tm.assert_series_equal(ser, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([1, 200, 923442], dtype="uint8") + + expected = Series([1, 200, 50], dtype="uint8") + tm.assert_series_equal(ser, expected) + def test_constructor_unsigned_dtype_overflow(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers"
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry xref #40114 which tried to change the behavior without a deprecation cycle. Handling DataFrame separately, as it is an entirely separate barrel of worms.
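A concrete before/after, taken from the new test in the diff:

```python
import pandas as pd

# Deprecated: silently wraps to [1, -56, 50] and now emits a FutureWarning.
ser = pd.Series([1, 200, 923442], dtype="int8")

# Replacement suggested by the warning message: cast explicitly.
ser = pd.Series([1, 200, 923442]).astype("int8")
```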
https://api.github.com/repos/pandas-dev/pandas/pulls/41734
2021-05-30T16:15:57Z
2021-06-01T14:54:02Z
2021-06-01T14:54:02Z
2021-06-01T16:00:09Z
DEPR: ignoring dtype in DataFrame constructor failures
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..b8c28bb8daadd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,7 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) -- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. --------------------------------------------------------------------------- @@ -686,6 +686,7 @@ Deprecations - Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) +- Deprecated behavior of :class:`DataFrame` constructor when a ``dtype`` is passed and the data cannot be cast to that dtype. In a future version, this will raise instead of being silently ignored (:issue:`24435`) - Deprecated passing arguments as positional (except for ``"method"``) in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, and :meth:`Series.bfill` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.sort_values` (other than ``"by"``) and :meth:`Series.sort_values` (:issue:`41485`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0c299056075c1..ff73bc227fdb2 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -12,6 +12,7 @@ Sequence, cast, ) +import warnings import numpy as np import numpy.ma as ma @@ -745,6 +746,17 @@ def _try_cast( if raise_cast_failure: raise else: + # we only get here with raise_cast_failure False, which means + # called via the DataFrame constructor + # GH#24435 + warnings.warn( + f"Could not cast to {dtype}, falling back to object. This " + "behavior is deprecated. 
In a future version, when a dtype is " + "passed to 'DataFrame', either all columns will be cast to that " + "dtype, or a TypeError will be raised", + FutureWarning, + stacklevel=7, + ) subarr = np.array(arr, dtype=object, copy=copy) return subarr diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index ba0acdc4f947b..34854be29ad1f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -258,8 +258,11 @@ def f(dtype): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) # these work (though results may be unexpected) - f("int64") - f("float64") + depr_msg = "either all columns will be cast to that dtype, or a TypeError will" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + f("int64") + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + f("float64") # 10822 # invalid error message on dt inference diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index d118a376b56ec..784969c199c9f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -207,7 +207,9 @@ def test_constructor_mixed(self, float_string_frame): assert float_string_frame["foo"].dtype == np.object_ def test_constructor_cast_failure(self): - foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) + msg = "either all columns will be cast to that dtype, or a TypeError will" + with tm.assert_produces_warning(FutureWarning, match=msg): + foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) assert foo["a"].dtype == object # GH 3010, constructing with odd arrays @@ -683,7 +685,10 @@ def test_constructor_dict_cast2(self): "A": dict(zip(range(20), tm.makeStringIndex(20))), "B": dict(zip(range(15), np.random.randn(15))), } - frame = DataFrame(test_data, dtype=float) + msg = "either all columns will be cast to that dtype, or a TypeError will" + with tm.assert_produces_warning(FutureWarning, match=msg): + frame = DataFrame(test_data, dtype=float) + assert len(frame) == 20 assert frame["A"].dtype == np.object_ assert frame["B"].dtype == np.float64 diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index f07bf3464b74c..f1fbe0c5a6b9c 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -206,27 +206,26 @@ def test_frame_getitem_nan_multiindex(nulls_fixture): df = DataFrame( [[11, n, 13], [21, n, 23], [31, n, 33], [41, n, 43]], columns=cols, - dtype="int64", ).set_index(["a", "b"]) + df["c"] = df["c"].astype("int64") idx = (21, n) result = df.loc[:idx] - expected = DataFrame( - [[11, n, 13], [21, n, 23]], columns=cols, dtype="int64" - ).set_index(["a", "b"]) + expected = DataFrame([[11, n, 13], [21, n, 23]], columns=cols).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") tm.assert_frame_equal(result, expected) result = df.loc[idx:] expected = DataFrame( - [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols, dtype="int64" + [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols ).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") tm.assert_frame_equal(result, expected) idx1, idx2 = (21, n), (31, n) result = df.loc[idx1:idx2] - expected = DataFrame( - [[21, n, 23], [31, n, 33]], columns=cols, dtype="int64" - ).set_index(["a", "b"]) + expected = DataFrame([[21, n, 23], [31, n, 33]], columns=cols).set_index(["a", "b"]) + expected["c"] = 
expected["c"].astype("int64") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index c1a096ed06efc..ab868a3d3713d 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -284,7 +284,12 @@ def test_loc_setitem_dtype(self): df.loc[:, cols] = df.loc[:, cols].astype("float32") expected = DataFrame( - {"id": ["A"], "a": [1.2], "b": [0.0], "c": [-2.5]}, dtype="float32" + { + "id": ["A"], + "a": np.array([1.2], dtype="float32"), + "b": np.array([0.0], dtype="float32"), + "c": np.array([-2.5], dtype="float32"), + } ) # id is inferred as object tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index 8af49ac20987a..653ea88ed62ac 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -272,8 +272,9 @@ def test_dataframe_dummies_subset(self, df, sparse): "from_A_a": [1, 0, 1], "from_A_b": [0, 1, 0], }, - dtype=np.uint8, ) + cols = expected.columns + expected[cols[1:]] = expected[cols[1:]].astype(np.uint8) expected[["C"]] = df[["C"]] if sparse: cols = ["from_A_a", "from_A_b"]
- [x] closes #24435 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
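A short sketch of the deprecated path, following the updated `test_constructor_cast_failure`:

```python
import pandas as pd

# Emits FutureWarning: "Could not cast to float64, falling back to object".
# A future version is expected to raise a TypeError here instead.
df = pd.DataFrame({"a": ["a", "b", "c"]}, dtype="float64")
assert df["a"].dtype == object
```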
https://api.github.com/repos/pandas-dev/pandas/pulls/41733
2021-05-30T15:40:44Z
2021-06-01T13:52:10Z
2021-06-01T13:52:10Z
2021-06-01T15:58:07Z
[ArrowStringArray] ENH: raise an ImportError when trying to create an arrow string dtype if pyarrow is not installed
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 219c52c4a65b9..d6d7743f3f5f3 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -23,11 +23,11 @@ type_t, ) from pandas.compat import ( + pa_version_under1p0, pa_version_under2p0, pa_version_under3p0, pa_version_under4p0, ) -from pandas.compat.pyarrow import pa_version_under1p0 from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs @@ -55,31 +55,33 @@ ) from pandas.core.strings.object_array import ObjectStringArrayMixin -try: +# PyArrow backed StringArrays are available starting at 1.0.0, but this +# file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute +# and its compute functions existed. GH38801 +if not pa_version_under1p0: import pyarrow as pa -except ImportError: - pa = None -else: - # PyArrow backed StringArrays are available starting at 1.0.0, but this - # file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute - # and its compute functions existed. GH38801 - if not pa_version_under1p0: - import pyarrow.compute as pc - - ARROW_CMP_FUNCS = { - "eq": pc.equal, - "ne": pc.not_equal, - "lt": pc.less, - "gt": pc.greater, - "le": pc.less_equal, - "ge": pc.greater_equal, - } + import pyarrow.compute as pc + + ARROW_CMP_FUNCS = { + "eq": pc.equal, + "ne": pc.not_equal, + "lt": pc.less, + "gt": pc.greater, + "le": pc.less_equal, + "ge": pc.greater_equal, + } if TYPE_CHECKING: from pandas import Series +def _chk_pyarrow_available() -> None: + if pa_version_under1p0: + msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." + raise ImportError(msg) + + @register_extension_dtype class ArrowStringDtype(StringDtype): """ @@ -112,6 +114,9 @@ class ArrowStringDtype(StringDtype): #: StringDtype.na_value uses pandas.NA na_value = libmissing.NA + def __init__(self): + _chk_pyarrow_available() + @property def type(self) -> type[str]: return str @@ -213,10 +218,8 @@ class ArrowStringArray(OpsMixin, ExtensionArray, ObjectStringArrayMixin): Length: 4, dtype: arrow_string """ - _dtype = ArrowStringDtype() - def __init__(self, values): - self._chk_pyarrow_available() + self._dtype = ArrowStringDtype() if isinstance(values, pa.Array): self._data = pa.chunked_array([values]) elif isinstance(values, pa.ChunkedArray): @@ -229,19 +232,11 @@ def __init__(self, values): "ArrowStringArray requires a PyArrow (chunked) array of string type" ) - @classmethod - def _chk_pyarrow_available(cls) -> None: - # TODO: maybe update import_optional_dependency to allow a minimum - # version to be specified rather than use the global minimum - if pa is None or pa_version_under1p0: - msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." 
- raise ImportError(msg) - @classmethod def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False): from pandas.core.arrays.masked import BaseMaskedArray - cls._chk_pyarrow_available() + _chk_pyarrow_available() if isinstance(scalars, BaseMaskedArray): # avoid costly conversion to object dtype in ensure_string_array and diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py index ec7f57940a67f..3db8333798e36 100644 --- a/pandas/tests/arrays/string_/test_string_arrow.py +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -3,14 +3,25 @@ import numpy as np import pytest -from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.compat import pa_version_under1p0 -pa = pytest.importorskip("pyarrow", minversion="1.0.0") +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringDtype, +) +@pytest.mark.skipif( + pa_version_under1p0, + reason="pyarrow>=1.0.0 is required for PyArrow backed StringArray", +) @pytest.mark.parametrize("chunked", [True, False]) -@pytest.mark.parametrize("array", [np, pa]) +@pytest.mark.parametrize("array", ["numpy", "pyarrow"]) def test_constructor_not_string_type_raises(array, chunked): + import pyarrow as pa + + array = pa if array == "pyarrow" else np + arr = array.array([1, 2, 3]) if chunked: if array is np: @@ -24,3 +35,20 @@ def test_constructor_not_string_type_raises(array, chunked): ) with pytest.raises(ValueError, match=msg): ArrowStringArray(arr) + + +@pytest.mark.skipif( + not pa_version_under1p0, + reason="pyarrow is installed", +) +def test_pyarrow_not_installed_raises(): + msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed StringArray") + + with pytest.raises(ImportError, match=msg): + ArrowStringDtype() + + with pytest.raises(ImportError, match=msg): + ArrowStringArray([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArray._from_sequence(["a", None, "b"])
xref https://github.com/pandas-dev/pandas/pull/39908#discussion_r585581525
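For reference, the guarded entry point as exercised by the new test (note `pandas.core.arrays.string_arrow` is an internal module and may move):

```python
from pandas.core.arrays.string_arrow import ArrowStringDtype

# With pyarrow missing or older than 1.0.0, this now raises:
#   ImportError: pyarrow>=1.0.0 is required for PyArrow backed StringArray.
dtype = ArrowStringDtype()
```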
https://api.github.com/repos/pandas-dev/pandas/pulls/41732
2021-05-30T10:54:15Z
2021-05-31T15:49:51Z
2021-05-31T15:49:51Z
2021-05-31T17:25:55Z
DEPR: datetimelike inference with strings
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..091380d6ccb6c 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -697,6 +697,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 5e1cc612bed57..06620c2ad0dca 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -153,7 +153,7 @@ def ensure_string_array( def infer_datetimelike_array( arr: np.ndarray # np.ndarray[object] -) -> str: ... +) -> tuple[str, bool]: ... def astype_intsafe( arr: np.ndarray, # np.ndarray[object] diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d7e15bb2ad197..6a270c0a55638 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1558,7 +1558,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: return "mixed" -def infer_datetimelike_array(arr: ndarray[object]) -> str: +def infer_datetimelike_array(arr: ndarray[object]) -> tuple[str, bool]: """ Infer if we have a datetime or timedelta array. 
- date: we have *only* date and maybe strings, nulls @@ -1576,12 +1576,13 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: Returns ------- str: {datetime, timedelta, date, nat, mixed} + bool """ cdef: Py_ssize_t i, n = len(arr) bint seen_timedelta = False, seen_date = False, seen_datetime = False bint seen_tz_aware = False, seen_tz_naive = False - bint seen_nat = False + bint seen_nat = False, seen_str = False list objs = [] object v @@ -1589,6 +1590,7 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: v = arr[i] if isinstance(v, str): objs.append(v) + seen_str = True if len(objs) == 3: break @@ -1609,7 +1611,7 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: seen_tz_aware = True if seen_tz_naive and seen_tz_aware: - return "mixed" + return "mixed", seen_str elif util.is_datetime64_object(v): # np.datetime64 seen_datetime = True @@ -1619,16 +1621,16 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: # timedelta, or timedelta64 seen_timedelta = True else: - return "mixed" + return "mixed", seen_str if seen_date and not (seen_datetime or seen_timedelta): - return "date" + return "date", seen_str elif seen_datetime and not seen_timedelta: - return "datetime" + return "datetime", seen_str elif seen_timedelta and not seen_datetime: - return "timedelta" + return "timedelta", seen_str elif seen_nat: - return "nat" + return "nat", seen_str # short-circuit by trying to # actually convert these strings @@ -1637,14 +1639,14 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: if len(objs): try: array_to_datetime(objs, errors="raise") - return "datetime" + return "datetime", seen_str except (ValueError, TypeError): pass # we are *not* going to infer from strings # for timedelta as too much ambiguity - return 'mixed' + return "mixed", seen_str cdef inline bint is_timedelta(object o): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c3efbfb426ab3..8a230e5da01dc 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1543,7 +1543,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: else: return td_values.reshape(shape) - inferred_type = lib.infer_datetimelike_array(ensure_object(v)) + inferred_type, seen_str = lib.infer_datetimelike_array(ensure_object(v)) if inferred_type == "datetime": # error: Incompatible types in assignment (expression has type "ExtensionArray", @@ -1572,6 +1572,15 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: # "ExtensionArray", variable has type "Union[ndarray, List[Any]]") value = try_datetime(v) # type: ignore[assignment] + if value.dtype.kind in ["m", "M"] and seen_str: + warnings.warn( + f"Inferring {value.dtype} from data containing strings is deprecated " + "and will be removed in a future version. 
To retain the old behavior " + "explicitly pass Series(data, dtype={value.dtype})", + FutureWarning, + stacklevel=find_stack_level(), + ) + # return v.reshape(shape) return value diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 88c3ad228f8c3..7e8dbea07709f 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -859,7 +859,9 @@ def test_apply_to_timedelta(): list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT] a = pd.to_timedelta(list_of_strings) # noqa - b = Series(list_of_strings).apply(pd.to_timedelta) # noqa + with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"): + ser = Series(list_of_strings) + b = ser.apply(pd.to_timedelta) # noqa # Can't compare until apply on a Series gives the correct dtype # assert_series_equal(a, b) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 215b51dd88ef4..6b3309ba8ea1b 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -328,7 +328,7 @@ def test_dt64arr_timestamp_equality(self, box_with_array): box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray ) - ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), "NaT"]) + ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) ser = tm.box_expected(ser, box_with_array) result = ser != ser diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 09efa97871fae..31903c559d8df 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1169,7 +1169,7 @@ def test_infer_dtype_period_with_na(self, na_value): ], ) def test_infer_datetimelike_array_datetime(self, data): - assert lib.infer_datetimelike_array(data) == "datetime" + assert lib.infer_datetimelike_array(data) == ("datetime", False) @pytest.mark.parametrize( "data", @@ -1181,11 +1181,11 @@ def test_infer_datetimelike_array_datetime(self, data): ], ) def test_infer_datetimelike_array_timedelta(self, data): - assert lib.infer_datetimelike_array(data) == "timedelta" + assert lib.infer_datetimelike_array(data) == ("timedelta", False) def test_infer_datetimelike_array_date(self): arr = [date(2017, 6, 12), date(2017, 3, 11)] - assert lib.infer_datetimelike_array(arr) == "date" + assert lib.infer_datetimelike_array(arr) == ("date", False) @pytest.mark.parametrize( "data", @@ -1200,7 +1200,7 @@ def test_infer_datetimelike_array_date(self): ], ) def test_infer_datetimelike_array_mixed(self, data): - assert lib.infer_datetimelike_array(data) == "mixed" + assert lib.infer_datetimelike_array(data)[0] == "mixed" @pytest.mark.parametrize( "first, expected", @@ -1218,7 +1218,7 @@ def test_infer_datetimelike_array_mixed(self, data): @pytest.mark.parametrize("second", [None, np.nan]) def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected): first.append(second) - assert lib.infer_datetimelike_array(first) == expected + assert lib.infer_datetimelike_array(first) == (expected, False) def test_infer_dtype_all_nan_nat_like(self): arr = np.array([np.nan, np.nan]) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 7cc2b7f72fb69..82e6c4daf9515 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -305,27 +305,30 @@ def test_groupby_resample_interpolate(): .resample("1D") 
.interpolate(method="linear") ) - expected_ind = pd.MultiIndex.from_tuples( - [ - (50, "2018-01-07"), - (50, Timestamp("2018-01-08")), - (50, Timestamp("2018-01-09")), - (50, Timestamp("2018-01-10")), - (50, Timestamp("2018-01-11")), - (50, Timestamp("2018-01-12")), - (50, Timestamp("2018-01-13")), - (50, Timestamp("2018-01-14")), - (50, Timestamp("2018-01-15")), - (50, Timestamp("2018-01-16")), - (50, Timestamp("2018-01-17")), - (50, Timestamp("2018-01-18")), - (50, Timestamp("2018-01-19")), - (50, Timestamp("2018-01-20")), - (50, Timestamp("2018-01-21")), - (60, Timestamp("2018-01-14")), - ], - names=["volume", "week_starting"], - ) + + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected_ind = pd.MultiIndex.from_tuples( + [ + (50, "2018-01-07"), + (50, Timestamp("2018-01-08")), + (50, Timestamp("2018-01-09")), + (50, Timestamp("2018-01-10")), + (50, Timestamp("2018-01-11")), + (50, Timestamp("2018-01-12")), + (50, Timestamp("2018-01-13")), + (50, Timestamp("2018-01-14")), + (50, Timestamp("2018-01-15")), + (50, Timestamp("2018-01-16")), + (50, Timestamp("2018-01-17")), + (50, Timestamp("2018-01-18")), + (50, Timestamp("2018-01-19")), + (50, Timestamp("2018-01-20")), + (50, Timestamp("2018-01-21")), + (60, Timestamp("2018-01-14")), + ], + names=["volume", "week_starting"], + ) expected = DataFrame( data={ "price": [ diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index dcdee01bd4df8..62a9099fab1ad 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -679,6 +679,7 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], ], ) + @pytest.mark.filterwarnings("ignore:Inferring datetime64:FutureWarning") def test_isocalendar(self, input_series, expected_output): result = pd.to_datetime(Series(input_series)).dt.isocalendar() expected_frame = DataFrame( diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py index 4c254c6db2a70..b838797b5f9b9 100644 --- a/pandas/tests/series/methods/test_combine_first.py +++ b/pandas/tests/series/methods/test_combine_first.py @@ -78,7 +78,11 @@ def test_combine_first_dt64(self): s0 = to_datetime(Series(["2010", np.NaN])) s1 = Series([np.NaN, "2011"]) rs = s0.combine_first(s1) - xp = Series([datetime(2010, 1, 1), "2011"]) + + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + xp = Series([datetime(2010, 1, 1), "2011"]) + tm.assert_series_equal(rs, xp) def test_combine_first_dt_tz_values(self, tz_naive_fixture): diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index 82c52bdaa29d7..1aec2a5e5d726 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -319,8 +319,11 @@ def test_datetime64_fillna(self): # GH#6587 # make sure that we are treating as integer when filling - # this also tests inference of a datetime-like with NaT's - ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"]) + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + # this also tests inference of a datetime-like with NaT's + ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"]) + expected = Series( [ "2013-08-05 15:30:00.000001", diff --git 
a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 646d1f0ab1508..f03322f9b0d6c 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -900,14 +900,23 @@ def test_constructor_dtype_datetime64_7(self): def test_constructor_dtype_datetime64_6(self): # these will correctly infer a datetime - s = Series([None, NaT, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([NaT, None, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" + msg = "containing strings is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([None, NaT, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, None, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" def test_constructor_dtype_datetime64_5(self): # tz-aware (UTC and other tz's) @@ -1379,14 +1388,22 @@ def test_constructor_dtype_timedelta64(self): assert td.dtype == "object" # these will correctly infer a timedelta - s = Series([None, NaT, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([np.nan, NaT, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([NaT, None, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([NaT, np.nan, "1 Day"]) - assert s.dtype == "timedelta64[ns]" + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([None, NaT, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([np.nan, NaT, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, None, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, np.nan, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" # GH 16406 def test_constructor_mixed_tz(self): diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py index 1fc383521d31f..eb26ae688f00e 100644 --- a/pandas/tests/tools/test_to_timedelta.py +++ b/pandas/tests/tools/test_to_timedelta.py @@ -187,6 +187,16 @@ def test_to_timedelta_via_apply(self): result = Series([to_timedelta("00:00:01")]) tm.assert_series_equal(result, expected) + def test_to_timedelta_inference_without_warning(self): + # GH#41731 inference produces a warning in the Series constructor, + # but _not_ in to_timedelta + vals = ["00:00:01", pd.NaT] + with tm.assert_produces_warning(None): + result = to_timedelta(vals) + + expected = TimedeltaIndex([pd.Timedelta(seconds=1), pd.NaT]) + tm.assert_index_equal(result, expected) + def test_to_timedelta_on_missing_values(self): # GH5438 timedelta_NaT = np.timedelta64("NaT") @@ -197,7 +207,8 @@ def test_to_timedelta_on_missing_values(self): ) tm.assert_series_equal(actual, expected) - 
actual = to_timedelta(Series(["00:00:01", pd.NaT])) + with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"): + actual = to_timedelta(Series(["00:00:01", pd.NaT])) tm.assert_series_equal(actual, expected) actual = to_timedelta(np.nan)
- [x] closes #33558 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry. This difference in behavior is one of the biggest remaining things keeping us from sharing more between the Index and Series constructors.
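For quick reference, a minimal sketch of the deprecated inference path and the workaround the warning message suggests (assuming, as the diff above indicates, that passing the dtype explicitly bypasses the object-inference branch that warns):

```python
import warnings
import pandas as pd

# Mixed NaT-and-string data previously inferred datetime64[ns] silently;
# with this change the same constructor call emits a FutureWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ser = pd.Series([pd.NaT, pd.NaT, "2013-08-05 15:30:00.000001"])
assert any(issubclass(w.category, FutureWarning) for w in caught)

# To keep the inferred dtype without the warning, state it explicitly:
ser = pd.Series(
    [pd.NaT, pd.NaT, "2013-08-05 15:30:00.000001"], dtype="datetime64[ns]"
)
```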
https://api.github.com/repos/pandas-dev/pandas/pulls/41731
2021-05-30T05:28:55Z
2021-06-01T13:53:05Z
2021-06-01T13:53:05Z
2021-06-01T15:59:19Z
CI: suppress npdev warnings
diff --git a/pandas/conftest.py b/pandas/conftest.py index f948dc11bc014..329023ed7ba6a 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -66,6 +66,11 @@ MultiIndex, ) +# Until https://github.com/numpy/numpy/issues/19078 is sorted out, just suppress +suppress_npdev_promotion_warning = pytest.mark.filterwarnings( + "ignore:Promotion of numbers and bools:FutureWarning" +) + # ---------------------------------------------------------------- # Configuration / Settings # ---------------------------------------------------------------- @@ -112,6 +117,8 @@ def pytest_collection_modifyitems(items): if "/frame/" in item.nodeid: item.add_marker(pytest.mark.arraymanager) + item.add_marker(suppress_npdev_promotion_warning) + # Hypothesis hypothesis.settings.register_profile( diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 1bbe90f3cb58c..12220e825aed4 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -238,7 +238,7 @@ def test_compare_list_like_nan(self, op, interval_array, nulls_fixture, request) Categorical(list("abab")), Categorical(date_range("2017-01-01", periods=4)), pd.array(list("abcd")), - pd.array(["foo", 3.14, None, object()]), + pd.array(["foo", 3.14, None, object()], dtype=object), ], ids=lambda x: str(x.dtype), ) diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index 2c96cf291c154..ba8fe25401e8c 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import is_numpy_dev + from pandas import ( CategoricalDtype, DataFrame, @@ -171,20 +173,28 @@ def test_to_records_with_categorical(self): ), ), # Pass in a type instance. - ( + pytest.param( {"column_dtypes": str}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dtype instance. - ( + pytest.param( {"column_dtypes": np.dtype("unicode")}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dictionary (name-only). (
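For context, the suppression pattern used in the conftest change above, as a standalone sketch (names copied from the diff; this relies only on stock pytest APIs):

```python
import pytest

# Reusable mark that silences the numpy-dev promotion FutureWarning
# until https://github.com/numpy/numpy/issues/19078 is resolved.
suppress_npdev_promotion_warning = pytest.mark.filterwarnings(
    "ignore:Promotion of numbers and bools:FutureWarning"
)

def pytest_collection_modifyitems(items):
    # Runs at collection time, so every collected test gets the filter
    # without having to decorate each test individually.
    for item in items:
        item.add_marker(suppress_npdev_promotion_warning)
```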
Let's try this again
https://api.github.com/repos/pandas-dev/pandas/pulls/41730
2021-05-30T03:18:54Z
2021-05-31T15:47:16Z
2021-05-31T15:47:15Z
2021-06-01T14:02:17Z
TYP: tighten types in core.construction
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6e71cb49596c8..bc44b23da25d5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -728,6 +728,15 @@ def __init__( if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") + # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; + # expected "Union[Union[Union[ExtensionArray, ndarray], + # Index, Series], Sequence[Any]]" + index = ensure_index(index) # type: ignore[arg-type] + # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; + # expected "Union[Union[Union[ExtensionArray, ndarray], + # Index, Series], Sequence[Any]]" + columns = ensure_index(columns) # type: ignore[arg-type] + if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) @@ -2325,6 +2334,7 @@ def _from_arrays( dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") + columns = ensure_index(columns) mgr = arrays_to_mgr( arrays, columns, diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 3a8915e94135a..46eb138dc74d1 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -47,10 +47,7 @@ from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, - ABCDatetimeIndex, - ABCIndex, ABCSeries, - ABCTimedeltaIndex, ) from pandas.core import ( @@ -71,7 +68,9 @@ ) from pandas.core.indexes import base as ibase from pandas.core.indexes.api import ( + DatetimeIndex, Index, + TimedeltaIndex, ensure_index, get_objs_combined_axis, union_indexes, @@ -101,7 +100,7 @@ def arrays_to_mgr( arrays, - arr_names, + arr_names: Index, index, columns, *, @@ -115,8 +114,6 @@ def arrays_to_mgr( Needs to handle a lot of exceptional cases. """ - arr_names = ensure_index(arr_names) - if verify_integrity: # figure out the index, if necessary if index is None: @@ -286,10 +283,12 @@ def ndarray_to_mgr( if columns is None: columns = Index(range(len(values))) + else: + columns = ensure_index(columns) return arrays_to_mgr(values, columns, index, columns, dtype=dtype, typ=typ) - if is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype): + elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype): # i.e. Datetime64TZ values = extract_array(values, extract_numpy=True) if copy: @@ -454,7 +453,7 @@ def dict_to_mgr( arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies - arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays] + arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays] arrays = [ arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays ] @@ -480,7 +479,7 @@ def nested_data_to_arrays( columns: Index | None, index: Index | None, dtype: DtypeObj | None, -): +) -> tuple[list[ArrayLike], Index, Index]: """ Convert a single sequence of arrays to multiple arrays. 
""" @@ -548,7 +547,7 @@ def convert(v): if is_list_like(values[0]): values = np.array([convert(v) for v in values]) elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: - # GH#21861 + # GH#21861 see test_constructor_list_of_lists values = np.array([convert(v) for v in values]) else: values = convert(values) @@ -566,31 +565,30 @@ def convert(v): return values -def _homogenize(data, index: Index, dtype: DtypeObj | None): +def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: oindex = None homogenized = [] for val in data: if isinstance(val, ABCSeries): if dtype is not None: - val = val.astype(dtype) + val = val.astype(dtype, copy=False) if val.index is not index: # Forces alignment. No need to copy data since we # are putting it into an ndarray later val = val.reindex(index, copy=False) - # TODO extract_array should be preferred, but that gives failures for - # `extension/test_numpy.py` (extract_array will convert numpy arrays - # to PandasArray), see https://github.com/pandas-dev/pandas/issues/40021 - # val = extract_array(val, extract_numpy=True) + val = val._values else: if isinstance(val, dict): if oindex is None: oindex = index.astype("O") - if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)): + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + # see test_constructor_dict_datetime64_index val = dict_compat(val) else: + # see test_constructor_subclass_dict val = dict(val) val = lib.fast_multiget(val, oindex._values, default=np.nan) val = sanitize_array( @@ -749,6 +747,7 @@ def to_arrays( Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): + # see test_from_records_with_index_data, test_from_records_bad_index_column if columns is not None: arrays = [ data._ixs(i, axis=1).values @@ -884,7 +883,7 @@ def _list_of_dict_to_arrays( # assure that they are of the base dict class and not of derived # classes - data = [(type(d) is dict) and d or dict(d) for d in data] + data = [d if type(d) is dict else dict(d) for d in data] content = lib.dicts_to_array(data, list(columns)) return content, columns
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41729
2021-05-30T03:07:06Z
2021-05-31T21:30:51Z
2021-05-31T21:30:51Z
2021-05-31T21:37:32Z
REF: more explicit dtypes in strings.accessor
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 7643019ff8c55..29d37599b0785 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -13,7 +13,10 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import FrameOrSeriesUnion +from pandas._typing import ( + DtypeObj, + FrameOrSeriesUnion, +) from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -209,8 +212,12 @@ def _validate(data): # see _libs/lib.pyx for list of inferred types allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"] - values = getattr(data, "values", data) # Series / Index - values = getattr(values, "categories", values) # categorical / normal + # TODO: avoid kludge for tests.extension.test_numpy + from pandas.core.internals.managers import _extract_array + + data = _extract_array(data) + + values = getattr(data, "categories", data) # categorical / normal inferred_dtype = lib.infer_dtype(values, skipna=True) @@ -242,6 +249,7 @@ def _wrap_result( expand: bool | None = None, fill_value=np.nan, returns_string=True, + returns_bool: bool = False, ): from pandas import ( Index, @@ -319,11 +327,17 @@ def cons_row(x): else: index = self._orig.index # This is a mess. - dtype: str | None - if self._is_string and returns_string: - dtype = self._orig.dtype + dtype: DtypeObj | str | None + vdtype = getattr(result, "dtype", None) + if self._is_string: + if is_bool_dtype(vdtype): + dtype = result.dtype + elif returns_string: + dtype = self._orig.dtype + else: + dtype = vdtype else: - dtype = None + dtype = vdtype if expand: cons = self._orig._constructor_expanddim @@ -331,7 +345,7 @@ def cons_row(x): else: # Must be a Series cons = self._orig._constructor - result = cons(result, name=name, index=index) + result = cons(result, name=name, index=index, dtype=dtype) result = result.__finalize__(self._orig, method="str") if name is not None and result.ndim == 1: # __finalize__ might copy over the original name, but we may @@ -369,7 +383,7 @@ def _get_series_list(self, others): if isinstance(others, ABCSeries): return [others] elif isinstance(others, ABCIndex): - return [Series(others._values, index=idx)] + return [Series(others._values, index=idx, dtype=others.dtype)] elif isinstance(others, ABCDataFrame): return [others[x] for x in others] elif isinstance(others, np.ndarray) and others.ndim == 2: @@ -547,7 +561,7 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): sep = "" if isinstance(self._orig, ABCIndex): - data = Series(self._orig, index=self._orig) + data = Series(self._orig, index=self._orig, dtype=self._orig.dtype) else: # Series data = self._orig
Experimenting with #40489, I found that making Series inference behavior more like Index behavior broke a number of strings tests. This makes the strings code robust to that potential change by being more explicit about dtypes.
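A small example of the dtype behavior this is guarding (expected outputs assume the `is_bool_dtype` branch added above, i.e. boolean-valued results are not re-cast to the original string dtype):

```python
import pandas as pd

ser = pd.Series(["apple", "banana", None], dtype="string")

mask = ser.str.contains("an")  # boolean-valued result stays boolean
print(mask.dtype)              # boolean

upper = ser.str.upper()        # string-valued result keeps StringDtype
print(upper.dtype)             # string
```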
https://api.github.com/repos/pandas-dev/pandas/pulls/41727
2021-05-29T20:46:26Z
2021-06-09T00:27:45Z
2021-06-09T00:27:45Z
2021-06-09T00:34:50Z
TST: Check map function works with StringDtype (#40823)
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 88c3ad228f8c3..6837bdb1a6b9c 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -115,6 +115,20 @@ def func(x): ser.apply(func) +def test_series_map_stringdtype(any_string_dtype): + # map test on StringDType, GH#40823 + ser1 = Series( + data=["cat", "dog", "rabbit"], + index=["id1", "id2", "id3"], + dtype=any_string_dtype, + ) + ser2 = Series(data=["id3", "id2", "id1", "id7000"], dtype=any_string_dtype) + result = ser2.map(ser1) + expected = Series(data=["rabbit", "dog", "cat", pd.NA], dtype=any_string_dtype) + + tm.assert_series_equal(result, expected) + + def test_apply_box(): # ufunc will not be boxed. Same test cases as the test_map_box vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
Checks map function output on two Series with StringDtype data. - [ ] closes #40823 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
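Usage sketch mirroring the new test: mapping one string-dtype Series through another preserves the dtype and produces `pd.NA` for keys missing from the lookup Series:

```python
import pandas as pd

lookup = pd.Series(
    ["cat", "dog", "rabbit"], index=["id1", "id2", "id3"], dtype="string"
)
ids = pd.Series(["id3", "id2", "id1", "id7000"], dtype="string")

result = ids.map(lookup)
# -> ["rabbit", "dog", "cat", <NA>] with dtype "string"
```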
https://api.github.com/repos/pandas-dev/pandas/pulls/41723
2021-05-29T17:44:40Z
2021-06-02T13:24:14Z
2021-06-02T13:24:14Z
2021-06-02T13:24:18Z
DEPR: Deprecated passing arguments as positional in pd.concat
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index d8f39a7d6e3c0..991b9a40d151b 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -423,7 +423,7 @@ Other enhancements .. code-block:: ipython - In [1]: pd.concat([foo, bar, baz], 1) + In [1]: pd.concat([foo, bar, baz], axis=1) Out[1]: 0 1 2 0 1 1 4 @@ -433,7 +433,7 @@ Other enhancements .. ipython:: python - pd.concat([foo, bar, baz], 1) + pd.concat([foo, bar, baz], axis=1) - ``DataFrame`` has gained the ``nlargest`` and ``nsmallest`` methods (:issue:`10393`) diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ce613fd78c1e1..55e8196754fdb 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -711,6 +711,7 @@ Deprecations - Deprecated passing lists as ``key`` to :meth:`DataFrame.xs` and :meth:`Series.xs` (:issue:`41760`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) +- Deprecated passing arguments as positional (other than ``objs``) in :func:`concat` (:issue:`41485`) .. _whatsnew_130.deprecations.nuisance_columns: diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b3b453ea6355a..ea34bc75b4e31 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -16,7 +16,10 @@ import numpy as np from pandas._typing import FrameOrSeriesUnion -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import ( + cache_readonly, + deprecate_nonkeyword_arguments, +) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( @@ -84,6 +87,7 @@ def concat( ... 
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["objs"]) def concat( objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame], axis=0, diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e4f3bcb89cf7e..1fef33558dd9a 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1762,7 +1762,7 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra columns = data.columns replacement_df = DataFrame(replacements) replaced = concat( - [data.drop(replacement_df.columns, axis=1), replacement_df], 1 + [data.drop(replacement_df.columns, axis=1), replacement_df], axis=1 ) data = replaced[columns] return data diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py index 8e1dee5873512..6cfe80ae5c87c 100644 --- a/pandas/tests/io/pytables/test_complex.py +++ b/pandas/tests/io/pytables/test_complex.py @@ -205,4 +205,4 @@ def test_complex_append(setup_path): store.append("df", df, data_columns=["b"]) store.append("df", df) result = store.select("df") - tm.assert_frame_equal(pd.concat([df, df], 0), result) + tm.assert_frame_equal(pd.concat([df, df], axis=0), result) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 96b88dc61cfed..17a7089f0ac85 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -638,3 +638,18 @@ def test_concat_multiindex_with_empty_rangeindex(): result = concat([df1, df2]) expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi) tm.assert_frame_equal(result, expected) + + +def test_concat_posargs_deprecation(): + # https://github.com/pandas-dev/pandas/issues/41485 + df = DataFrame([[1, 2, 3]], index=["a"]) + df2 = DataFrame([[4, 5, 6]], index=["b"]) + + msg = ( + "In a future version of pandas all arguments of concat " + "except for the argument 'objs' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df, df2], 0) + expected = DataFrame([[1, 2, 3], [4, 5, 6]], index=["a", "b"]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/concat/test_invalid.py b/pandas/tests/reshape/concat/test_invalid.py index 95a81ce61c785..cd2a7ca33a267 100644 --- a/pandas/tests/reshape/concat/test_invalid.py +++ b/pandas/tests/reshape/concat/test_invalid.py @@ -27,13 +27,12 @@ def test_concat_invalid(self): def test_concat_invalid_first_argument(self): df1 = tm.makeCustomDataframe(10, 2) - df2 = tm.makeCustomDataframe(10, 2) msg = ( "first argument must be an iterable of pandas " 'objects, you passed an object of type "DataFrame"' ) with pytest.raises(TypeError, match=msg): - concat(df1, df2) + concat(df1) # generator ok though concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
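The deprecated call shape and its keyword replacement, as exercised by the new test above:

```python
import pandas as pd

df1 = pd.DataFrame([[1, 2, 3]], index=["a"])
df2 = pd.DataFrame([[4, 5, 6]], index=["b"])

# Deprecated: positional axis now emits a FutureWarning
result = pd.concat([df1, df2], 0)

# Preferred: every argument except `objs` passed by keyword
result = pd.concat([df1, df2], axis=0)
```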
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41718
2021-05-29T14:24:17Z
2021-06-07T16:46:24Z
2021-06-07T16:46:24Z
2021-06-07T16:46:28Z
CLN: Deprecate non-keyword arguments in read_table #41485
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..38682d188e57a 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -700,7 +700,7 @@ Deprecations - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) -- +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) .. _whatsnew_130.deprecations.nuisance_columns: diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 8bf1ab1260b8e..a384846b7a063 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -592,6 +592,9 @@ def read_csv( return _read(filepath_or_buffer, kwds) +@deprecate_nonkeyword_arguments( + version=None, allowed_args=["filepath_or_buffer"], stacklevel=3 +) @Appender( _doc_read_csv_and_table.format( func_name="read_table", diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 97b3be1306cd5..8fa2d7f7b8d65 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -823,3 +823,15 @@ def test_malformed_second_line(all_parsers): result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1) expected = DataFrame({"a": ["b"]}) tm.assert_frame_equal(result, expected) + + +def test_read_table_posargs_deprecation(all_parsers): + # https://github.com/pandas-dev/pandas/issues/41485 + data = StringIO("a\tb\n1\t2") + parser = all_parsers + msg = ( + "In a future version of pandas all arguments of read_table " + "except for the argument 'filepath_or_buffer' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + parser.read_table(data, " ")
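The corresponding before/after call shape (a sketch; the test above only asserts the warning, and the separator here is chosen to match the sample data rather than the test's argument):

```python
from io import StringIO
import pandas as pd

# Deprecated: positional `sep` now emits a FutureWarning
df = pd.read_table(StringIO("a\tb\n1\t2"), "\t")

# Preferred: only `filepath_or_buffer` stays positional
df = pd.read_table(StringIO("a\tb\n1\t2"), sep="\t")
```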
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41717
2021-05-29T13:37:49Z
2021-05-31T14:52:28Z
2021-05-31T14:52:28Z
2021-05-31T14:52:34Z
ENH: maybe_convert_objects corner cases
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..b84cacfd74840 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -11,7 +11,10 @@ from typing import ( import numpy as np -from pandas._typing import ArrayLike +from pandas._typing import ( + ArrayLike, + DtypeObj, +) # placeholder until we can specify np.ndarray[object, ndim=2] ndarray_obj_2d = np.ndarray @@ -73,6 +76,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: Literal[False] = ..., convert_to_nullable_integer: Literal[False] = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> np.ndarray: ... @overload @@ -85,6 +89,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: Literal[True] = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -97,6 +102,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -109,6 +115,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: Literal[True] = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -121,6 +128,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..73ff3b85ca46b 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -84,6 +84,10 @@ from pandas._libs.util cimport ( ) from pandas._libs.tslib import array_to_datetime +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) from pandas._libs.tslibs.period import Period from pandas._libs.missing cimport ( @@ -1640,7 +1644,8 @@ def infer_datetimelike_array(arr: ndarray[object]) -> tuple[str, bool]: # convert *every* string array if len(objs): try: - array_to_datetime(objs, errors="raise") + # require_iso8601 as in maybe_infer_to_datetimelike + array_to_datetime(objs, errors="raise", require_iso8601=True) return "datetime", seen_str except (ValueError, TypeError): pass @@ -2322,7 +2327,8 @@ def maybe_convert_objects(ndarray[object] objects, bint convert_timedelta=False, bint convert_period=False, bint convert_interval=False, - bint convert_to_nullable_integer=False) -> "ArrayLike": + bint convert_to_nullable_integer=False, + object dtype_if_all_nat=None) -> "ArrayLike": """ Type inference function-- convert object array to proper dtype @@ -2351,6 +2357,8 @@ def maybe_convert_objects(ndarray[object] objects, convert_to_nullable_integer : bool, default False If an array-like object contains only integer values (and NaN) is encountered, whether to convert and return an IntegerArray. + dtype_if_all_nat : np.dtype, ExtensionDtype, or None, default None + Dtype to cast to if we have all-NaT. 
Returns ------- @@ -2419,8 +2427,12 @@ def maybe_convert_objects(ndarray[object] objects, seen.float_ = True elif is_timedelta(val): if convert_timedelta: - itimedeltas[i] = convert_to_timedelta64(val, "ns").view("i8") seen.timedelta_ = True + try: + itimedeltas[i] = convert_to_timedelta64(val, "ns").view("i8") + except OutOfBoundsTimedelta: + seen.object_ = True + break else: seen.object_ = True break @@ -2457,8 +2469,12 @@ def maybe_convert_objects(ndarray[object] objects, break else: seen.datetime_ = True - idatetimes[i] = convert_to_tsobject( - val, None, None, 0, 0).value + try: + idatetimes[i] = convert_to_tsobject( + val, None, None, 0, 0).value + except OutOfBoundsDatetime: + seen.object_ = True + break else: seen.object_ = True break @@ -2546,8 +2562,13 @@ def maybe_convert_objects(ndarray[object] objects, elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: - # TODO: array full of NaT ambiguity resolve here needed - pass + dtype = dtype_if_all_nat + if dtype is not None: + # otherwise we keep object dtype + result = _infer_all_nats( + dtype, datetimes, timedeltas + ) + elif convert_datetime: result = datetimes elif convert_timedelta: @@ -2586,8 +2607,13 @@ def maybe_convert_objects(ndarray[object] objects, elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: - # TODO: array full of NaT ambiguity resolve here needed - pass + dtype = dtype_if_all_nat + if dtype is not None: + # otherwise we keep object dtype + result = _infer_all_nats( + dtype, datetimes, timedeltas + ) + elif convert_datetime: result = datetimes elif convert_timedelta: @@ -2618,6 +2644,26 @@ def maybe_convert_objects(ndarray[object] objects, return objects +cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas): + """ + If we have all-NaT values, cast these to the given dtype. 
+ """ + if isinstance(dtype, np.dtype): + if dtype == "M8[ns]": + result = datetimes + elif dtype == "m8[ns]": + result = timedeltas + else: + raise ValueError(dtype) + else: + # ExtensionDtype + cls = dtype.construct_array_type() + i8vals = np.empty(len(datetimes), dtype="i8") + i8vals.fill(NPY_NAT) + result = cls(i8vals, dtype=dtype) + return result + + class NoDefault(Enum): # We make this an Enum # 1) because it round-trips through pickle correctly (see GH#40397) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 7e0b26391e132..3c541a309e42a 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -665,6 +665,57 @@ def test_maybe_convert_objects_datetime(self): ) tm.assert_numpy_array_equal(out, exp) + def test_maybe_convert_objects_dtype_if_all_nat(self): + arr = np.array([pd.NaT, pd.NaT], dtype=object) + out = lib.maybe_convert_objects( + arr, convert_datetime=True, convert_timedelta=True + ) + # no dtype_if_all_nat passed -> we dont guess + tm.assert_numpy_array_equal(out, arr) + + out = lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("timedelta64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]") + tm.assert_numpy_array_equal(out, exp) + + out = lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("datetime64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]") + tm.assert_numpy_array_equal(out, exp) + + def test_maybe_convert_objects_dtype_if_all_nat_invalid(self): + # we accept datetime64[ns], timedelta64[ns], and EADtype + arr = np.array([pd.NaT, pd.NaT], dtype=object) + + with pytest.raises(ValueError, match="int64"): + lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("int64"), + ) + + @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) + def test_maybe_convert_objects_datetime_overflow_safe(self, dtype): + stamp = datetime(2363, 10, 4) # Enterprise-D launch date + if dtype == "timedelta64[ns]": + stamp = stamp - datetime(1970, 1, 1) + arr = np.array([stamp], dtype=object) + + out = lib.maybe_convert_objects( + arr, convert_datetime=True, convert_timedelta=True + ) + # no OutOfBoundsDatetime/OutOfBoundsTimedeltas + tm.assert_numpy_array_equal(out, arr) + def test_maybe_convert_objects_timedelta64_nat(self): obj = np.timedelta64("NaT", "ns") arr = np.array([obj], dtype=object)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41714
2021-05-29T04:30:18Z
2021-06-02T15:18:33Z
2021-06-02T15:18:33Z
2021-06-02T17:00:44Z
TST: More old issues
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index a8df09d479f22..62d7535159f13 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -638,6 +638,19 @@ def test_setitem_dtypes_bytes_type_to_object(self): expected = Series([np.uint32, object, object, np.uint8], index=list("abcd")) tm.assert_series_equal(result, expected) + def test_boolean_mask_nullable_int64(self): + # GH 28928 + result = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + mask = Series(False, index=result.index) + result.loc[mask, "a"] = result["a"] + result.loc[mask, "b"] = result["b"] + expected = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + tm.assert_frame_equal(result, expected) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index f9535e9c7ef17..80f97ecaee121 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -238,3 +238,22 @@ def test_append_numpy_bug_1681(self, dtype): result = df.append(other) assert (result["B"] == index).all() + + @pytest.mark.filterwarnings("ignore:The values in the array:RuntimeWarning") + def test_multiindex_column_append_multiple(self): + # GH 29699 + df = DataFrame( + [[1, 11], [2, 12], [3, 13]], + columns=pd.MultiIndex.from_tuples( + [("multi", "col1"), ("multi", "col2")], names=["level1", None] + ), + ) + df2 = df.copy() + for i in range(1, 10): + df[i, "colA"] = 10 + df = df.append(df2, ignore_index=True) + result = df["multi"] + expected = DataFrame( + {"col1": [1, 2, 3] * (i + 1), "col2": [11, 12, 13] * (i + 1)} + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 76e24a27e0854..b3eeab9db4ad5 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -502,3 +502,9 @@ def test_drop_inplace_no_leftover_column_reference(self): tm.assert_index_equal(df.columns, Index([], dtype="object")) a -= a.mean() tm.assert_index_equal(df.columns, Index([], dtype="object")) + + def test_drop_level_missing_label_multiindex(self): + # GH 18561 + df = DataFrame(index=MultiIndex.from_product([range(3), range(3)])) + with pytest.raises(KeyError, match="labels \\[5\\] not found in level"): + df.drop(5, level=0) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 8a3ac265db154..84992982a104a 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -60,6 +60,24 @@ def test_set_reset_index_intervalindex(self): df = df.reset_index() + def test_setitem_reset_index_dtypes(self): + # GH 22060 + df = DataFrame(columns=["a", "b", "c"]).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64} + ) + df1 = df.set_index(["a"]) + df1["d"] = [] + result = df1.reset_index() + expected = DataFrame(columns=["a", "b", "c", "d"], index=range(0)).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64, "d": np.float64} + ) + tm.assert_frame_equal(result, expected) + + df2 = df.set_index(["a", "b"]) + df2["d"] = [] + result = df2.reset_index() + tm.assert_frame_equal(result, expected) + class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in diff --git 
a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index dbb6bb116828a..6e176310da6b4 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -775,6 +775,16 @@ def test_sort_index_ascending_bad_value_raises(self, ascending): with pytest.raises(ValueError, match=match): df.sort_index(axis=0, ascending=ascending, na_position="first") + def test_sort_index_use_inf_as_na(self): + # GH 29687 + expected = DataFrame( + {"col1": [1, 2, 3], "col2": [3, 4, 5]}, + index=pd.date_range("2020", periods=3), + ) + with pd.option_context("mode.use_inf_as_na", True): + result = expected.sort_index() + tm.assert_frame_equal(result, expected) + class TestDataFrameSortIndexKey: def test_sort_multi_index_key(self): diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 03c5b6e027dac..e2cfc50510173 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -322,3 +322,11 @@ def test_frame_to_string_with_periodindex(self): # it works! frame.to_string() + + def test_datetime64tz_slice_non_truncate(self): + # GH 30263 + df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")}) + expected = repr(df) + df = df.iloc[:, :5] + result = repr(df) + assert result == expected diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 70bdfe92602b2..719fdb353e3cf 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2338,3 +2338,24 @@ def test_groupby_filtered_df_std(): index=Index([True], name="groupby_col"), ) tm.assert_frame_equal(result, expected) + + +def test_datetime_categorical_multikey_groupby_indices(): + # GH 26859 + df = DataFrame( + { + "a": Series(list("abc")), + "b": Series( + to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"]), + dtype="category", + ), + "c": Categorical.from_codes([-1, 0, 1], categories=[0, 1]), + } + ) + result = df.groupby(["a", "b"]).indices + expected = { + ("a", Timestamp("2018-01-01 00:00:00")): np.array([0]), + ("b", Timestamp("2018-02-01 00:00:00")): np.array([1]), + ("c", Timestamp("2018-03-01 00:00:00")): np.array([2]), + } + assert result == expected diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index dfbf1a5b2cdc2..e7a5e931f5297 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -663,3 +663,29 @@ def test_first_categorical_and_datetime_data_nat(): ) expected.index = Index(["first", "second", "third"], name="group") tm.assert_frame_equal(result, expected) + + +def test_first_multi_key_groupbby_categorical(): + # GH 22512 + df = DataFrame( + { + "A": [1, 1, 1, 2, 2], + "B": [100, 100, 200, 100, 100], + "C": ["apple", "orange", "mango", "mango", "orange"], + "D": ["jupiter", "mercury", "mars", "venus", "venus"], + } + ) + df = df.astype({"D": "category"}) + result = df.groupby(by=["A", "B"]).first() + expected = DataFrame( + { + "C": ["apple", "mango", "mango"], + "D": Series(["jupiter", "mars", "venus"]).astype( + pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"]) + ), + } + ) + expected.index = MultiIndex.from_tuples( + [(1, 100), (1, 200), (2, 100)], names=["A", "B"] + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 281bfb19eb6fa..fc07c14f1e179 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -13,6 +13,7 @@ 
import pandas.util._test_decorators as td from pandas import ( + NA, Categorical, CategoricalDtype, DataFrame, @@ -1340,3 +1341,10 @@ def test_iloc_setitem_pure_position_based(self): ser1.iloc[1:3] = ser2.iloc[1:3] expected = Series([1, 5, 6]) tm.assert_series_equal(ser1, expected) + + def test_iloc_nullable_int64_size_1_nan(self): + # GH 31861 + result = DataFrame({"a": ["test"], "b": [np.nan]}) + result.loc[:, "b"] = result.loc[:, "b"].astype("Int64") + expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 77b155f01a2ea..cd07b3814d023 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2479,3 +2479,11 @@ def test_merge_string_float_column_result(): [[9, 10, 1, 2], [11, 12, 3, 4]], columns=pd.Index(["x", "y", "a", 114.0]) ) tm.assert_frame_equal(result, expected) + + +def test_mergeerror_on_left_index_mismatched_dtypes(): + # GH 22449 + df_1 = DataFrame(data=["X"], columns=["C"], index=[22]) + df_2 = DataFrame(data=["X"], columns=["C"], index=[999]) + with pytest.raises(MergeError, match="Can only pass argument"): + merge(df_1, df_2, on=["C"], left_index=True) diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index 28332a94207fe..67f986c0949ca 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -199,6 +199,13 @@ def test_sort_values_pos_args_deprecation(self): expected = Series([1, 2, 3]) tm.assert_series_equal(result, expected) + def test_mergesort_decending_stability(self): + # GH 28697 + s = Series([1, 2, 1, 3], ["first", "b", "second", "c"]) + result = s.sort_values(ascending=False, kind="mergesort") + expected = Series([3, 2, 1, 1], ["c", "b", "first", "second"]) + tm.assert_series_equal(result, expected) + class TestSeriesSortingKey: def test_sort_values_key(self): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 3eb3892279832..8872b76cd9bce 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -870,3 +870,21 @@ def test_dateoffset_immutable(attribute): msg = "DateOffset objects are immutable" with pytest.raises(AttributeError, match=msg): setattr(offset, attribute, 5) + + +@pytest.mark.parametrize( + "weekmask, expected_time, mult", + [ + ["Mon Tue Wed Thu Fri Sat", "2018-11-10 09:00:00", 10], + ["Tue Wed Thu Fri Sat", "2018-11-13 08:00:00", 18], + ], +) +def test_custom_businesshour_weekmask_and_holidays(weekmask, expected_time, mult): + # GH 23542 + holidays = ["2018-11-09"] + bh = CustomBusinessHour( + start="08:00", end="17:00", weekmask=weekmask, holidays=holidays + ) + result = Timestamp("2018-11-08 08:00") + mult * bh + expected = Timestamp(expected_time) + assert result == expected diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 7a3e1e002759d..c28d54dd9fbfb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1411,3 +1411,11 @@ def test_rolling_sum_all_nan_window_floating_artifacts(): result = df.rolling(3, min_periods=0).sum() expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0]) tm.assert_frame_equal(result, expected) + + +def test_rolling_zero_window(): + # GH 22719 + s = Series(range(1)) + result = s.rolling(0).min() + 
expected = Series([np.nan]) + tm.assert_series_equal(result, expected)
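One of the issues pinned down above as a standalone repro (zero-length rolling window, GH 22719):

```python
import numpy as np
import pandas as pd

s = pd.Series(range(1))
result = s.rolling(0).min()  # behavior pinned by the new test: a single NaN
pd.testing.assert_series_equal(result, pd.Series([np.nan]))
```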
- [x] closes #18561 - [x] closes #22060 - [x] closes #22449 - [x] closes #22512 - [x] closes #22719 - [x] closes #23542 - [x] closes #26859 - [x] closes #28697 - [x] closes #28928 - [x] closes #29687 - [x] closes #30263 - [x] closes #29699 - [x] closes #31861 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41712
2021-05-29T02:00:11Z
2021-05-31T14:27:05Z
2021-05-31T14:27:01Z
2021-05-31T16:30:56Z
REGR: DataFrame reduction with min_count
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..1d7b7a762e2ae 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc44b23da25d5..4eef4ee2a3e80 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9775,7 +9775,6 @@ def _reduce( **kwds, ): - min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -9824,7 +9823,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if (numeric_only is not None or axis == 0) and min_count == 0: + if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. # For numeric_only=None only the case with axis==0 and no object diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d1eb50f2702ba..b5b4c7000fddf 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -393,7 +393,7 @@ def reduce(self, func, ignore_failures: bool = False) -> list[Block]: return [] raise - if np.ndim(result) == 0: + if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b8909f16ee876..673c482bced18 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -245,8 +245,7 @@ def _maybe_get_mask( """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): - # Boolean data cannot contain nulls, so signal via mask being None - return None + return np.broadcast_to(False, values.shape) if skipna or needs_i8_conversion(values.dtype): mask = isna(values) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 564f5d20b0301..9d778cdee6a5b 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -811,35 +812,36 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method)() + result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) # min_count=1 - result = getattr(df, method)(min_count=1) + result = getattr(df, method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) 
tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1685,7 +1687,7 @@ def test_minmax_extensionarray(method, numeric_only): @pytest.mark.parametrize("meth", ["max", "min", "sum", "mean", "median"]) -def test_groupy_regular_arithmetic_equivalent(meth): +def test_groupby_regular_arithmetic_equivalent(meth): # GH#40660 df = DataFrame( {"a": [pd.Timedelta(hours=6), pd.Timedelta(hours=7)], "b": [12.1, 13.3]} @@ -1708,3 +1710,16 @@ def test_frame_mixed_numeric_object_with_timestamp(ts_value): result = df.sum() expected = Series([1, 1.1, "foo"], index=list("abc")) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
- [ ] closes #41074 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry. Tests copied from #41701; I think this gets at the root problem. cc @simonjayhawkins
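The repro from the new test, for quick reference: prod over mixed objects reduces (1 * "a" * True == "a"), while sum raises because int + str is undefined:

```python
import pandas as pd

df = pd.DataFrame([1, "a", True])

result = df.prod(axis=0, min_count=1, numeric_only=False)
print(result)  # 0    a

try:
    df.sum(axis=0, min_count=1, numeric_only=False)
except TypeError as err:
    print(err)  # unsupported operand type(s) for +: 'int' and 'str'
```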
https://api.github.com/repos/pandas-dev/pandas/pulls/41711
2021-05-28T23:02:15Z
2021-06-01T14:54:44Z
2021-06-01T14:54:44Z
2021-06-10T07:19:12Z
REF: avoid maybe_convert_platform
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index a99bf245a6073..4aa3bab168ac6 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -32,7 +32,6 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender -from pandas.core.dtypes.cast import maybe_convert_platform from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_dtype, @@ -1650,4 +1649,6 @@ def _maybe_convert_platform_interval(values) -> ArrayLike: else: values = extract_array(values, extract_numpy=True) - return maybe_convert_platform(values) + if not hasattr(values, "dtype"): + return np.asarray(values) + return values diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 5c2bed109e3bf..3a8915e94135a 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -537,9 +537,6 @@ def _prep_ndarray(values, copy: bool = True) -> np.ndarray: def convert(v): if not is_list_like(v) or isinstance(v, ABCDataFrame): return v - elif not hasattr(v, "dtype") and not isinstance(v, (list, tuple, range)): - # TODO: should we cast these to list? - return v v = extract_array(v, extract_numpy=True) res = maybe_convert_platform(v)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry. After this change, `maybe_convert_platform` is used exclusively in core.construction and core.internals.construction.
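A hedged sketch of the simplified interval-side conversion after this change (the helper name below is illustrative, not the real function; the logic mirrors `_maybe_convert_platform_interval` in the diff):

```python
import numpy as np

def convert_interval_side(values):
    # Illustrative stand-in for the post-refactor logic: only coerce
    # dtype-less list-likes; anything carrying a dtype passes through.
    if not hasattr(values, "dtype"):
        return np.asarray(values)
    return values

left = convert_interval_side([0, 1, 2])                   # -> int64 ndarray
right = convert_interval_side(np.array([1.0, 2.0, 3.0]))  # unchanged
```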
https://api.github.com/repos/pandas-dev/pandas/pulls/41709
2021-05-28T19:22:18Z
2021-05-31T16:17:27Z
2021-05-31T16:17:27Z
2021-05-31T16:19:09Z
CLN: Removing unused Travis files for GBQ
diff --git a/.travis.yml b/.travis.yml index 540cd026a43d5..52fadca6b7846 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,7 +45,6 @@ before_install: - echo "before_install" # Use blocking IO on travis. Ref: https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' - - source ci/travis_process_gbq_encryption.sh - export PATH="$HOME/miniconda3/bin:$PATH" - df -h - pwd diff --git a/ci/travis_encrypt_gbq.sh b/ci/travis_encrypt_gbq.sh deleted file mode 100755 index 7d5692d9520af..0000000000000 --- a/ci/travis_encrypt_gbq.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -GBQ_JSON_FILE=$1 - -if [[ $# -ne 1 ]]; then - echo -e "Too few arguments.\nUsage: ./travis_encrypt_gbq.sh "\ - "<gbq-json-credentials-file>" - exit 1 -fi - -if [[ $GBQ_JSON_FILE != *.json ]]; then - echo "ERROR: Expected *.json file" - exit 1 -fi - -if [[ ! -f $GBQ_JSON_FILE ]]; then - echo "ERROR: File $GBQ_JSON_FILE does not exist" - exit 1 -fi - -echo "Encrypting $GBQ_JSON_FILE..." -read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file -r pandas-dev/pandas $GBQ_JSON_FILE \ -travis_gbq.json.enc -f | grep -o "\w*_iv\|\w*_key"); - -echo "Adding your secure key to travis_gbq_config.txt ..." -echo -e "TRAVIS_IV_ENV=$TRAVIS_IV\nTRAVIS_KEY_ENV=$TRAVIS_KEY"\ -> travis_gbq_config.txt - -echo "Done. Removing file $GBQ_JSON_FILE" -rm $GBQ_JSON_FILE - -echo -e "Created encrypted credentials file travis_gbq.json.enc.\n"\ - "NOTE: Do NOT commit the *.json file containing your unencrypted" \ - "private key" diff --git a/ci/travis_gbq.json.enc b/ci/travis_gbq.json.enc deleted file mode 100644 index 6e0b6cee4048c..0000000000000 Binary files a/ci/travis_gbq.json.enc and /dev/null differ diff --git a/ci/travis_gbq_config.txt b/ci/travis_gbq_config.txt deleted file mode 100644 index dc857c450331c..0000000000000 --- a/ci/travis_gbq_config.txt +++ /dev/null @@ -1,2 +0,0 @@ -TRAVIS_IV_ENV=encrypted_e05c934e101e_iv -TRAVIS_KEY_ENV=encrypted_e05c934e101e_key diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh deleted file mode 100755 index b5118ad5defc6..0000000000000 --- a/ci/travis_process_gbq_encryption.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -source ci/travis_gbq_config.txt - -if [[ -n ${SERVICE_ACCOUNT_KEY} ]]; then - echo "${SERVICE_ACCOUNT_KEY}" > ci/travis_gbq.json; -elif [[ -n ${!TRAVIS_IV_ENV} ]]; then - openssl aes-256-cbc -K ${!TRAVIS_KEY_ENV} -iv ${!TRAVIS_IV_ENV} \ - -in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d; - export GBQ_PROJECT_ID='pandas-gbq-tests'; - echo 'Successfully decrypted gbq credentials' -fi
Unless I'm missing something, Travis is only used to test on ARM64, and it does not test GBQ, since the GBQ dependencies are not in the conda environment. So all the GBQ-related Travis scripts and encrypted credentials can be removed.
https://api.github.com/repos/pandas-dev/pandas/pulls/41708
2021-05-28T18:07:21Z
2021-05-30T19:25:04Z
2021-05-30T19:25:04Z
2021-05-30T19:25:24Z
BUG: MultiIndex.reindex with non-MultiIndex; Series constructor
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..5d92aefdb4eb1 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,7 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) -- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. --------------------------------------------------------------------------- @@ -959,6 +959,7 @@ MultiIndex - Bug in :meth:`MultiIndex.equals` incorrectly returning ``True`` when :class:`MultiIndex` containing ``NaN`` even when they are differently ordered (:issue:`38439`) - Bug in :meth:`MultiIndex.intersection` always returning empty when intersecting with :class:`CategoricalIndex` (:issue:`38653`) - Bug in :meth:`MultiIndex.reindex` raising ``ValueError`` with empty MultiIndex and indexing only a specific level (:issue:`41170`) +- Bug in :meth:`MultiIndex.reindex` raising ``TypeError`` when reindexing against a flat :class:`Index` (:issue:`41707`) I/O ^^^ @@ -1073,6 +1074,7 @@ Reshaping - Bug in :meth:`DataFrame.sort_values` not reshaping index correctly after sorting on columns, when ``ignore_index=True`` (:issue:`39464`) - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``ExtensionDtype`` dtypes (:issue:`39454`) - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``datetime64`` and ``timedelta64`` dtypes (:issue:`39574`) +- Bug in :meth:`DataFrame.append` with a :class:`DataFrame` with a :class:`MultiIndex` and appending a :class:`Series` whose :class:`Index` is not a :class:`MultiIndex` (:issue:`41707`) - Bug in :meth:`DataFrame.pivot_table` returning a ``MultiIndex`` for a single value when operating on and empty ``DataFrame`` (:issue:`13483`) - Allow :class:`Index` to be passed to the :func:`numpy.all` function (:issue:`40180`) - Bug in :meth:`DataFrame.stack` not preserving ``CategoricalDtype`` in a ``MultiIndex`` (:issue:`36991`) @@ -1127,6 +1129,7 @@ Other - Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`) - Bug in :class:`Series` backed by :class:`DatetimeArray` or :class:`TimedeltaArray` sometimes failing to set the array's ``freq`` to ``None`` (:issue:`41425`) - Bug in creating a :class:`Series` from a ``range`` object that does not fit in the bounds of ``int64`` dtype (:issue:`30173`) +- Bug in creating a :class:`Series` from a ``dict`` with all-tuple keys and an :class:`Index` that requires reindexing (:issue:`41707`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc44b23da25d5..6d4b723cdf921 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8921,10 +8921,7 @@ def append( index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) - try: - combined_columns = self.columns.append(idx_diff) - except TypeError: - combined_columns = self.columns.astype(object).append(idx_diff) + combined_columns = self.columns.append(idx_diff) other = ( other.reindex(combined_columns, copy=False) .to_frame() diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6ae906edd1d81..6dcb2a44e7d3d 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -774,9 +774,11 @@ def _get_indexer_pointwise(self, target: Index) -> tuple[np.ndarray, np.ndarray] except KeyError: missing.append(i) locs = np.array([-1]) - except InvalidIndexError as err: - # i.e. non-scalar key - raise TypeError(key) from err + except InvalidIndexError: + # i.e. non-scalar key e.g. a tuple. + # see test_append_different_columns_types_raises + missing.append(i) + locs = np.array([-1]) indexer.append(locs) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 59882422f5439..805420a83108a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2541,9 +2541,11 @@ def reindex( elif (indexer >= 0).all(): target = self.take(indexer) else: - # hopefully? - target = MultiIndex.from_tuples(target) - + try: + target = MultiIndex.from_tuples(target) + except TypeError: + # not all tuples, see test_constructor_dict_multiindex_reindex_flat + return target, indexer if ( preserve_names and target.nlevels == self.nlevels diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py index 3b0fcd72f3123..38ff6efec40c9 100644 --- a/pandas/tests/indexes/multi/test_reindex.py +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -115,3 +115,14 @@ def test_reindex_empty_with_level(values): expected_indexer = np.array([], dtype=result_indexer.dtype) tm.assert_index_equal(result, expected) tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + +def test_reindex_not_all_tuples(): + keys = [("i", "i"), ("i", "j"), ("j", "i"), "j"] + mi = MultiIndex.from_tuples(keys[:-1]) + idx = Index(keys) + res, indexer = mi.reindex(idx) + + tm.assert_index_equal(res, idx) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index 62fe1ed3a7c49..43fe72b0776ed 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -184,18 +184,12 @@ def test_append_preserve_index_name(self): dt.datetime(2013, 1, 3, 7, 12), ] ), + pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]), ] - indexes_cannot_append_with_other = [ - pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]) - ] - - # error: Unsupported operand types for + ("List[Index]" and "List[MultiIndex]") - all_indexes = ( - indexes_can_append + indexes_cannot_append_with_other # type: ignore[operator] + @pytest.mark.parametrize( + "index", indexes_can_append, ids=lambda x: type(x).__name__ ) - - @pytest.mark.parametrize("index", all_indexes, ids=lambda x: type(x).__name__) def test_append_same_columns_type(self, index): # GH18359 @@ -249,41 +243,6 @@ 
def test_append_different_columns_types(self, df_columns, series_index): ) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "index_can_append", indexes_can_append, ids=lambda x: type(x).__name__ - ) - @pytest.mark.parametrize( - "index_cannot_append_with_other", - indexes_cannot_append_with_other, - ids=lambda x: type(x).__name__, - ) - def test_append_different_columns_types_raises( - self, index_can_append, index_cannot_append_with_other - ): - # GH18359 - # Dataframe.append will raise if MultiIndex appends - # or is appended to a different index type - # - # See also test 'test_append_different_columns_types' above for - # appending without raising. - - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) - ser = Series([7, 8, 9], index=index_cannot_append_with_other, name=2) - msg = ( - r"Expected tuple, got (int|long|float|str|" - r"pandas._libs.interval.Interval)|" - r"object of type '(int|float|Timestamp|" - r"pandas._libs.interval.Interval)' has no len\(\)|" - ) - with pytest.raises(TypeError, match=msg): - df.append(ser) - - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other) - ser = Series([7, 8, 9], index=index_can_append, name=2) - - with pytest.raises(TypeError, match=msg): - df.append(ser) - def test_append_dtype_coerce(self, sort): # GH 4993 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 646d1f0ab1508..a540b692f3aec 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1700,6 +1700,14 @@ def test_constructor_dict_multiindex(self): result = result.reindex(index=expected.index) tm.assert_series_equal(result, expected) + def test_constructor_dict_multiindex_reindex_flat(self): + # construction involves reindexing with a MultiIndex corner case + data = {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan} + expected = Series(data) + + result = Series(expected[:-1].to_dict(), index=expected.index) + tm.assert_series_equal(result, expected) + def test_constructor_dict_timedelta_index(self): # GH #12169 : Resample category data with timedelta index # construct Series from dict as data and TimedeltaIndex as index
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry Preliminary to de-duplicating some code in internals.construction. A reproduction of the fixed corner case is sketched below.
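A reproduction of the fixed corner case, adapted from the new `test_reindex_not_all_tuples` in the diff:

```python
import numpy as np
import pandas as pd
import pandas._testing as tm

# Reindexing a MultiIndex against a flat Index whose entries are not all
# tuples used to raise TypeError; it now returns the target and indexer.
keys = [("i", "i"), ("i", "j"), ("j", "i"), "j"]
mi = pd.MultiIndex.from_tuples(keys[:-1])
idx = pd.Index(keys)

res, indexer = mi.reindex(idx)
tm.assert_index_equal(res, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2, -1], dtype=np.intp))
```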
https://api.github.com/repos/pandas-dev/pandas/pulls/41707
2021-05-28T17:03:11Z
2021-06-01T14:52:40Z
2021-06-01T14:52:40Z
2021-06-01T16:05:15Z
BUG: DataFrameGroupBy with numeric_only and empty non-numeric data
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 1556c88aaecc6..b36499c340fd9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -1061,6 +1061,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) - Bug in :meth:`SeriesGroupBy` aggregations incorrectly returning empty :class:`Series` instead of raising ``TypeError`` on aggregations that are invalid for its dtype, e.g. ``.prod`` with ``datetime64[ns]`` dtype (:issue:`41342`) +- Bug in :class:`DataFrameGroupBy` aggregations incorrectly failing to drop columns with invalid dtypes for that aggregation when there are no valid columns (:issue:`41291`) - Bug in :meth:`DataFrame.rolling.__iter__` where ``on`` was not assigned to the index of the resulting objects (:issue:`40373`) - Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 7a286188c4e74..b72b927b3c2a8 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -516,7 +516,7 @@ def group_add(add_t[:, ::1] out, val = values[i, j] # not nan - if val == val: + if not checknull(val): nobs[lab, j] += 1 if nobs[lab, j] == 1: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b51fb2234e148..69f992f840c7c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -67,10 +67,7 @@ validate_func_kwargs, ) from pandas.core.apply import GroupByApply -from pandas.core.base import ( - DataError, - SpecificationError, -) +from pandas.core.base import SpecificationError import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.frame import DataFrame @@ -516,16 +513,12 @@ def _cython_transform( obj = self._selected_obj - is_numeric = is_numeric_dtype(obj.dtype) - if numeric_only and not is_numeric: - raise DataError("No numeric types to aggregate") - try: result = self.grouper._cython_operation( "transform", obj._values, how, axis, **kwargs ) - except (NotImplementedError, TypeError): - raise DataError("No numeric types to aggregate") + except NotImplementedError as err: + raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err return obj._constructor(result, index=self.obj.index, name=obj.name) @@ -1064,7 +1057,6 @@ def _cython_agg_general( # Note: we never get here with how="ohlc"; that goes through SeriesGroupBy data: Manager2D = self._get_data_to_aggregate() - orig = data if numeric_only: data = data.get_numeric_data(copy=False) @@ -1087,9 +1079,6 @@ def array_func(values: ArrayLike) -> ArrayLike: # continue and exclude the block new_mgr = data.grouped_reduce(array_func, ignore_failures=True) - if not len(new_mgr) and len(orig): - # If the original Manager was already empty, no need to raise - raise DataError("No numeric types to aggregate") if len(new_mgr) < len(data): warnings.warn( f"Dropping invalid columns in {type(self).__name__}.{how} " diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b00a1160fb01b..6deb5bb1a76f0 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1339,20 +1339,12 @@ def _agg_general( with 
group_selection_context(self): # try a cython aggregation if we can - result = None - try: - result = self._cython_agg_general( - how=alias, - alt=npfunc, - numeric_only=numeric_only, - min_count=min_count, - ) - except DataError: - pass - - # apply a non-cython aggregation - if result is None: - result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) + result = self._cython_agg_general( + how=alias, + alt=npfunc, + numeric_only=numeric_only, + min_count=min_count, + ) return result.__finalize__(self.obj, method="groupby") def _agg_py_fallback( diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index eb82e03aea82f..851dd7311183f 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -128,8 +128,9 @@ def test_groupby_aggregation_multi_level_column(): columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]), ) - result = df.groupby(level=1, axis=1).sum() - expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]}) + gb = df.groupby(level=1, axis=1) + result = gb.sum(numeric_only=False) + expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index cf1177d231e37..a035c5500e2dc 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -18,7 +18,6 @@ bdate_range, ) import pandas._testing as tm -from pandas.core.groupby.groupby import DataError @pytest.mark.parametrize( @@ -98,9 +97,9 @@ def test_cython_agg_nothing_to_agg(): frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25}) - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): - frame[["b"]].groupby(frame["a"]).mean() + result = frame[["b"]].groupby(frame["a"]).mean() + expected = DataFrame([], index=frame["a"].sort_values().drop_duplicates()) + tm.assert_frame_equal(result, expected) def test_cython_agg_nothing_to_agg_with_dates(): diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 4d30543355d47..79990deed261d 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -433,15 +433,22 @@ def test_agg_over_numpy_arrays(): ], columns=["category", "arraydata"], ) - result = df.groupby("category").agg(sum) + gb = df.groupby("category") expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]] expected_index = Index([1, 2], name="category") expected_column = ["arraydata"] expected = DataFrame(expected_data, index=expected_index, columns=expected_column) + alt = gb.sum(numeric_only=False) + tm.assert_frame_equal(alt, expected) + + result = gb.agg("sum", numeric_only=False) tm.assert_frame_equal(result, expected) + # FIXME: the original version of this test called `gb.agg(sum)` + # and that raises TypeError if `numeric_only=False` is passed + @pytest.mark.parametrize("as_period", [True, False]) def test_agg_tzaware_non_datetime_result(as_period): @@ -524,9 +531,14 @@ def test_sum_uint64_overflow(): ) expected.index.name = 0 - result = df.groupby(0).sum() + result = df.groupby(0).sum(numeric_only=False) tm.assert_frame_equal(result, expected) + # out column is non-numeric, so with numeric_only=True it is dropped + result2 = df.groupby(0).sum(numeric_only=True) + expected2 = expected[[]] + 
tm.assert_frame_equal(result2, expected2) + @pytest.mark.parametrize( "structure, expected", diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 719fdb353e3cf..382a940d2a92c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -638,7 +638,7 @@ def test_as_index_select_column(): def test_groupby_as_index_select_column_sum_empty_df(): # GH 35246 df = DataFrame(columns=["A", "B", "C"]) - left = df.groupby(by="A", as_index=False)["B"].sum() + left = df.groupby(by="A", as_index=False)["B"].sum(numeric_only=False) assert type(left) is DataFrame assert left.to_dict() == {"A": {}, "B": {}} @@ -1861,6 +1861,49 @@ def get_result(): get_result() return + else: + # ie. DataFrameGroupBy + if op in ["prod", "sum"]: + # ops that require more than just ordered-ness + if method != "apply": + # FIXME: apply goes through different code path + if df.dtypes[0].kind == "M": + # GH#41291 + # datetime64 -> prod and sum are invalid + result = get_result() + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + expected = df.set_index(keys)[[]] + tm.assert_equal(result, expected) + return + + elif isinstance(values, Categorical): + # GH#41291 + # Categorical doesn't implement sum or prod + result = get_result() + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + expected = df.set_index(keys)[[]] + if len(keys) != 1 and op == "prod": + # TODO: why just prod and not sum? + # Categorical is special without 'observed=True' + lev = Categorical([0], dtype=values.dtype) + mi = MultiIndex.from_product([lev, lev], names=["A", "B"]) + expected = DataFrame([], columns=[], index=mi) + + tm.assert_equal(result, expected) + return + + elif df.dtypes[0] == object: + # FIXME: the test is actually wrong here, xref #41341 + result = get_result() + # In this case we have list-of-list, will raise TypeError, + # and subsequently be dropped as nuisance columns + expected = df.set_index(keys)[[]] + tm.assert_equal(result, expected) + return result = get_result() expected = df.set_index(keys)[columns] @@ -2313,12 +2356,17 @@ def test_groupby_all_nan_groups_drop(): def test_groupby_empty_multi_column(): # GH 15106 - result = DataFrame(data=[], columns=["A", "B", "C"]).groupby(["A", "B"]).sum() + df = DataFrame(data=[], columns=["A", "B", "C"]) + gb = df.groupby(["A", "B"]) + result = gb.sum(numeric_only=False) expected = DataFrame( [], columns=["C"], index=MultiIndex([[], []], [[], []], names=["A", "B"]) ) tm.assert_frame_equal(result, expected) + result = gb.sum(numeric_only=True) + tm.assert_frame_equal(result, expected[[]]) + def test_groupby_filtered_df_std(): # GH 16174 diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index dae5c7274ffc5..9062049029e4d 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -24,7 +24,6 @@ DataFrameGroupBy, SeriesGroupBy, ) -from pandas.core.groupby.groupby import DataError def assert_fp_equal(a, b): @@ -741,11 +740,21 @@ def test_cython_transform_frame(op, args, targop): tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1)) # individual columns for c in df: - if c not in ["float", "int", "float_missing"] and op != "shift": - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): + if ( + c not in ["float", "int", "float_missing"] + and op != "shift" + and not (c == 
"timedelta" and op == "cumsum") + ): + msg = "|".join( + [ + "does not support .* operations", + ".* is not supported for object dtype", + "is not implemented for this dtype", + ] + ) + with pytest.raises(TypeError, match=msg): gb[c].transform(op) - with pytest.raises(DataError, match=msg): + with pytest.raises(TypeError, match=msg): getattr(gb[c], op)() else: expected = gb[c].apply(targop)
- [x] closes #41291 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
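A sketch of the new behavior, adapted from the updated `test_cython_agg_nothing_to_agg`:

```python
import numpy as np
import pandas as pd

# Aggregating a frame with no valid (numeric) columns now drops the
# invalid columns instead of raising DataError.
frame = pd.DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
result = frame[["b"]].groupby(frame["a"]).mean()
print(result)  # empty DataFrame indexed by the sorted, de-duplicated group keys
```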
https://api.github.com/repos/pandas-dev/pandas/pulls/41706
2021-05-28T16:14:58Z
2021-06-02T15:14:09Z
2021-06-02T15:14:08Z
2021-06-02T16:59:36Z
REF: simplify _try_cast
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 92f94f4424ee8..0267116cdfb99 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -17,10 +17,7 @@ import numpy.ma as ma from pandas._libs import lib -from pandas._libs.tslibs import ( - IncompatibleFrequency, - OutOfBoundsDatetime, -) +from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( AnyArrayLike, ArrayLike, @@ -719,9 +716,7 @@ def _try_cast( # while maybe_cast_to_datetime treats it as UTC # see test_maybe_promote_any_numpy_dtype_with_datetimetz - # error: Incompatible return value type (got "Union[ExtensionArray, - # ndarray, List[Any]]", expected "Union[ExtensionArray, ndarray]") - return maybe_cast_to_datetime(arr, dtype) # type: ignore[return-value] + return maybe_cast_to_datetime(arr, dtype) # TODO: copy? array_type = dtype.construct_array_type()._from_sequence @@ -734,6 +729,9 @@ def _try_cast( return subarr return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy) + elif dtype.kind in ["m", "M"]: + return maybe_cast_to_datetime(arr, dtype) + try: # GH#15832: Check if we are requesting a numeric dtype and # that we can convert the data to the requested dtype. @@ -743,9 +741,7 @@ def _try_cast( maybe_cast_to_integer_array(arr, dtype) subarr = arr else: - subarr = maybe_cast_to_datetime(arr, dtype) - if dtype is not None and dtype.kind == "M": - return subarr + subarr = arr if not isinstance(subarr, ABCExtensionArray): # 4 tests fail if we move this to a try/except/else; see @@ -753,16 +749,8 @@ def _try_cast( # test_constructor_dict_cast2, test_loc_setitem_dtype subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) - except OutOfBoundsDatetime: - # in case of out of bound datetime64 -> always raise - raise - except (ValueError, TypeError) as err: - if dtype is not None and raise_cast_failure: - raise - elif "Cannot cast" in str(err) or "cannot be converted to timedelta64" in str( - err - ): - # via _disallow_mismatched_datetimelike + except (ValueError, TypeError): + if raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index df79276f67386..f61f5c1d82596 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1579,7 +1579,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: def maybe_cast_to_datetime( value: ExtensionArray | np.ndarray | list, dtype: DtypeObj | None -) -> ExtensionArray | np.ndarray | list: +) -> ExtensionArray | np.ndarray: """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT @@ -1705,7 +1705,8 @@ def maybe_cast_to_datetime( "maybe_cast_to_datetime allows a list *only* if dtype is not None" ) - return value + # at this point we have converted or raised in all cases where we had a list + return cast(ArrayLike, value) def sanitize_to_nanoseconds(values: np.ndarray, copy: bool = False) -> np.ndarray:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41705
2021-05-28T16:00:43Z
2021-05-31T17:41:50Z
2021-05-31T17:41:50Z
2021-05-31T18:10:51Z
REF: simplify Index.astype
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a50ebd959ace..68069a2e9482c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -906,13 +906,10 @@ def astype(self, dtype, copy=True): if is_dtype_equal(self.dtype, dtype): return self.copy() if copy else self - elif is_categorical_dtype(dtype): - from pandas.core.indexes.category import CategoricalIndex - - return CategoricalIndex(self, name=self.name, dtype=dtype, copy=copy) - - elif is_extension_array_dtype(dtype): - return Index(np.asarray(self), name=self.name, dtype=dtype, copy=copy) + elif isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + new_values = cls._from_sequence(self, dtype=dtype, copy=False) + return Index(new_values, dtype=dtype, copy=copy, name=self.name) try: casted = self._values.astype(dtype, copy=copy) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 83998a2792a8a..066fa1f547328 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -18,6 +18,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, @@ -365,11 +366,17 @@ def astype(self, dtype, copy: bool = True) -> Index: return self return self.copy() - if isinstance(dtype, np.dtype) and dtype.kind == "M" and dtype != "M8[ns]": + if ( + isinstance(self.dtype, np.dtype) + and isinstance(dtype, np.dtype) + and dtype.kind == "M" + and dtype != "M8[ns]" + ): # For now Datetime supports this by unwrapping ndarray, but DTI doesn't - raise TypeError(f"Cannot cast {type(self._data).__name__} to dtype") + raise TypeError(f"Cannot cast {type(self).__name__} to dtype") - new_values = self._data.astype(dtype, copy=copy) + with rewrite_exception(type(self._data).__name__, type(self).__name__): + new_values = self._data.astype(dtype, copy=copy) # pass copy=False because any copying will be done in the # _data.astype call above diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 894abb0fb1776..61f3d62320a6e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -422,12 +422,6 @@ def __reduce__(self): d.update(self._get_attributes_dict()) return _new_IntervalIndex, (type(self), d), None - @Appender(Index.astype.__doc__) - def astype(self, dtype, copy: bool = True): - with rewrite_exception("IntervalArray", type(self).__name__): - new_values = self._values.astype(dtype, copy=copy) - return Index(new_values, dtype=new_values.dtype, name=self.name) - @property def inferred_type(self) -> str: """Return a string of the type inferred from the values""" diff --git a/pandas/tests/indexes/datetimes/methods/test_astype.py b/pandas/tests/indexes/datetimes/methods/test_astype.py index 24387267cd5c4..3e329818540c3 100644 --- a/pandas/tests/indexes/datetimes/methods/test_astype.py +++ b/pandas/tests/indexes/datetimes/methods/test_astype.py @@ -223,7 +223,7 @@ def test_astype_object_with_nat(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.NaN]) - msg = "Cannot cast DatetimeArray to dtype" + msg = "Cannot cast DatetimeIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py index 73439d349bebd..74f627478a29c 100644 --- a/pandas/tests/indexes/period/methods/test_astype.py +++ 
b/pandas/tests/indexes/period/methods/test_astype.py @@ -21,7 +21,7 @@ class TestPeriodIndexAsType: def test_astype_raises(self, dtype): # GH#13149, GH#13209 idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D") - msg = "Cannot cast PeriodArray to dtype" + msg = "Cannot cast PeriodIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py index 54e61b35eb70f..e372fd007630a 100644 --- a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -538,7 +538,7 @@ def setup_method(self, method): self.series = Series(period_range("2000-01-01", periods=10, freq="D")) def test_constructor_cant_cast_period(self): - msg = "Cannot cast PeriodArray to dtype float64" + msg = "Cannot cast PeriodIndex to dtype float64" with pytest.raises(TypeError, match=msg): Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float) diff --git a/pandas/tests/indexes/timedeltas/methods/test_astype.py b/pandas/tests/indexes/timedeltas/methods/test_astype.py index c2c7a1f32ae6e..fbe66bf78dbeb 100644 --- a/pandas/tests/indexes/timedeltas/methods/test_astype.py +++ b/pandas/tests/indexes/timedeltas/methods/test_astype.py @@ -101,7 +101,7 @@ def test_astype_timedelta64(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN]) - msg = "Cannot cast TimedeltaArray to dtype" + msg = "Cannot cast TimedeltaIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
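The user-visible effect is mainly in the error message, which now names the Index subclass instead of the backing array (adapted from the updated astype tests):

```python
import numpy as np
import pandas as pd

idx = pd.DatetimeIndex(["2016-05-16", "NaT"])
try:
    idx.astype(np.float64)
except TypeError as err:
    print(err)  # e.g. "Cannot cast DatetimeIndex to dtype float64"
```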
https://api.github.com/repos/pandas-dev/pandas/pulls/41703
2021-05-28T15:32:10Z
2021-05-31T21:29:45Z
2021-05-31T21:29:45Z
2021-05-31T21:30:02Z
TYP: fix ignores
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 92f94f4424ee8..05e267bf83dd6 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -554,9 +554,8 @@ def sanitize_array( # TODO: copy? subarr = maybe_convert_platform(data) if subarr.dtype == object: - # Argument 1 to "maybe_infer_to_datetimelike" has incompatible - # type "Union[ExtensionArray, ndarray]"; expected "ndarray" - subarr = maybe_infer_to_datetimelike(subarr) # type: ignore[arg-type] + subarr = cast(np.ndarray, subarr) + subarr = maybe_infer_to_datetimelike(subarr) subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) @@ -620,9 +619,7 @@ def _sanitize_ndim( if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): # i.e. PandasDtype("O") - # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type - # "Type[object]"; expected "Union[str, dtype[Any], None]" - result = com.asarray_tuplesafe(data, dtype=object) # type: ignore[arg-type] + result = com.asarray_tuplesafe(data, dtype=np.dtype("object")) cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) else: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..03554e67d7931 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -123,9 +123,8 @@ def maybe_convert_platform( arr = values if arr.dtype == object: - # error: Argument 1 to "maybe_convert_objects" has incompatible type - # "Union[ExtensionArray, ndarray]"; expected "ndarray" - arr = lib.maybe_convert_objects(arr) # type: ignore[arg-type] + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) return arr @@ -1249,13 +1248,12 @@ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> Arra return values.copy() return values - if isinstance(values, ABCExtensionArray): + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray values = values.astype(dtype, copy=copy) else: - # error: Argument 1 to "astype_nansafe" has incompatible type "ExtensionArray"; - # expected "ndarray" - values = astype_nansafe(values, dtype, copy=copy) # type: ignore[arg-type] + values = astype_nansafe(values, dtype, copy=copy) # in pandas we don't store numpy str dtypes, so convert to object if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): @@ -1958,7 +1956,7 @@ def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: def construct_1d_ndarray_preserving_na( - values: Sequence, dtype: DtypeObj | None = None, copy: bool = False + values: Sequence, dtype: np.dtype | None = None, copy: bool = False ) -> np.ndarray: """ Construct a new ndarray, coercing `values` to `dtype`, preserving NA. 
@@ -2003,17 +2001,9 @@ def construct_1d_ndarray_preserving_na( and isinstance(values, np.ndarray) and values.dtype.kind == "f" ): - # Argument 2 to "astype_float_to_int_nansafe" has incompatible - # type "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" - return astype_float_to_int_nansafe( - values, dtype, copy=copy # type: ignore[arg-type] - ) + return astype_float_to_int_nansafe(values, dtype, copy=copy) else: - # error: Argument "dtype" to "array" has incompatible type - # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[dtype[Any], - # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, - # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - subarr = np.array(values, dtype=dtype, copy=copy) # type: ignore[arg-type] + subarr = np.array(values, dtype=dtype, copy=copy) return subarr diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 894abb0fb1776..1ae0be5e5f5bf 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1165,6 +1165,8 @@ def interval_range( if periods is not None: periods += 1 + breaks: np.ndarray | TimedeltaIndex | DatetimeIndex + if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com.all_not_none(start, end, freq): @@ -1190,16 +1192,8 @@ def interval_range( else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): - # error: Incompatible types in assignment (expression has type - # "DatetimeIndex", variable has type "ndarray") - breaks = date_range( # type: ignore[assignment] - start=start, end=end, periods=periods, freq=freq - ) + breaks = date_range(start=start, end=end, periods=periods, freq=freq) else: - # error: Incompatible types in assignment (expression has type - # "TimedeltaIndex", variable has type "ndarray") - breaks = timedelta_range( # type: ignore[assignment] - start=start, end=end, periods=periods, freq=freq - ) + breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41702
2021-05-28T15:03:59Z
2021-05-31T19:04:25Z
2021-05-31T19:04:25Z
2021-05-31T19:12:18Z
ENH: Deprecate non-keyword arguments for Resampler.interpolate
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ba6bfb9da11cc..b03cdea1a8ce5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -686,6 +686,7 @@ Deprecations - Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) - Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`Resampler.interpolate` (other than ``"method"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) - Deprecated behavior of :class:`DataFrame` constructor when a ``dtype`` is passed and the data cannot be cast to that dtype. In a future version, this will raise instead of being silently ignored (:issue:`24435`) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 8195c18768eec..6378432392a04 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -34,6 +34,7 @@ from pandas.util._decorators import ( Appender, Substitution, + deprecate_nonkeyword_arguments, doc, ) @@ -832,6 +833,7 @@ def fillna(self, method, limit=None): """ return self._upsample(method, limit=limit) + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"]) @doc(NDFrame.interpolate, **_shared_docs_kwargs) def interpolate( self, diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py index fdb3a7872ad67..1f99c2888aad5 100644 --- a/pandas/tests/resample/test_deprecated.py +++ b/pandas/tests/resample/test_deprecated.py @@ -278,3 +278,30 @@ def test_resample_base_with_timedeltaindex(): tm.assert_index_equal(without_base.index, exp_without_base) tm.assert_index_equal(with_base.index, exp_with_base) + + +def test_interpolate_posargs_deprecation(): + # GH 41485 + idx = pd.to_datetime(["1992-08-27 07:46:48", "1992-08-27 07:46:59"]) + s = Series([1, 4], index=idx) + + msg = ( + r"In a future version of pandas all arguments of Resampler\.interpolate " + r"except for the argument 'method' will be keyword-only" + ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.resample("3s").interpolate("linear", 0) + + idx = pd.to_datetime( + [ + "1992-08-27 07:46:48", + "1992-08-27 07:46:51", + "1992-08-27 07:46:54", + "1992-08-27 07:46:57", + ] + ) + expected = Series([1.0, 1.0, 1.0, 1.0], index=idx) + + expected.index._data.freq = "3s" + tm.assert_series_equal(result, expected)
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
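Sketch of the deprecation, adapted from the new test:

```python
import pandas as pd

idx = pd.to_datetime(["1992-08-27 07:46:48", "1992-08-27 07:46:59"])
s = pd.Series([1, 4], index=idx)

# Only `method` may be passed positionally; the extra positional `0`
# (the `axis` argument) now emits a FutureWarning.
s.resample("3s").interpolate("linear", 0)       # warns
s.resample("3s").interpolate("linear", axis=0)  # no warning
```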
https://api.github.com/repos/pandas-dev/pandas/pulls/41699
2021-05-28T12:59:07Z
2021-06-05T09:12:39Z
2021-06-05T09:12:39Z
2021-06-05T09:12:49Z
BUG: Ignore chartsheets
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 81545ada63ce5..394be484f3d72 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -173,6 +173,7 @@ MultiIndex I/O ^^^ +- Bug in :func:`read_excel` attempting to read chart sheets from .xlsx files (:issue:`41448`) - - diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 719a4472fb9e3..4d6a766ad6cfa 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -82,8 +82,9 @@ or ``StringIO``. sheet_name : str, int, list, or None, default 0 Strings are used for sheet names. Integers are used in zero-indexed - sheet positions. Lists of strings/integers are used to request - multiple sheets. Specify None to get all sheets. + sheet positions (chart sheets do not count as a sheet position). + Lists of strings/integers are used to request multiple sheets. + Specify None to get all worksheets. Available cases: @@ -92,7 +93,7 @@ * ``"Sheet1"``: Load sheet with name "Sheet1" * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" as a dict of `DataFrame` - * None: All sheets. + * None: All worksheets. header : int, list of int, default 0 Row (0-indexed) to use for the column labels of the parsed diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index bc067e216760c..c74cf2099f41a 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -530,7 +530,7 @@ def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): @property def sheet_names(self) -> list[str]: - return self.book.sheetnames + return [sheet.title for sheet in self.book.worksheets] def get_sheet_by_name(self, name: str): self.raise_if_bad_sheet_by_name(name) diff --git a/pandas/tests/io/data/excel/chartsheet.xls b/pandas/tests/io/data/excel/chartsheet.xls new file mode 100644 index 0000000000000..7d027400fbd52 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xls differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsb b/pandas/tests/io/data/excel/chartsheet.xlsb new file mode 100644 index 0000000000000..805087280f851 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsb differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsm b/pandas/tests/io/data/excel/chartsheet.xlsm new file mode 100644 index 0000000000000..aadb48d6f4824 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsm differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsx b/pandas/tests/io/data/excel/chartsheet.xlsx new file mode 100644 index 0000000000000..c8d5e7afb3d07 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsx differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index d40fb3ce4a135..cbd241ceda0b1 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1250,6 +1250,34 @@ def test_trailing_blanks(self, read_ext): result = pd.read_excel(file_name) assert result.shape == (3, 3) + def test_ignore_chartsheets_by_str(self, request, read_ext): + # GH 41448 + if pd.read_excel.keywords["engine"] == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if pd.read_excel.keywords["engine"] == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"): + pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1") + + def 
test_ignore_chartsheets_by_int(self, request, read_ext): + # GH 41448 + if pd.read_excel.keywords["engine"] == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if pd.read_excel.keywords["engine"] == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises( + ValueError, match="Worksheet index 1 is invalid, 1 worksheets found" + ): + pd.read_excel("chartsheet" + read_ext, sheet_name=1) + class TestExcelFileRead: @pytest.fixture(autouse=True) @@ -1501,6 +1529,19 @@ def test_engine_invalid_option(self, read_ext): with pd.option_context(f"io.excel{read_ext}.reader", "abc"): pass + def test_ignore_chartsheets(self, request, engine, read_ext): + # GH 41448 + if engine == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pd.ExcelFile("chartsheet" + read_ext) as excel: + assert excel.sheet_names == ["Sheet1"] + def test_corrupt_files_closed(self, request, engine, read_ext): # GH41778 errors = (BadZipFile,)
- [x] closes #41448 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Issue #41448 relates specifically to our openpyxl engine, and is resolved by this commit. However, the bug also exists in our pyxlsb engine and cannot be resolved until the upstream https://github.com/willtrnr/pyxlsb/issues/33 is addressed. I propose closing #41448 and leaving the pyxlsb case as an xfail for now. Let me know if there's a better way to track this.
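Sketch of the new behavior with the openpyxl engine, using the `chartsheet.xlsx` fixture added in this PR (one worksheet plus one chart sheet):

```python
import pandas as pd

with pd.ExcelFile("chartsheet.xlsx") as excel:
    print(excel.sheet_names)  # ['Sheet1'] -- the chart sheet is omitted

try:
    pd.read_excel("chartsheet.xlsx", sheet_name="Chart1")
except ValueError as err:
    print(err)  # Worksheet named 'Chart1' not found
```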
https://api.github.com/repos/pandas-dev/pandas/pulls/41698
2021-05-28T04:12:15Z
2021-07-02T01:23:08Z
2021-07-02T01:23:08Z
2022-06-07T03:22:14Z
TST: More old issues
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index b84ff38b43ae7..a8df09d479f22 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -626,6 +626,18 @@ def test_setitem_iloc_two_dimensional_generator(self): expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]}) tm.assert_frame_equal(df, expected) + def test_setitem_dtypes_bytes_type_to_object(self): + # GH 20734 + index = Series(name="id", dtype="S24") + df = DataFrame(index=index) + df["a"] = Series(name="a", index=index, dtype=np.uint32) + df["b"] = Series(name="b", index=index, dtype="S64") + df["c"] = Series(name="c", index=index, dtype="S64") + df["d"] = Series(name="d", index=index, dtype=np.uint8) + result = df.dtypes + expected = Series([np.uint32, object, object, np.uint8], index=list("abcd")) + tm.assert_series_equal(result, expected) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index cf4127da79bf9..2007e60dbc5d0 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1146,3 +1146,35 @@ def test_apply_as_index_constant_lambda(as_index, expected): df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]}) result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) tm.assert_equal(result, expected) + + +def test_sort_index_groups(): + # GH 20420 + df = DataFrame( + {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]}, + index=range(5), + ) + result = df.groupby("C").apply(lambda x: x.A.sort_index()) + expected = Series( + range(1, 6), + index=MultiIndex.from_tuples( + [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None] + ), + name="A", + ) + tm.assert_series_equal(result, expected) + + +def test_positional_slice_groups_datetimelike(): + # GH 21651 + expected = DataFrame( + { + "date": pd.date_range("2010-01-01", freq="12H", periods=5), + "vals": range(5), + "let": list("abcde"), + } + ) + result = expected.groupby([expected.let, expected.date.dt.date]).apply( + lambda x: x.iloc[0:] + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 1b74096cbfbdf..dfbf1a5b2cdc2 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -641,3 +641,25 @@ def test_nth_nan_in_grouper(dropna): ) tm.assert_frame_equal(result, expected) + + +def test_first_categorical_and_datetime_data_nat(): + # GH 20520 + df = DataFrame( + { + "group": ["first", "first", "second", "third", "third"], + "time": 5 * [np.datetime64("NaT")], + "categories": Series(["a", "b", "c", "a", "b"], dtype="category"), + } + ) + result = df.groupby("group").first() + expected = DataFrame( + { + "time": 3 * [np.datetime64("NaT")], + "categories": Series(["a", "c", "a"]).astype( + pd.CategoricalDtype(["a", "b", "c"]) + ), + } + ) + expected.index = Index(["first", "second", "third"], name="group") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index b5092f83e1a9f..dae5c7274ffc5 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1259,3 +1259,11 @@ def test_categorical_and_not_categorical_key(observed): tm.assert_series_equal(result, expected) expected_explicit = Series([4, 2, 4], name="B") tm.assert_series_equal(result, 
expected_explicit) + + +def test_string_rank_grouping(): + # GH 19354 + df = DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]}) + result = df.groupby("A").transform("rank") + expected = DataFrame({"B": [1.0, 2.0, 1.0]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index f0018c8a82453..afcff6db5e3dd 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -866,3 +866,27 @@ def test_loc_get_scalar_casting_to_float(): result = df.loc[[(3, 4)], "b"].iloc[0] assert result == 2 assert isinstance(result, np.int64) + + +def test_loc_empty_single_selector_with_names(): + # GH 19517 + idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=[1, 0]) + s2 = Series(index=idx, dtype=np.float64) + result = s2.loc["a"] + expected = Series([np.nan, np.nan], index=Index(["A", "B"], name=0)) + tm.assert_series_equal(result, expected) + + +def test_loc_keyerror_rightmost_key_missing(): + # GH 20951 + + df = DataFrame( + { + "A": [100, 100, 200, 200, 300, 300], + "B": [10, 10, 20, 21, 31, 33], + "C": range(6), + } + ) + df = df.set_index(["A", "B"]) + with pytest.raises(KeyError, match="^1$"): + df.loc[(100, 1)] diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 880fa6398d25a..aac26c13c2a7c 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -910,3 +910,26 @@ def test_none_comparison(series_with_simple_index): result = series < None assert not result.iat[0] assert not result.iat[1] + + +def test_series_varied_multiindex_alignment(): + # GH 20414 + s1 = Series( + range(8), + index=pd.MultiIndex.from_product( + [list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"] + ), + ) + s2 = Series( + [1000 * i for i in range(1, 5)], + index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]), + ) + result = s1.loc[pd.IndexSlice["a", :, :]] + s2 + expected = Series( + [1000, 2001, 3002, 4003], + index=pd.MultiIndex.from_tuples( + [("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)], + names=["ab", "xy", "num"], + ), + ) + tm.assert_series_equal(result, expected)
- [x] closes #19354 - [x] closes #19517 - [x] closes #20414 - [x] closes #20420 - [x] closes #20520 - [x] closes #20734 - [x] closes #20951 - [x] closes #21651 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
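For quick reference, one of the newly covered behaviors, copied from the diff (`test_string_rank_grouping`, GH 19354):

```python
import pandas as pd
import pandas._testing as tm

# Transforming with "rank" on a grouped frame ranks within each group.
df = pd.DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]})
result = df.groupby("A").transform("rank")
tm.assert_frame_equal(result, pd.DataFrame({"B": [1.0, 2.0, 1.0]}))
```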
https://api.github.com/repos/pandas-dev/pandas/pulls/41697
2021-05-28T03:46:24Z
2021-05-28T19:14:00Z
2021-05-28T19:13:58Z
2021-05-28T19:14:09Z
DOC: add `to_html` to style.rst
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 0d743b5fe8b8b..68efd3b000bbc 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -67,5 +67,6 @@ Style export and import Styler.render Styler.export Styler.use + Styler.to_html Styler.to_excel Styler.to_latex diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 73924631aea5c..d6c151c3ed740 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -761,13 +761,13 @@ def to_html( ---------- buf : str, Path, or StringIO-like, optional, default None Buffer to write to. If ``None``, the output is returned as a string. - table_uuid: str, optional + table_uuid : str, optional Id attribute assigned to the <table> HTML element in the format: ``<table id="T_<table_uuid>" ..>`` If not given uses Styler's initially assigned value. - table_attributes: str, optional + table_attributes : str, optional Attributes to assign within the `<table>` HTML element in the format: ``<table .. <table_attributes> >``
Follow-up to the recent `to_html` enhancement: adds `Styler.to_html` to the API reference and fixes the numpydoc spacing of two parameter names. A usage sketch is below.
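A minimal use of the documented keywords (the uuid and class name here are placeholders of mine):

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
html = df.style.to_html(table_uuid="my_id", table_attributes='class="pure-table"')
# per the docstring, the table element renders as
# <table id="T_my_id" class="pure-table" ...>
```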
https://api.github.com/repos/pandas-dev/pandas/pulls/41692
2021-05-27T17:51:59Z
2021-06-03T23:28:33Z
2021-06-03T23:28:32Z
2021-06-04T08:10:07Z
BUG: SparseArray min/max return fill_value when all values are NA
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4847372f18239..6ab296b314615 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1397,7 +1397,7 @@ def max(self, axis=0, *args, **kwargs): # This condition returns a nan if there are no valid values in the array. if self.size > 0 and self._valid_sp_values.size == 0: - return np.nan + return self.fill_value else: return np.nanmax(self, axis) @@ -1406,7 +1406,7 @@ def min(self, axis=0, *args, **kwargs): # This condition returns a nan if there are no valid values in the array. if self.size > 0 and self._valid_sp_values.size == 0: - return np.nan + return self.fill_value else: return np.nanmin(self, axis) diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index b29855caf6c1d..1cc8a2df44812 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1326,6 +1326,9 @@ class TestMinMax: data_neg = plain_data * (-1) data_NaN = SparseArray(np.array([0, 1, 2, np.nan, 4])) data_all_NaN = SparseArray(np.array([np.nan, np.nan, np.nan, np.nan, np.nan])) + data_NA_filled = SparseArray( + np.array([np.nan, np.nan, np.nan, np.nan, np.nan]), fill_value=5 + ) @pytest.mark.parametrize( "raw_data,max_expected,min_expected", @@ -1334,6 +1337,7 @@ class TestMinMax: (data_neg, [0], [-4]), (data_NaN, [4], [0]), (data_all_NaN, [np.nan], [np.nan]), + (data_NA_filled, [5], [5]), ], ) def test_maxmin(self, raw_data, max_expected, min_expected):
- [x] closes #41552 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
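Sketch of the fix, adapted from the new test case:

```python
import numpy as np
import pandas as pd

# min/max of a SparseArray whose stored values are all NA now return the
# fill value instead of NaN.
arr = pd.arrays.SparseArray(np.array([np.nan] * 5), fill_value=5)
print(arr.max(), arr.min())  # the fill value (5) twice
```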
https://api.github.com/repos/pandas-dev/pandas/pulls/41691
2021-05-27T17:16:45Z
2021-06-02T13:23:19Z
2021-06-02T13:23:19Z
2021-06-02T13:23:23Z
Fix pre-commit error in ecosystem.rst
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index f5212b6fc8a51..ee061e7b7d3e6 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -569,4 +569,3 @@ Library Accessor Classes Description .. _composeml: https://github.com/alteryx/compose .. _datatest: https://datatest.readthedocs.io/ .. _woodwork: https://github.com/alteryx/woodwork -
Pre-commit fails on master: ecosystem.rst has a trailing blank line.
https://api.github.com/repos/pandas-dev/pandas/pulls/41688
2021-05-27T08:39:28Z
2021-05-27T10:20:58Z
2021-05-27T10:20:58Z
2021-05-27T10:25:30Z
CI: add sdist release workflow
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml new file mode 100644 index 0000000000000..0c2e30a74bbdb --- /dev/null +++ b/.github/workflows/sdist.yml @@ -0,0 +1,64 @@ +name: sdist + +on: + push: + branches: + - master + pull_request: + branches: + - master + - 1.2.x + - 1.3.x + paths-ignore: + - "doc/**" + +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 60 + defaults: + run: + shell: bash -l {0} + + strategy: + fail-fast: false + matrix: + python-version: ["3.7", "3.8", "3.9"] + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + + # GH 39416 + pip install numpy + + - name: Build pandas sdist + run: | + pip list + python setup.py sdist --formats=gztar + + - uses: conda-incubator/setup-miniconda@v2 + with: + activate-environment: pandas-sdist + python-version: ${{ matrix.python-version }} + + - name: Install pandas from sdist + run: | + conda list + python -m pip install dist/*.gz + + - name: Import pandas + run: | + cd .. + conda list + python -c "import pandas; pandas.show_versions();"
- [x] closes #39417
https://api.github.com/repos/pandas-dev/pandas/pulls/41685
2021-05-26T21:51:39Z
2021-06-21T13:03:13Z
2021-06-21T13:03:11Z
2021-07-07T02:39:48Z
DOC: Add woodwork to ecosystem docs
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index bc2325f15852c..f5212b6fc8a51 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -75,12 +75,12 @@ Statsmodels leverages pandas objects as the underlying data container for comput Use pandas DataFrames in your `scikit-learn <https://scikit-learn.org/>`__ ML pipeline. -`Featuretools <https://github.com/featuretools/featuretools/>`__ +`Featuretools <https://github.com/alteryx/featuretools/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community. -`Compose <https://github.com/FeatureLabs/compose>`__ +`Compose <https://github.com/alteryx/compose>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing prediction problems and transforming time-driven relational data into target values with cutoff times that can be used for supervised learning. @@ -551,11 +551,12 @@ Library Accessor Classes Description ================== ============ ==================================== =============================================================================== `cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses. `pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library. -`pandas-genomics`_ ``genomics`` ``Series``, ``DataFrame`` Provides common operations for quality control and analysis of genomics data +`pandas-genomics`_ ``genomics`` ``Series``, ``DataFrame`` Provides common operations for quality control and analysis of genomics data. `pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series. `pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames. `composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing. `datatest`_ ``validate`` ``Series``, ``DataFrame``, ``Index`` Provides validation, differences, and acceptance managers. +`woodwork`_ ``ww`` ``Series``, ``DataFrame`` Provides physical, logical, and semantic data typing information for Series and DataFrames. ================== ============ ==================================== =============================================================================== .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest @@ -565,5 +566,7 @@ Library Accessor Classes Description .. _pandas_path: https://github.com/drivendataorg/pandas-path/ .. _pathlib.Path: https://docs.python.org/3/library/pathlib.html .. _pint-pandas: https://github.com/hgrecco/pint-pandas -.. _composeml: https://github.com/FeatureLabs/compose +.. _composeml: https://github.com/alteryx/compose .. _datatest: https://datatest.readthedocs.io/ +.. _woodwork: https://github.com/alteryx/woodwork + diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index 547a5f30e0516..81ddf9c1e657f 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -34,7 +34,7 @@ computation. Use pandas DataFrames in your [scikit-learn](https://scikit-learn.org/) ML pipeline. 
-### [Featuretools](https://github.com/featuretools/featuretools/) +### [Featuretools](https://github.com/alteryx/featuretools/) Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational @@ -42,7 +42,7 @@ datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community. -### [Compose](https://github.com/FeatureLabs/compose) +### [Compose](https://github.com/alteryx/compose) Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing @@ -386,4 +386,5 @@ authors to coordinate on the namespace. | [pandas-genomics](https://pandas-genomics.readthedocs.io/en/latest/) | `genomics` | `Series`, `DataFrame` | | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` | | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` | - | [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` | + | [composeml](https://github.com/alteryx/compose) | `slice` | `DataFrame` | + | [woodwork](https://github.com/alteryx/woodwork) | `slice` | `Series`, `DataFrame` |
- [x] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41684
2021-05-26T20:38:04Z
2021-05-27T01:47:23Z
2021-05-27T01:47:23Z
2021-05-27T01:54:04Z
Add test for fixed regression in concat with empty DataFrames
diff --git a/pandas/tests/reshape/concat/test_empty.py b/pandas/tests/reshape/concat/test_empty.py index a97e9265b4f99..304dea52f359a 100644 --- a/pandas/tests/reshape/concat/test_empty.py +++ b/pandas/tests/reshape/concat/test_empty.py @@ -249,3 +249,26 @@ def test_empty_dtype_coerce(self): result = concat([df1, df2]) expected = df1.dtypes tm.assert_series_equal(result.dtypes, expected) + + def test_concat_empty_dataframe(self): + # 39037 + df1 = DataFrame(columns=["a", "b"]) + df2 = DataFrame(columns=["b", "c"]) + result = concat([df1, df2, df1]) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame(columns=["a", "b"]) + df4 = DataFrame(columns=["b"]) + result = concat([df3, df4]) + expected = DataFrame(columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_different_dtypes(self): + # 39037 + df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + df2 = DataFrame({"a": [1, 2, 3]}) + + result = concat([df1[:0], df2[:0]]) + assert result["a"].dtype == np.int64 + assert result["b"].dtype == np.object_
- [x] closes #39037 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
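A minimal sketch of the behavior these tests lock in (the dtypes of the empty columns may differ across pandas versions):

```python
import pandas as pd

# GH 39037: concatenating empty frames should union the columns
# instead of raising or dropping any of them.
df1 = pd.DataFrame(columns=["a", "b"])
df2 = pd.DataFrame(columns=["b", "c"])
result = pd.concat([df1, df2, df1])
print(list(result.columns))  # ['a', 'b', 'c']
```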
https://api.github.com/repos/pandas-dev/pandas/pulls/41677
2021-05-26T10:03:14Z
2021-05-28T19:05:48Z
2021-05-28T19:05:48Z
2021-05-28T19:05:52Z
TST: Old Issues
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 7fe921571ee2e..da930ab4d7423 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1837,3 +1837,11 @@ def test_arithemetic_multiindex_align(): ) result = df1 - df2 tm.assert_frame_equal(result, expected) + + +def test_bool_frame_mult_float(): + # GH 18549 + df = DataFrame(True, list("ab"), list("cd")) + result = df * 1.0 + expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd")) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 995fd58a84cbd..b40514568452c 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -599,3 +599,16 @@ def test_filter_dropna_with_empty_groups(): result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True) expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64) tm.assert_series_equal(result_true, expected_true) + + +def test_filter_consistent_result_before_after_agg_func(): + # GH 17091 + df = DataFrame({"data": range(6), "key": list("ABCABC")}) + grouper = df.groupby("key") + result = grouper.filter(lambda x: True) + expected = DataFrame({"data": range(6), "key": list("ABCABC")}) + tm.assert_frame_equal(result, expected) + + grouper.sum() + result = grouper.filter(lambda x: True) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 0c20622311e1f..772aa97c47233 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -977,3 +977,10 @@ def test_extension_array_cross_section_converts(): result = df.iloc[0] tm.assert_series_equal(result, expected) + + +def test_getitem_object_index_float_string(): + # GH 17286 + s = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) + assert s["a"] == 1 + assert s[1.0] == 1 diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 8c79bafa2f888..c1a096ed06efc 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1565,6 +1565,19 @@ def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self): result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)] tm.assert_series_equal(result, ser) + def test_loc_getitem_datetime_string_with_datetimeindex(self): + # GH 16710 + df = DataFrame( + {"a": range(10), "b": range(10)}, + index=date_range("2010-01-01", "2010-01-10"), + ) + result = df.loc[["2010-01-01", "2010-01-05"], ["a", "b"]] + expected = DataFrame( + {"a": [0, 4], "b": [0, 4]}, + index=DatetimeIndex(["2010-01-01", "2010-01-05"]), + ) + tm.assert_frame_equal(result, expected) + def test_loc_getitem_sorted_index_level_with_duplicates(self): # GH#4516 sorting a MultiIndex with duplicates and multiple dtypes mi = MultiIndex.from_tuples( diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 219c94b5a895d..10c8ccae67fb2 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -1393,6 +1393,44 @@ def test_to_latex_non_string_index(self): ) assert result == expected + def test_to_latex_multiindex_multirow(self): + # GH 16719 + mi = pd.MultiIndex.from_product( + [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"] + ) + df = DataFrame(index=mi) + result = df.to_latex(multirow=True, escape=False) + expected = _dedent( + r""" + 
\begin{tabular}{lll} + \toprule + & & \\ + i & val0 & val1 \\ + \midrule + \multirow{6}{*}{0.0} & \multirow{2}{*}{3.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{2.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{1.0} & 0 \\ + & & 1 \\ + \cline{1-3} + \cline{2-3} + \multirow{6}{*}{1.0} & \multirow{2}{*}{3.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{2.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{1.0} & 0 \\ + & & 1 \\ + \bottomrule + \end{tabular} + """ + ) + assert result == expected + class TestTableBuilder: @pytest.fixture diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py index 07270008adbd2..c2b4e3c343c11 100644 --- a/pandas/tests/tseries/offsets/test_custom_business_hour.py +++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py @@ -19,6 +19,8 @@ assert_offset_equal, ) +from pandas.tseries.holiday import USFederalHolidayCalendar + class TestCustomBusinessHour(Base): _offset = CustomBusinessHour @@ -298,3 +300,11 @@ def test_apply_nanoseconds(self, nano_case): offset, cases = nano_case for base, expected in cases.items(): assert_offset_equal(offset, base, expected) + + def test_us_federal_holiday_with_datetime(self): + # GH 16867 + bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar()) + t0 = datetime(2014, 1, 17, 15) + result = t0 + bhour_us * 8 + expected = Timestamp("2014-01-21 15:00:00") + assert result == expected
- [x] closes #16710 - [x] closes #16719 - [x] closes #16867 - [x] closes #17091 - [x] closes #17286 - [x] closes #18549 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
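For illustration, a small sketch of one of the covered cases (GH 18549); the other closed issues follow the same pattern of asserting long-standing behavior:

```python
import pandas as pd

# GH 18549: multiplying a boolean DataFrame by a float upcasts to float.
df = pd.DataFrame(True, index=list("ab"), columns=list("cd"))
result = df * 1.0
print(result.dtypes.unique())  # [dtype('float64')]
```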
https://api.github.com/repos/pandas-dev/pandas/pulls/41674
2021-05-26T05:22:05Z
2021-05-26T17:12:42Z
2021-05-26T17:12:40Z
2021-05-26T18:33:02Z
BUG: PeriodIndex.get_loc with mismatched freq
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 8a3d6cf63d4f1..acf5202a28409 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -937,6 +937,7 @@ Indexing - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) - Bug in :meth:`DataFrame.loc` returning :class:`MultiIndex` in wrong order if indexer has duplicates (:issue:`40978`) - Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) +- Bug in :meth:`PeriodIndex.get_loc` failing to raise ``KeyError`` when given a :class:`Period` with a mismatched ``freq`` (:issue:`41670`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 2600363bc28eb..c1104b80a0a7a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -4,10 +4,7 @@ datetime, timedelta, ) -from typing import ( - Any, - Hashable, -) +from typing import Hashable import warnings import numpy as np @@ -318,24 +315,6 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return False return dtype.freq == self.freq - # ------------------------------------------------------------------------ - # Indexing - - @doc(Index.__contains__) - def __contains__(self, key: Any) -> bool: - if isinstance(key, Period): - if key.freq != self.freq: - return False - else: - return key.ordinal in self._engine - else: - hash(key) - try: - self.get_loc(key) - return True - except KeyError: - return False - # ------------------------------------------------------------------------ # Index Methods @@ -472,6 +451,8 @@ def get_loc(self, key, method=None, tolerance=None): elif is_integer(key): # Period constructor will cast to string, which we dont want raise KeyError(key) + elif isinstance(key, Period) and key.freq != self.freq: + raise KeyError(key) try: key = Period(key, freq=self.freq) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index e820c2250256e..a41d02cfbd394 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -338,15 +338,21 @@ def test_get_loc_integer(self): pi2.get_loc(46) # TODO: This method came from test_period; de-dup with version above - def test_get_loc2(self): + @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"]) + def test_get_loc_method(self, method): idx = period_range("2000-01-01", periods=3) - for method in [None, "pad", "backfill", "nearest"]: - assert idx.get_loc(idx[1], method) == 1 - assert idx.get_loc(idx[1].asfreq("H", how="start"), method) == 1 - assert idx.get_loc(idx[1].to_timestamp(), method) == 1 - assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1 - assert idx.get_loc(str(idx[1]), method) == 1 + assert idx.get_loc(idx[1], method) == 1 + assert idx.get_loc(idx[1].to_timestamp(), method) == 1 + assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1 + assert idx.get_loc(str(idx[1]), method) == 1 + + key = idx[1].asfreq("H", how="start") + with pytest.raises(KeyError, match=str(key)): + idx.get_loc(key, method=method) + + # TODO: This method came from test_period; de-dup with version above + def test_get_loc3(self): idx = period_range("2000-01-01", periods=5)[::2] assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1 @@ -401,6 +407,21 @@ def test_get_loc_invalid_string_raises_keyerror(self): assert 
"A" not in ser assert "A" not in pi + def test_get_loc_mismatched_freq(self): + # see also test_get_indexer_mismatched_dtype testing we get analogous + # behavior for get_loc + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + pi2 = dti.to_period("W") + pi3 = pi.view(pi2.dtype) # i.e. matching i8 representations + + with pytest.raises(KeyError, match="W-SUN"): + pi.get_loc(pi2[0]) + + with pytest.raises(KeyError, match="W-SUN"): + # even though we have matching i8 values + pi.get_loc(pi3[0]) + class TestGetIndexer: def test_get_indexer(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
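A short reproduction of the fixed behavior, mirroring the new test:

```python
import pandas as pd

dti = pd.date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
key = dti.to_period("W")[0]

# A Period with a mismatched freq must raise KeyError, even when the
# underlying i8 ordinals happen to match.
try:
    pi.get_loc(key)
except KeyError as err:
    print("KeyError:", err)
```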
https://api.github.com/repos/pandas-dev/pandas/pulls/41670
2021-05-25T23:22:58Z
2021-05-28T15:58:08Z
2021-05-28T15:58:08Z
2021-06-26T07:47:43Z
TST: Check float format in object column (#35603)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 96a69476ccbef..0d5c3bc21c609 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -240,6 +240,13 @@ def test_series_repr_nat(self): ) assert result == expected + def test_float_repr(self): + # GH#35603 + # check float format when cast to object + ser = Series([1.0]).astype(object) + expected = "0 1.0\ndtype: object" + assert repr(ser) == expected + class TestCategoricalRepr: def test_categorical_repr_unicode(self):
- [ ] closes #35603 - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry This test checks if a Series cast to object still shows data in float format. Ran `pytest pandas/tests/series/test_constructors.py` Output: ``` ================================================= test session starts ================================================= platform win32 -- Python 3.8.10, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 rootdir: C:\Users\mdhsi\pandas-michael, configfile: pyproject.toml plugins: hypothesis-6.12.0, asyncio-0.14.0, cov-2.11.1, forked-1.3.0, instafail-0.4.1, xdist-2.2.1 collected 301 items pandas\tests\series\test_constructors.py .......................................................................................................................................................................................................x..................................................................................................... --------------------------- generated xml file: C:\Users\mdhsi\pandas-michael\test-data.xml --------------------------- ================================================ slowest 30 durations ================================================= 0.25s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_empty_constructor[<lambda>-True0] 0.13s call pandas/tests/series/test_constructors.py::TestSeriesConstructorIndexCoercion::test_series_constructor_datetimelike_index_coercion 0.09s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/US/Pacific'-True] 0.04s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_dtype_datetime64_10 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_empty[OrderedDict] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_empty[dict] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[PeriodIndex] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[DatetimeIndex] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[TimedeltaIndex] 0.02s setup pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_datetimelike_scalar_to_string_dtype[string] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_pass_none 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_categorical_with_coercion 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_dtype_timedelta64 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(300)-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['Asia/Tokyo'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(300)-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzlocal()-False] 0.01s call 
pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/Asia/Singapore'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[datetime.timezone(datetime.timedelta(seconds=3600))-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzutc()-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['+01:15'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC-02:15'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzlocal()-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(-300)-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/Asia/Singapore'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['-02:15'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(-300)-False] =========================================== 300 passed, 1 xfailed in 2.22s ============================================ ```
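A minimal sketch of the behavior under test:

```python
import pandas as pd

# GH 35603: the float formatting should survive the cast to object.
ser = pd.Series([1.0]).astype(object)
assert repr(ser) == "0    1.0\ndtype: object"
```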
https://api.github.com/repos/pandas-dev/pandas/pulls/41668
2021-05-25T22:11:50Z
2021-05-27T18:41:06Z
2021-05-27T18:41:06Z
2021-05-27T18:41:10Z
REF: move _str_extract function in accessor.py to array method
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index cb8a08f5668ac..95d9409b265ce 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2453,7 +2453,9 @@ def replace(self, to_replace, value, inplace: bool = False): # ------------------------------------------------------------------------ # String methods interface - def _str_map(self, f, na_value=np.nan, dtype=np.dtype("object")): + def _str_map( + self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True + ): # Optimization to apply the callable `f` to the categories once # and rebuild the result by `take`ing from the result with the codes. # Returns the same type as the object-dtype implementation though. diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 74ca5130ca322..ab1dadf4d2dfa 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -410,7 +410,9 @@ def _cmp_method(self, other, op): # String methods interface _str_na_value = StringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): from pandas.arrays import BooleanArray if dtype is None: diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 4370f3a4e15cf..454d8ebde989b 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -741,7 +741,9 @@ def value_counts(self, dropna: bool = True) -> Series: _str_na_value = ArrowStringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): # TODO: de-duplicate with StringArray method. This method is moreless copy and # paste. 
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index e1399968cb1c4..7643019ff8c55 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -13,10 +13,7 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import ( - ArrayLike, - FrameOrSeriesUnion, -) +from pandas._typing import FrameOrSeriesUnion from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -160,7 +157,6 @@ class StringMethods(NoNewAttributesMixin): # TODO: Dispatch all the methods # Currently the following are not dispatched to the array # * cat - # * extract # * extractall def __init__(self, data): @@ -243,7 +239,7 @@ def _wrap_result( self, result, name=None, - expand=None, + expand: bool | None = None, fill_value=np.nan, returns_string=True, ): @@ -2385,10 +2381,7 @@ def extract( 2 NaN dtype: object """ - from pandas import ( - DataFrame, - array as pd_array, - ) + from pandas import DataFrame if not isinstance(expand, bool): raise ValueError("expand must be True or False") @@ -2400,8 +2393,6 @@ def extract( if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex): raise ValueError("only one regex group is supported with Index") - # TODO: dispatch - obj = self._data result_dtype = _result_dtype(obj) @@ -2415,8 +2406,8 @@ def extract( result = DataFrame(columns=columns, dtype=result_dtype) else: - result_list = _str_extract( - obj.array, pat, flags=flags, expand=returns_df + result_list = self._data.array._str_extract( + pat, flags=flags, expand=returns_df ) result_index: Index | None @@ -2431,9 +2422,7 @@ def extract( else: name = _get_single_group_name(regex) - result_arr = _str_extract(obj.array, pat, flags=flags, expand=returns_df) - # not dispatching, so we have to reconstruct here. - result = pd_array(result_arr, dtype=result_dtype) + result = self._data.array._str_extract(pat, flags=flags, expand=returns_df) return self._wrap_result(result, name=name) @forbid_nonstring_types(["bytes"]) @@ -3121,33 +3110,6 @@ def _get_group_names(regex: re.Pattern) -> list[Hashable]: return [names.get(1 + i, i) for i in range(regex.groups)] -def _str_extract(arr: ArrayLike, pat: str, flags=0, expand: bool = True): - """ - Find groups in each string in the array using passed regular expression. - - Returns - ------- - np.ndarray or list of lists is expand is True - """ - regex = re.compile(pat, flags=flags) - - empty_row = [np.nan] * regex.groups - - def f(x): - if not isinstance(x, str): - return empty_row - m = regex.search(x) - if m: - return [np.nan if item is None else item for item in m.groups()] - else: - return empty_row - - if expand: - return [f(val) for val in np.asarray(arr)] - - return np.array([f(val)[0] for val in np.asarray(arr)], dtype=object) - - def str_extractall(arr, pat, flags=0): regex = re.compile(pat, flags=flags) # the regex must contain capture groups. 
diff --git a/pandas/core/strings/base.py b/pandas/core/strings/base.py index 730870b448cb2..cd71844d3b527 100644 --- a/pandas/core/strings/base.py +++ b/pandas/core/strings/base.py @@ -230,3 +230,7 @@ def _str_split(self, pat=None, n=-1, expand=False): @abc.abstractmethod def _str_rsplit(self, pat=None, n=-1): pass + + @abc.abstractmethod + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + pass diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index c214ada9c1ada..7ce4abe904f3b 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -32,7 +32,9 @@ def __len__(self): # For typing, _str_map relies on the object being sized. raise NotImplementedError - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): """ Map a callable over valid element of the array. @@ -47,6 +49,8 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None): for object-dtype and Categorical and ``pd.NA`` for StringArray. dtype : Dtype, optional The dtype of the result array. + convert : bool, default True + Whether to call `maybe_convert_objects` on the resulting ndarray """ if dtype is None: dtype = np.dtype("object") @@ -60,9 +64,9 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None): arr = np.asarray(self, dtype=object) mask = isna(arr) - convert = not np.all(mask) + map_convert = convert and not np.all(mask) try: - result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert) + result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert) except (TypeError, AttributeError) as e: # Reraise the exception if callable `f` got wrong number of args. # The user may want to be warned by this, instead of getting NaN @@ -88,7 +92,7 @@ def g(x): return result if na_value is not np.nan: np.putmask(result, mask, na_value) - if result.dtype == object: + if convert and result.dtype == object: result = lib.maybe_convert_objects(result) return result @@ -410,3 +414,28 @@ def _str_lstrip(self, to_strip=None): def _str_rstrip(self, to_strip=None): return self._str_map(lambda x: x.rstrip(to_strip)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + regex = re.compile(pat, flags=flags) + na_value = self._str_na_value + + if not expand: + + def g(x): + m = regex.search(x) + return m.groups()[0] if m else na_value + + return self._str_map(g, convert=False) + + empty_row = [na_value] * regex.groups + + def f(x): + if not isinstance(x, str): + return empty_row + m = regex.search(x) + if m: + return [na_value if item is None else item for item in m.groups()] + else: + return empty_row + + return [f(val) for val in np.asarray(self)]
Perf-neutral refactor; another step towards #41372. After this, we can use pyarrow native functions for the expand case, and use `_wrap_result` and `_str_map` (without a perf impact).
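For context, a sketch of the user-facing behavior the refactor keeps intact (`str.extract` output shapes are unchanged):

```python
import pandas as pd

ser = pd.Series(["a1", "b2", "c3"])

# expand=True -> DataFrame with one column per capture group.
print(ser.str.extract(r"([ab])(\d)", expand=True))

# expand=False with a single group -> Series (NaN where no match).
print(ser.str.extract(r"([ab])", expand=False))
```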
https://api.github.com/repos/pandas-dev/pandas/pulls/41663
2021-05-25T16:06:25Z
2021-05-27T01:52:24Z
2021-05-27T01:52:24Z
2021-05-27T10:18:44Z
DEPR: Series(dt64naive, dtype=dt64tz) -> will match DatetimeIndex
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..e0f77d8cffbb8 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -696,6 +696,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..df79276f67386 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1657,6 +1657,22 @@ def maybe_cast_to_datetime( # Numeric values are UTC at this point, # so localize and convert # equiv: Series(dta).astype(dtype) # though deprecated + if getattr(vdtype, "kind", None) == "M": + # GH#24559, GH#33401 deprecate behavior inconsistent + # with DatetimeArray/DatetimeIndex + warnings.warn( + "In a future version, constructing a Series " + "from datetime64[ns] data and a " + "DatetimeTZDtype will interpret the data " + "as wall-times instead of " + "UTC times, matching the behavior of " + "DatetimeIndex. 
To treat the data as UTC " + "times, use pd.Series(data).dt" + ".tz_localize('UTC').tz_convert(dtype.tz) " + "or pd.Series(data.view('int64'), dtype=dtype)", + FutureWarning, + stacklevel=5, + ) value = dta.tz_localize("UTC").tz_convert(dtype.tz) except OutOfBoundsDatetime: diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index af730bf299336..646d1f0ab1508 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1094,7 +1094,21 @@ def test_construction_consistency(self): result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype) tm.assert_series_equal(result, ser) - result = Series(ser.values, dtype=ser.dtype) + msg = "will interpret the data as wall-times" + with tm.assert_produces_warning(FutureWarning, match=msg): + # deprecate behavior inconsistent with DatetimeIndex GH#33401 + result = Series(ser.values, dtype=ser.dtype) + tm.assert_series_equal(result, ser) + + with tm.assert_produces_warning(None): + # one suggested alternative to the deprecated usage + middle = Series(ser.values).dt.tz_localize("UTC") + result = middle.dt.tz_convert(ser.dtype.tz) + tm.assert_series_equal(result, ser) + + with tm.assert_produces_warning(None): + # the other suggested alternative to the deprecated usage + result = Series(ser.values.view("int64"), dtype=ser.dtype) tm.assert_series_equal(result, ser) @pytest.mark.parametrize(
- [ ] xref #33401 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry I think this is the last deprecation needed for the constructor/astype consistency.
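A sketch of the two suggested alternatives from the deprecation message (both interpret the naive values as UTC, matching the old behavior):

```python
import pandas as pd

values = pd.date_range("2016-01-01", periods=3).values  # datetime64[ns]
dtype = pd.DatetimeTZDtype(tz="US/Central")

# pd.Series(values, dtype=dtype) now emits a FutureWarning; instead:
alt1 = pd.Series(values).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)
alt2 = pd.Series(values.view("int64"), dtype=dtype)
assert alt1.equals(alt2)
```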
https://api.github.com/repos/pandas-dev/pandas/pulls/41662
2021-05-25T14:55:41Z
2021-05-31T14:50:51Z
2021-05-31T14:50:51Z
2021-05-31T15:12:52Z
DOC: update styler user guide for new `text_gradient`
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 86696cc909764..219b74407fae4 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1012,7 +1012,8 @@ " - [.highlight_min][minfunc] and [.highlight_max][maxfunc]: for use with identifying extremeties in data.\n", " - [.highlight_between][betweenfunc] and [.highlight_quantile][quantilefunc]: for use with identifying classes within data.\n", " - [.background_gradient][bgfunc]: a flexible method for highlighting cells based or their, or other, values on a numeric scale.\n", - " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n", + " - [.text_gradient][textfunc]: similar method for highlighting text based on their, or other, values on a numeric scale.\n", + " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n", " \n", "The individual documentation on each function often gives more examples of their arguments.\n", "\n", @@ -1022,6 +1023,7 @@ "[betweenfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_between.rst\n", "[quantilefunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_quantile.rst\n", "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n", + "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst\n", "[barfunc]: ../reference/api/pandas.io.formats.style.Styler.bar.rst" ] }, @@ -1098,14 +1100,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Background Gradient" + "### Background Gradient and Text Gradient" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." + "You can create \"heatmaps\" with the `background_gradient` and `text_gradient` methods. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." ] }, { @@ -1120,19 +1122,31 @@ "df2.style.background_gradient(cmap=cm)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df2.style.text_gradient(cmap=cm)" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "[.background_gradient][bgfunc] has a number of keyword arguments to customise the gradients and colors. See its documentation.\n", + "[.background_gradient][bgfunc] and [.text_gradient][textfunc] have a number of keyword arguments to customise the gradients and colors. See the documentation.\n", "\n", - "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst" + "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n", + "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "### Set properties\n", + "\n", "Use `Styler.set_properties` when the style doesn't actually depend on the values. This is just a simple wrapper for `.applymap` where the function returns the same properties for all cells." ] },
Add `text_gradient` to Styler user guide after merge.
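A minimal usage sketch (requires matplotlib, like the notebook cells above; the `cmap` name here is just an example):

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": [3, 2, 1]})

# Colors the text instead of the cell background.
styler = df.style.text_gradient(cmap="viridis")
html = styler.to_html()
```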
https://api.github.com/repos/pandas-dev/pandas/pulls/41661
2021-05-25T10:22:55Z
2021-05-25T12:43:05Z
2021-05-25T12:43:05Z
2021-05-25T13:33:18Z
ENH: add long and short captions to `Styler.to_latex`
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ba6bfb9da11cc..2f024af34b19d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -143,7 +143,7 @@ One also has greater control of the display through separate sparsification of t Render trimming has also been added for large numbers of data elements to avoid browser overload (:issue:`40712`). We have added an extension to allow LaTeX styling as an alternative to CSS styling and a method :meth:`.Styler.to_latex` -which renders the necessary LaTeX format including built-up styles. An additional file io function :meth:`.Styler.to_html` has been added for convenience (:issue:`40312`). +which renders the necessary LaTeX format including built-up styles (:issue:`21673`, :issue:`41659`). An additional file io function :meth:`Styler.to_html` has been added for convenience (:issue:`40312`). Documentation has also seen major revisions in light of new features (:issue:`39720` :issue:`39317` :issue:`40493`) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d6c151c3ed740..7b88d53dd7f4e 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -87,8 +87,8 @@ class Styler(StylerRenderer): List of {selector: (attr, value)} dicts; see Notes. uuid : str, default None A unique identifier to avoid CSS collisions; generated automatically. - caption : str, default None - Caption to attach to the table. + caption : str, tuple, default None + String caption to attach to the table. Tuple only used for LaTeX dual captions. table_attributes : str, default None Items that show up in the opening ``<table>`` tag in addition to automatic (by default) id. @@ -175,7 +175,7 @@ def __init__( precision: int | None = None, table_styles: CSSStyles | None = None, uuid: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, table_attributes: str | None = None, cell_ids: bool = True, na_rep: str | None = None, @@ -419,7 +419,7 @@ def to_latex( position_float: str | None = None, hrules: bool = False, label: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, sparse_index: bool | None = None, sparse_columns: bool | None = None, multirow_align: str = "c", @@ -460,8 +460,10 @@ def to_latex( label : str, optional The LaTeX label included as: \\label{<label>}. This is used with \\ref{<label>} in the main .tex file. - caption : str, optional - The LaTeX table caption included as: \\caption{<caption>}. + caption : str, tuple, optional + If string, the LaTeX table caption included as: \\caption{<caption>}. + If tuple, i.e ("full caption", "short caption"), the caption included + as: \\caption[<caption[1]>]{<caption[0]>}. sparse_index : bool, optional Whether to sparsify the display of a hierarchical index. Setting to False will display each explicit level element in a hierarchical key for each row. @@ -1344,13 +1346,16 @@ def set_uuid(self, uuid: str) -> Styler: self.uuid = uuid return self - def set_caption(self, caption: str) -> Styler: + def set_caption(self, caption: str | tuple) -> Styler: """ Set the text added to a ``<caption>`` HTML element. Parameters ---------- - caption : str + caption : str, tuple + For HTML output either the string input is used or the first element of the + tuple. For LaTeX the string input provides a caption and the additional + tuple input allows for full captions and short captions, in that order. 
Returns ------- diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 7af8802673f80..7686d8a340c37 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -75,7 +75,7 @@ def __init__( uuid_len: int = 5, table_styles: CSSStyles | None = None, table_attributes: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, cell_ids: bool = True, ): diff --git a/pandas/io/formats/templates/html_table.tpl b/pandas/io/formats/templates/html_table.tpl index dadefa4bd8365..33153af6f0882 100644 --- a/pandas/io/formats/templates/html_table.tpl +++ b/pandas/io/formats/templates/html_table.tpl @@ -6,8 +6,10 @@ <table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}> {% endif %} {% block caption %} -{% if caption %} +{% if caption and caption is string %} <caption>{{caption}}</caption> +{% elif caption and caption is sequence %} + <caption>{{caption[0]}}</caption> {% endif %} {% endblock caption %} {% block thead %} diff --git a/pandas/io/formats/templates/latex.tpl b/pandas/io/formats/templates/latex.tpl index e5db6ad8ca7f8..66fe99642850f 100644 --- a/pandas/io/formats/templates/latex.tpl +++ b/pandas/io/formats/templates/latex.tpl @@ -9,9 +9,12 @@ {% if position_float is not none%} \{{position_float}} {% endif %} -{% if caption %} +{% if caption and caption is string %} \caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} + {% endif %} {% for style in table_styles %} {% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %} diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py index 6c3abe04db926..74b4c7ea3977c 100644 --- a/pandas/tests/io/formats/style/test_html.py +++ b/pandas/tests/io/formats/style/test_html.py @@ -231,3 +231,8 @@ def test_from_custom_template(tmpdir): assert result.template_html is not Styler.template_html styler = result(DataFrame({"A": [1, 2]})) assert styler.render() + + +def test_caption_as_sequence(styler): + styler.set_caption(("full cap", "short cap")) + assert "<caption>full cap</caption>" in styler.render() diff --git a/pandas/tests/io/formats/style/test_to_latex.py b/pandas/tests/io/formats/style/test_to_latex.py index 5945502a4c90c..97347bddaa187 100644 --- a/pandas/tests/io/formats/style/test_to_latex.py +++ b/pandas/tests/io/formats/style/test_to_latex.py @@ -438,3 +438,8 @@ def test_parse_latex_table_wrapping(styler): overwrite=False, ) assert _parse_latex_table_wrapping(styler.table_styles, None) is True + + +def test_short_caption(styler): + result = styler.to_latex(caption=("full cap", "short cap")) + assert "\\caption[short cap]{full cap}" in result
Completes an item on the list for the `DataFrame.to_latex` deprecation (#41649).
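A small sketch of the new tuple form (plain string captions keep working as before):

```python
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=["c", "d"])

# ("full caption", "short caption") renders as \caption[short]{full}.
latex = df.style.to_latex(caption=("full cap", "short cap"))
assert "\\caption[short cap]{full cap}" in latex
```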
https://api.github.com/repos/pandas-dev/pandas/pulls/41659
2021-05-25T07:58:00Z
2021-06-04T14:40:12Z
2021-06-04T14:40:12Z
2021-06-04T18:10:35Z
DOC: add `Styler.to_latex` info in `io.rst` doc page
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 7f0cd613726dc..d26e511202f9c 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -22,6 +22,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>` text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>` text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>` + text;`LaTeX <https://en.wikipedia.org/wiki/LaTeX>`__;;:ref:`Styler.to_latex<io.latex>` text;`XML <https://www.w3.org/standards/xml/core>`__;:ref:`read_xml<io.read_xml>`;:ref:`to_xml<io.xml>` text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>` binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>` @@ -2830,7 +2831,42 @@ parse HTML tables in the top-level pandas io function ``read_html``. .. |lxml| replace:: **lxml** .. _lxml: https://lxml.de +.. _io.latex: +LaTeX +----- + +.. versionadded:: 1.3.0 + +Currently there are no methods to read from LaTeX, only output methods. + +Writing to LaTeX files +'''''''''''''''''''''' + +.. note:: + + DataFrame *and* Styler objects currently have a ``to_latex`` method. We recommend + using the `Styler.to_latex() <../reference/api/pandas.io.formats.style.Styler.to_latex.rst>`__ method + over `DataFrame.to_latex() <../reference/api/pandas.DataFrame.to_latex.rst>`__ due to the former's greater flexibility with + conditional styling, and the latter's possible future deprecation. + +Review the documentation for `Styler.to_latex <../reference/api/pandas.io.formats.style.Styler.to_latex.rst>`__, +which gives examples of conditional styling and explains the operation of its keyword +arguments. + +For simple application the following pattern is sufficient. + +.. ipython:: python + + df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["c", "d"]) + print(df.style.to_latex()) + +To format values before output, chain the `Styler.format <../reference/api/pandas.io.formats.style.Styler.format.rst>`__ +method. + +.. ipython:: python + + print(df.style.format("€ {}").to_latex()) XML ---
Follow-on from adding `Styler.to_latex`. This is a basic addition that primarily points to the core method docs (which have detailed descriptions and examples).
https://api.github.com/repos/pandas-dev/pandas/pulls/41658
2021-05-25T06:47:54Z
2021-05-25T14:46:23Z
2021-05-25T14:46:23Z
2021-05-25T15:00:11Z
ENH: Deprecate arguments #41485
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index c87cd9b116f2b..655ee09a02ca8 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -694,6 +694,7 @@ Deprecations - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) - diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index ad08b8d4b7097..d957a669351c1 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -30,7 +30,10 @@ AbstractMethodError, ParserWarning, ) -from pandas.util._decorators import Appender +from pandas.util._decorators import ( + Appender, + deprecate_nonkeyword_arguments, +) from pandas.core.dtypes.common import ( is_file_like, @@ -472,6 +475,9 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): return parser.read(nrows) +@deprecate_nonkeyword_arguments( + version=None, allowed_args=["filepath_or_buffer"], stacklevel=3 +) @Appender( _doc_read_csv_and_table.format( func_name="read_csv", diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index adafbf38439d5..eba5e52516b4c 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -733,6 +733,18 @@ def test_read_csv_delimiter_and_sep_no_default(all_parsers): parser.read_csv(f, sep=" ", delimiter=".") +def test_read_csv_posargs_deprecation(all_parsers): + # GH 41485 + f = StringIO("a,b\n1,2") + parser = all_parsers + msg = ( + "In a future version of pandas all arguments of read_csv " + "except for the argument 'filepath_or_buffer' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + parser.read_csv(f, " ") + + @pytest.mark.parametrize("delimiter", [",", "\t"]) def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter): # GH: 35958
- [ ] xref #41485 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
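A sketch of what the deprecation looks like from the user side:

```python
from io import StringIO
import warnings

import pandas as pd

# Positional `sep` now warns; keyword usage is the supported spelling.
with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    pd.read_csv(StringIO("a,b\n1,2"), ",")
assert any(issubclass(x.category, FutureWarning) for x in w)

pd.read_csv(StringIO("a,b\n1,2"), sep=",")  # no warning
```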
https://api.github.com/repos/pandas-dev/pandas/pulls/41657
2021-05-25T05:18:11Z
2021-05-27T18:18:15Z
2021-05-27T18:18:15Z
2021-05-29T08:21:48Z
BUG: groupby.transform/agg caching *args with numba engine
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 258e391b9220c..6d1a6a4e96b33 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -988,6 +988,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) +- Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b27eb4bb8f325..1c0a3dcc1e1db 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1131,10 +1131,16 @@ def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) group_keys = self.grouper._get_group_keys() numba_transform_func = numba_.generate_numba_transform_func( - tuple(args), kwargs, func, engine_kwargs + kwargs, func, engine_kwargs ) result = numba_transform_func( - sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) + sorted_data, + sorted_index, + starts, + ends, + len(group_keys), + len(data.columns), + *args, ) cache_key = (func, "groupby_transform") @@ -1157,11 +1163,15 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) starts, ends, sorted_index, sorted_data = self._numba_prep(func, data) group_keys = self.grouper._get_group_keys() - numba_agg_func = numba_.generate_numba_agg_func( - tuple(args), kwargs, func, engine_kwargs - ) + numba_agg_func = numba_.generate_numba_agg_func(kwargs, func, engine_kwargs) result = numba_agg_func( - sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) + sorted_data, + sorted_index, + starts, + ends, + len(group_keys), + len(data.columns), + *args, ) cache_key = (func, "groupby_agg") diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index 26070fcb5e89c..ad78280c5d835 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -56,11 +56,12 @@ def f(values, index, ...): def generate_numba_agg_func( - args: tuple, kwargs: dict[str, Any], func: Callable[..., Scalar], engine_kwargs: dict[str, bool] | None, -) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: +) -> Callable[ + [np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, Any], np.ndarray +]: """ Generate a numba jitted agg function specified by values from engine_kwargs. 
@@ -72,8 +73,6 @@ def generate_numba_agg_func( Parameters ---------- - args : tuple - *args to be passed into the function kwargs : dict **kwargs to be passed into the function func : function @@ -103,6 +102,7 @@ def group_agg( end: np.ndarray, num_groups: int, num_columns: int, + *args: Any, ) -> np.ndarray: result = np.empty((num_groups, num_columns)) for i in numba.prange(num_groups): @@ -116,11 +116,12 @@ def group_agg( def generate_numba_transform_func( - args: tuple, kwargs: dict[str, Any], func: Callable[..., np.ndarray], engine_kwargs: dict[str, bool] | None, -) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: +) -> Callable[ + [np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, Any], np.ndarray +]: """ Generate a numba jitted transform function specified by values from engine_kwargs. @@ -132,8 +133,6 @@ def generate_numba_transform_func( Parameters ---------- - args : tuple - *args to be passed into the function kwargs : dict **kwargs to be passed into the function func : function @@ -163,6 +162,7 @@ def group_transform( end: np.ndarray, num_groups: int, num_columns: int, + *args: Any, ) -> np.ndarray: result = np.empty((len(values), num_columns)) for i in numba.prange(num_groups): diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 6de81d03ca418..ba2d6eeb287c0 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -6,7 +6,9 @@ from pandas import ( DataFrame, + Index, NamedAgg, + Series, option_context, ) import pandas._testing as tm @@ -154,3 +156,20 @@ def test_multifunc_notimplimented(agg_func): with pytest.raises(NotImplementedError, match="Numba engine can"): grouped[1].agg(agg_func, engine="numba") + + +@td.skip_if_no("numba", "0.46.0") +def test_args_not_cached(): + # GH 41647 + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.agg(sum_last, 1, engine="numba") + expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) + + result = grouped_x.agg(sum_last, 2, engine="numba") + expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index fbee2361b9b45..8019071be72f3 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -5,6 +5,7 @@ from pandas import ( DataFrame, + Series, option_context, ) import pandas._testing as tm @@ -146,3 +147,20 @@ def test_multifunc_notimplimented(agg_func): with pytest.raises(NotImplementedError, match="Numba engine can"): grouped[1].transform(agg_func, engine="numba") + + +@td.skip_if_no("numba", "0.46.0") +def test_args_not_cached(): + # GH 41647 + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.transform(sum_last, 1, engine="numba") + expected = Series([1.0] * 4, name="x") + tm.assert_series_equal(result, expected) + + result = grouped_x.transform(sum_last, 2, engine="numba") + expected = Series([2.0] * 4, name="x") + tm.assert_series_equal(result, expected)
- [x] closes #41647 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
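A condensed version of the new tests (requires numba; the second call must see the new `n` rather than the cached one):

```python
import pandas as pd

def sum_last(values, index, n):
    return values[-n:].sum()

df = pd.DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]})
grouped_x = df.groupby("id")["x"]

# Before the fix, the second call kept using n=1 from the cached args.
print(grouped_x.agg(sum_last, 1, engine="numba"))  # 1.0 per group
print(grouped_x.agg(sum_last, 2, engine="numba"))  # 2.0 per group
```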
https://api.github.com/repos/pandas-dev/pandas/pulls/41656
2021-05-25T04:57:22Z
2021-05-26T01:46:37Z
2021-05-26T01:46:36Z
2021-05-26T03:26:26Z
REF: de-duplicate _format_attrs
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8fb88e625d948..74659c98dbb7e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1158,14 +1158,18 @@ def _format_data(self, name=None) -> str_t: is_justify = False return format_object_summary( - self, self._formatter_func, is_justify=is_justify, name=name + self, + self._formatter_func, + is_justify=is_justify, + name=name, + line_break_each_value=self._is_multi, ) - def _format_attrs(self): + def _format_attrs(self) -> list[tuple[str_t, str_t | int]]: """ Return a list of tuples of the (attr,formatted_value). """ - return format_object_attrs(self) + return format_object_attrs(self, include_dtype=not self._is_multi) def _mpl_repr(self): # how to represent ourselves to matplotlib @@ -2407,6 +2411,13 @@ def is_all_dates(self) -> bool: ) return self._is_all_dates + @cache_readonly + def _is_multi(self) -> bool: + """ + Cached check equivalent to isinstance(self, MultiIndex) + """ + return isinstance(self, ABCMultiIndex) + # -------------------------------------------------------------------- # Pickle Methods diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e835990eb8d89..9b4ddb9d5c222 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -324,13 +324,8 @@ def _format_attrs(self): # error: "CategoricalIndex" has no attribute "ordered" ("ordered", self.ordered), # type: ignore[attr-defined] ] - if self.name is not None: - attrs.append(("name", ibase.default_pprint(self.name))) - attrs.append(("dtype", f"'{self.dtype.name}'")) - max_seq_items = get_option("display.max_seq_items") or len(self) - if len(self) > max_seq_items: - attrs.append(("length", len(self))) - return attrs + extra = super()._format_attrs() + return attrs + extra def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]: from pandas.io.formats.printing import pprint_thing diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b2377f5b27966..857353e0f56f7 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -361,7 +361,9 @@ def _format_attrs(self): freq = self.freqstr if freq is not None: freq = repr(freq) - attrs.append(("freq", freq)) + # Argument 1 to "append" of "list" has incompatible type + # "Tuple[str, Optional[str]]"; expected "Tuple[str, Union[str, int]]" + attrs.append(("freq", freq)) # type: ignore[arg-type] return attrs def _summary(self, name=None) -> str: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index fc92a1b3afe53..e4618007cc4dc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -16,8 +16,6 @@ import numpy as np -from pandas._config import get_option - from pandas._libs import lib from pandas._libs.interval import ( Interval, @@ -80,7 +78,6 @@ from pandas.core.indexes.base import ( Index, _index_shared_docs, - default_pprint, ensure_index, maybe_extract_name, ) @@ -919,49 +916,9 @@ def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) def _format_data(self, name=None) -> str: - # TODO: integrate with categorical and make generic # name argument is unused here; just for compat with base / categorical - n = len(self) - max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10) - - formatter = str - - if n == 0: - summary = "[]" - elif n == 1: - first = 
formatter(self[0]) - summary = f"[{first}]" - elif n == 2: - first = formatter(self[0]) - last = formatter(self[-1]) - summary = f"[{first}, {last}]" - else: - - if n > max_seq_items: - n = min(max_seq_items // 2, 10) - head = [formatter(x) for x in self[:n]] - tail = [formatter(x) for x in self[-n:]] - head_joined = ", ".join(head) - tail_joined = ", ".join(tail) - summary = f"[{head_joined} ... {tail_joined}]" - else: - tail = [formatter(x) for x in self] - joined = ", ".join(tail) - summary = f"[{joined}]" - - return summary + "," + self._format_space() - - def _format_attrs(self): - attrs = [] - if self.name is not None: - attrs.append(("name", default_pprint(self.name))) - attrs.append(("dtype", f"'{self.dtype}'")) - return attrs - - def _format_space(self) -> str: - space = " " * (len(type(self).__name__) + 1) - return f"\n{space}" + return self._data._format_data() + "," + self._format_space() # -------------------------------------------------------------------- # Set Operations diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1a3719233a1da..b50c741b123e2 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -89,11 +89,7 @@ lexsort_indexer, ) -from pandas.io.formats.printing import ( - format_object_attrs, - format_object_summary, - pprint_thing, -) +from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas import ( @@ -1287,20 +1283,6 @@ def _formatter_func(self, tup): formatter_funcs = [level._formatter_func for level in self.levels] return tuple(func(val) for func, val in zip(formatter_funcs, tup)) - def _format_data(self, name=None) -> str: - """ - Return the formatted data as a unicode string - """ - return format_object_summary( - self, self._formatter_func, name=name, line_break_each_value=True - ) - - def _format_attrs(self): - """ - Return a list of tuples of the (attr,formatted_value). - """ - return format_object_attrs(self, include_dtype=False) - def _format_native_types(self, na_rep="nan", **kwargs): new_levels = [] new_codes = []
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
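For illustration, a minimal sketch of the repr path this consolidates (not part of the patch): `CategoricalIndex` now keeps only its categorical-specific attributes and appends the shared base attributes.

```
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", "a"])
# name, dtype and length handling now come from the shared
# Index._format_attrs; CategoricalIndex only prepends categories/ordered.
repr(ci)
# "CategoricalIndex(['a', 'b', 'a'], categories=['a', 'b'], "
# "ordered=False, dtype='category')"
```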
https://api.github.com/repos/pandas-dev/pandas/pulls/41655
2021-05-24T23:37:24Z
2021-05-25T12:37:52Z
2021-05-25T12:37:52Z
2021-05-25T14:01:37Z
API: EA._can_hold_na -> EDtype._can_hold_na
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 7dddb9f3d6f25..4f2d80e73fedf 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -35,6 +35,7 @@ from pandas.util._decorators import ( Appender, Substitution, + cache_readonly, ) from pandas.util._validators import ( validate_bool_kwarg, @@ -1273,7 +1274,9 @@ def _concat_same_type( # such as take(), reindex(), shift(), etc. In addition, those results # will then be of the ExtensionArray subclass rather than an array # of objects - _can_hold_na = True + @cache_readonly + def _can_hold_na(self) -> bool: + return self.dtype._can_hold_na def _reduce(self, name: str, *, skipna: bool = True, **kwargs): """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 068f5703649fa..ae9d7dcd648e3 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -353,7 +353,6 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi # tolist is not actually deprecated, just suppressed in the __dir__ _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"]) _typ = "categorical" - _can_hold_na = True _dtype: CategoricalDtype diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 4b264eef4bada..17f12536b4663 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -159,6 +159,10 @@ class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray): _recognized_scalars: tuple[type, ...] _ndarray: np.ndarray + @cache_readonly + def _can_hold_na(self) -> bool: + return True + def __init__(self, data, dtype: Dtype | None = None, freq=None, copy=False): raise AbstractMethodError(self) diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 9671c340a0a92..414c60603b9fe 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -367,6 +367,13 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: else: return None + @property + def _can_hold_na(self) -> bool: + """ + Can arrays of this dtype hold NA values? + """ + return True + def register_extension_dtype(cls: type[E]) -> type[E]: """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c7769046c70b2..9a1be4d010196 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -175,15 +175,15 @@ def is_view(self) -> bool: return values.base is not None @final - @property + @cache_readonly def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ - values = self.values - if isinstance(values, np.ndarray): - return values.dtype.kind not in ["b", "i", "u"] - return values._can_hold_na + dtype = self.dtype + if isinstance(dtype, np.dtype): + return dtype.kind not in ["b", "i", "u"] + return dtype._can_hold_na @final @cache_readonly diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 2402c70a166b7..13dec96b144ff 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -14,9 +14,11 @@ class CustomBlock(ExtensionBlock): _holder = np.ndarray - # error: Cannot override final attribute "_can_hold_na" - # (previously declared in base class "Block") - _can_hold_na = False # type: ignore[misc] + + # Cannot override final attribute "_can_hold_na" + @property # type: ignore[misc] + def _can_hold_na(self) -> bool: + return False @pytest.fixture
- [x] closes #40574 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
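A minimal sketch of the new hook for a third-party dtype (the `NoNADtype` class is hypothetical; only the `ExtensionDtype._can_hold_na` default comes from this patch):

```
from pandas.api.extensions import ExtensionDtype

class NoNADtype(ExtensionDtype):
    """Hypothetical dtype whose arrays cannot hold NA values."""
    name = "no_na"
    type = int

    @classmethod
    def construct_array_type(cls):
        raise NotImplementedError  # sketch only

    @property
    def _can_hold_na(self) -> bool:
        # overrides the new ExtensionDtype default of True;
        # ExtensionArray._can_hold_na now reads self.dtype._can_hold_na
        return False
```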
https://api.github.com/repos/pandas-dev/pandas/pulls/41654
2021-05-24T23:34:41Z
2021-06-08T12:55:41Z
2021-06-08T12:55:41Z
2021-06-08T14:43:16Z
FMT: trim redundant freqstr from PeriodIndex __repr__
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index aee0d4fecd6ae..ec69d9ccbdd90 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1119,14 +1119,14 @@ def to_period(self, freq=None) -> PeriodArray: ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], - dtype='period[M]', freq='M') + dtype='period[M]') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], - dtype='period[D]', freq='D') + dtype='period[D]') """ from pandas.core.arrays import PeriodArray diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 101209be30b40..c2323c8697eee 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -564,15 +564,15 @@ def asfreq(self, freq=None, how: str = "E") -> PeriodArray: >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], - dtype='period[A-DEC]', freq='A-DEC') + dtype='period[A-DEC]') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', - '2015-12'], dtype='period[M]', freq='M') + '2015-12'], dtype='period[M]') >>> pidx.asfreq('M', how='S') PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', - '2015-01'], dtype='period[M]', freq='M') + '2015-01'], dtype='period[M]') """ how = libperiod.validate_end_alias(how) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 136843938b683..fb51f4ba08bfe 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -153,11 +153,11 @@ class PeriodIndex(DatetimeIndexOpsMixin): -------- >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) >>> idx - PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC') + PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]') """ _typ = "periodindex" - _attributes = ["name", "freq"] + _attributes = ["name"] # define my properties & methods for delegation _is_numeric_dtype = False @@ -636,7 +636,7 @@ def period_range( PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], - dtype='period[M]', freq='M') + dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the @@ -645,7 +645,7 @@ def period_range( >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... 
end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], - dtype='period[M]', freq='M') + dtype='period[M]') """ if com.count_not_none(start, end, periods) != 2: raise ValueError( diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index a8f8406e24fef..70156092eeabe 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -44,7 +44,9 @@ def test_str(self, simple_index): if hasattr(idx, "tz"): if idx.tz is not None: assert idx.tz in str(idx) - if hasattr(idx, "freq"): + if isinstance(idx, pd.PeriodIndex): + assert f"dtype='period[{idx.freqstr}]'" in str(idx) + else: assert f"freq='{idx.freqstr}'" in str(idx) def test_view(self, simple_index): diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 7d054a7af4a4d..bfd83f1360671 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -62,40 +62,31 @@ def test_representation(self, method): idx9 = pd.period_range("2013Q1", periods=3, freq="Q") idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D") - exp1 = "PeriodIndex([], dtype='period[D]', freq='D')" + exp1 = "PeriodIndex([], dtype='period[D]')" - exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')" + exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]')" - exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', freq='D')" + exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]')" exp4 = ( "PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " - "dtype='period[D]', freq='D')" + "dtype='period[D]')" ) - exp5 = ( - "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', " - "freq='A-DEC')" - ) + exp5 = "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]')" exp6 = ( "PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], " - "dtype='period[H]', freq='H')" + "dtype='period[H]')" ) - exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', freq='Q-DEC')" + exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]')" - exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', freq='Q-DEC')" + exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]')" - exp9 = ( - "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " - "dtype='period[Q-DEC]', freq='Q-DEC')" - ) + exp9 = "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='period[Q-DEC]')" - exp10 = ( - "PeriodIndex(['2011-01-01', '2011-02-01'], " - "dtype='period[3D]', freq='3D')" - ) + exp10 = "PeriodIndex(['2011-01-01', '2011-02-01'], dtype='period[3D]')" for idx, expected in zip( [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10],
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
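The user-visible effect, taken from the docstring updates in this diff:

```
>>> import pandas as pd
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx  # freqstr is no longer repeated after the dtype
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
            dtype='period[A-DEC]')
```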
https://api.github.com/repos/pandas-dev/pandas/pulls/41653
2021-05-24T23:28:04Z
2021-05-25T12:39:44Z
2021-05-25T12:39:44Z
2021-05-25T14:16:58Z
GH41457 Upgrade Bootstrap to v5.0
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html index 023bfe9e26b78..b3bb1a2a3f86d 100644 --- a/web/pandas/_templates/layout.html +++ b/web/pandas/_templates/layout.html @@ -14,8 +14,8 @@ <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel='shortcut icon' type='image/x-icon' href='{{ base_url }}/static/img/favicon.ico'/> <link rel="stylesheet" - href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" - integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" + href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" + integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous"> {% for stylesheet in static.css %} <link rel="stylesheet" @@ -27,14 +27,14 @@ <header> <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark"> <div class="container"> - <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> + <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %} <div class="collapse navbar-collapse" id="nav-content"> - <ul class="navbar-nav ml-auto"> + <ul class="navbar-nav ms-auto"> {% for item in navbar %} {% if not item.has_subitems %} <li class="nav-item"> @@ -43,7 +43,7 @@ {% else %} <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" - data-toggle="dropdown" + data-bs-toggle="dropdown" id="{{ item.slug }}" href="#" role="button" @@ -68,7 +68,7 @@ </div> </main> <footer class="container pt-4 pt-md-5 border-top"> - <ul class="list-inline social-buttons float-right"> + <ul class="list-inline social-buttons float-end"> <li class="list-inline-item"> <a href="https://twitter.com/pandas_dev/"> <i class="fab fa-twitter"></i> @@ -89,15 +89,9 @@ pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS.</a> </p> </footer> - - <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" - integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" - crossorigin="anonymous"></script> - <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" - integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" - crossorigin="anonymous"></script> - <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" - integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" + + <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/js/bootstrap.bundle.min.js" + integrity="sha384-gtEjrD/SeCtmISkJkNUaaKMoLD0//ElJ19smozuHV6z3Iehds+3Ulb9Bn9Plx0x4" crossorigin="anonymous"></script> </body> </html> diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md index 9f4ebaf97598c..0163a1c8110b2 100644 --- a/web/pandas/contribute.md +++ b/web/pandas/contribute.md @@ -14,7 +14,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). 
<i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-building fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Corporate support</h4> + <h4 class="service-heading mt-3 fw-bold blue">Corporate support</h4> <p class="text-muted"> pandas depends on companies and institutions using the software to support its development. Hiring people to work on pandas, or letting existing employees to contribute to the @@ -28,7 +28,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). <i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-users fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Individual contributors</h4> + <h4 class="service-heading mt-3 fw-bold blue">Individual contributors</h4> <p class="text-muted"> pandas is mostly developed by volunteers. All kind of contributions are welcome, such as contributions to the code, to the website (including graphical designers), @@ -42,7 +42,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). <i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-dollar-sign fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Donations</h4> + <h4 class="service-heading mt-3 fw-bold blue">Donations</h4> <p class="text-muted"> Individual donations are appreciated, and are used for things like the project infrastructure, travel expenses for our volunteer contributors to attend diff --git a/web/pandas/index.html b/web/pandas/index.html index 75c797d6dd93d..930f6caa59cb9 100644 --- a/web/pandas/index.html +++ b/web/pandas/index.html @@ -3,7 +3,7 @@ <div class="container"> <div class="row"> <div class="col-md-9"> - <section class="jumbotron text-center"> + <section class="h-30 p-5 bg-light border rounded-3 text-center mb-4"> <h1>pandas</h1> <p> <strong>pandas</strong> is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,<br/> @@ -98,7 +98,7 @@ <h4>Previous versions</h4> {% endif %} {% if releases[5:] %} <p class="text-center"> - <a data-toggle="collapse" href="#show-more-releases" role="button" aria-expanded="false" aria-controls="show-more-releases">Show more</a> + <a data-bs-toggle="collapse" href="#show-more-releases" role="button" aria-expanded="false" aria-controls="show-more-releases">Show more</a> </p> <ul id="show-more-releases" class="collapse"> {% for release in releases[5:] %} diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css index 459f006db5727..67955dd35587c 100644 --- a/web/pandas/static/css/pandas.css +++ b/web/pandas/static/css/pandas.css @@ -42,6 +42,18 @@ ol ol, ol ul, ul ol, ul ul { a.navbar-brand img { height: 3rem; } +a:link:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: none; +} +a:visited:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: none; +} +a:hover:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: underline; +} +a:active:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: underline; +} div.card { margin: 0 0 .2em .2em !important; }
What's new:

- Update Bootstrap script tags in template
- Replace jumbotron with utilities
- Replace font-weight-bold with fw-bold
- Replace ml-auto with ms-auto
- Replace float-right with float-end
- Replace data-target with data-bs-target
- Replace data-toggle with data-bs-toggle
- Add CSS rules for <a>

- [x] closes #41457
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41651
2021-05-24T22:19:48Z
2021-07-06T21:32:53Z
2021-07-06T21:32:52Z
2021-07-06T22:33:05Z
Deprecated nonkeyword arguments for set_codes function
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 645443c450146..c6e0e5bf2ffcd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -679,6 +679,7 @@ Deprecations - Deprecated the ``convert_float`` optional argument in :func:`read_excel` and :meth:`ExcelFile.parse` (:issue:`41127`) - Deprecated behavior of :meth:`DatetimeIndex.union` with mixed timezones; in a future version both will be cast to UTC instead of object dtype (:issue:`39328`) - Deprecated using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) +- Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1362679ae0064..59882422f5439 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -991,6 +991,7 @@ def _set_codes( self._reset_cache() + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "codes"]) def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True): """ Set new codes on MultiIndex. Defaults to returning new index. @@ -1058,7 +1059,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = Tr warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=3, ) else: inplace = False diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index e756f95bb2bc5..e806ee1751b00 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -449,3 +449,25 @@ def test_set_levels_pos_args_deprecation(): names=["foo", "bar"], ) tm.assert_index_equal(result, expected) + + +def test_set_codes_pos_args_depreciation(idx): + # https://github.com/pandas-dev/pandas/issues/41485 + msg = ( + r"In a future version of pandas all arguments of MultiIndex.set_codes except " + r"for the argument 'codes' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = idx.set_codes([[0, 0, 1, 2, 3, 3], [0, 1, 0, 1, 0, 1]], [0, 1]) + expected = MultiIndex.from_tuples( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + names=["first", "second"], + ) + tm.assert_index_equal(result, expected)
- [ ] xref #41485 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
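A sketch of the call style being deprecated, based on the test added in this diff:

```
import pandas as pd

mi = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]])
codes = [[1, 0], [0, 1]]

mi.set_codes(codes, level=[0, 1])  # fine: only `codes` may be positional
mi.set_codes(codes, [0, 1])        # FutureWarning: `level` passed positionally
```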
https://api.github.com/repos/pandas-dev/pandas/pulls/41650
2021-05-24T21:25:28Z
2021-05-27T14:44:26Z
2021-05-27T14:44:26Z
2021-05-27T14:44:30Z
BUG: Series[int].loc setitem with Series[int] results in Series[float]
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 177b1ccd166cb..741fc323262b6 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2218,6 +2218,8 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool: if dtype.kind in ["i", "u"]: if tipo is not None: if tipo.kind not in ["i", "u"]: + if is_float(element) and element.is_integer(): + return True # Anything other than integer we cannot hold return False elif dtype.itemsize < tipo.itemsize: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c7769046c70b2..adca54abce04d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -781,14 +781,6 @@ def _replace_list( # so un-tile here return self.replace(src_list, dest_list[0], inplace, regex) - # https://github.com/pandas-dev/pandas/issues/40371 - # the following pairs check code caused a regression so we catch that case here - # until the issue is fixed properly in can_hold_element - - # error: "Iterable[Any]" has no attribute "tolist" - if hasattr(src_list, "tolist"): - src_list = src_list.tolist() # type: ignore[attr-defined] - # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 3f850dfbc6a39..13054062defb4 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -158,7 +158,7 @@ def test_setitem_series_object_dtype(self, indexer, ser_index): expected = Series([Series([42], index=[ser_index]), 0], dtype="object") tm.assert_series_equal(ser, expected) - @pytest.mark.parametrize("index, exp_value", [(0, 42.0), (1, np.nan)]) + @pytest.mark.parametrize("index, exp_value", [(0, 42), (1, np.nan)]) def test_setitem_series(self, index, exp_value): # GH#38303 ser = Series([0, 0])
```
>>> pd.__version__
'1.3.0.dev0+1695.g55e58542db'
>>>
>>> s = pd.Series([0, 0])
>>> s
0    0
1    0
dtype: int64
>>>
>>> s2 = pd.Series([42])
>>> s2
0    42
dtype: int64
>>>
>>> s.loc[0] = s2
>>> s
0    42.0
1     0.0
dtype: float64
>>>
```

Does not need a release note: this issue is only on master, as on 1.2.4 this raises `ValueError: No axis named 1 for object type Series`, which was fixed in #39358.

Draft, since if we merge and backport #40555 first, we can also remove the patch in this PR.
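With this patch the assignment no longer upcasts (per the updated test, where `exp_value` changes from `42.0` to `42`):

```
>>> s = pd.Series([0, 0])
>>> s.loc[0] = pd.Series([42])
>>> s
0    42
1     0
dtype: int64
```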
https://api.github.com/repos/pandas-dev/pandas/pulls/41644
2021-05-24T15:32:39Z
2021-06-08T14:12:08Z
2021-06-08T14:12:08Z
2021-06-09T21:57:09Z
REF: share __array_wrap__
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8fb88e625d948..33f31c4ce96c0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -813,6 +813,7 @@ def __array_wrap__(self, result, context=None): return result attrs = self._get_attributes_dict() + attrs.pop("freq", None) # For DatetimeIndex/TimedeltaIndex return Index(result, **attrs) @cache_readonly diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b2377f5b27966..185f1dced72bf 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -35,7 +35,6 @@ ) from pandas.core.dtypes.common import ( - is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_integer, @@ -113,15 +112,10 @@ def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions. """ - result = lib.item_from_zerodim(result) - if is_bool_dtype(result) or lib.is_scalar(result): - return result - - attrs = self._get_attributes_dict() - if not is_period_dtype(self.dtype) and attrs["freq"]: - # no need to infer if freq is None - attrs["freq"] = "infer" - return type(self)(result, **attrs) + out = super().__array_wrap__(result, context=context) + if isinstance(out, DatetimeTimedeltaMixin) and self.freq is not None: + out = out._with_freq("infer") + return out # ------------------------------------------------------------------------ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index ac09159c23566..d02f415a4f29f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -257,8 +257,8 @@ class DatetimeIndex(DatetimeTimedeltaMixin): _engine_type = libindex.DatetimeEngine _supports_partial_string_indexing = True - _comparables = ["name", "freqstr", "tz"] - _attributes = ["name", "tz", "freq"] + _comparables = ["name", "freqstr"] + _attributes = ["name", "freq"] _is_numeric_dtype = False diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index fc92a1b3afe53..1dfcb0ec29f27 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -256,7 +256,7 @@ def func(self, other, sort=None): class IntervalIndex(ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name", "closed"] + _attributes = ["name"] # annotate properties pinned via inherit_names closed: str @@ -422,12 +422,8 @@ def __contains__(self, key: Any) -> bool: def _multiindex(self) -> MultiIndex: return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) - def __array_wrap__(self, result, context=None): - # we don't want the superclass implementation - return result - def __reduce__(self): - d = {"left": self.left, "right": self.right} + d = {"left": self.left, "right": self.right, "closed": self.closed} d.update(self._get_attributes_dict()) return _new_IntervalIndex, (type(self), d), None diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 136843938b683..2f2d16b01af53 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -34,7 +34,6 @@ from pandas.util._decorators import doc from pandas.core.dtypes.common import ( - is_bool_dtype, is_datetime64_any_dtype, is_float, is_integer, @@ -350,42 +349,6 @@ def __contains__(self, key: Any) -> bool: # ------------------------------------------------------------------------ # Index Methods - def __array_wrap__(self, result, context=None): - """ - Gets called after a ufunc and other functions. 
- - Needs additional handling as PeriodIndex stores internal data as int - dtype - - Replace this to __numpy_ufunc__ in future version and implement - __array_function__ for Indexes - """ - if isinstance(context, tuple) and len(context) > 0: - func = context[0] - if func is np.add: - pass - elif func is np.subtract: - name = self.name - left = context[1][0] - right = context[1][1] - if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex): - name = left.name if left.name == right.name else None - return Index(result, name=name) - elif isinstance(left, Period) or isinstance(right, Period): - return Index(result, name=name) - elif isinstance(func, np.ufunc): - if "M->M" not in func.types: - msg = f"ufunc '{func.__name__}' not supported for the PeriodIndex" - # This should be TypeError, but TypeError cannot be raised - # from here because numpy catches. - raise ValueError(msg) - - if is_bool_dtype(result): - return result - # the result is object dtype array of Period - # cannot pass _simple_new as it is - return type(self)(result, freq=self.freq, name=self.name) - def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray: """ where : array of timestamps
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
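A rough illustration of the shared path (a sketch; exact freq handling depends on the ufunc):

```
import numpy as np
import pandas as pd

tdi = pd.timedelta_range("1 Day", periods=3)  # TimedeltaIndex, freq='D'
out = np.negative(tdi)
# Index.__array_wrap__ re-wraps the ndarray result; datetime-like
# subclasses with a freq then call _with_freq("infer") instead of
# each maintaining its own __array_wrap__.
isinstance(out, pd.TimedeltaIndex)  # True
```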
https://api.github.com/repos/pandas-dev/pandas/pulls/41643
2021-05-24T15:26:15Z
2021-05-25T12:48:10Z
2021-05-25T12:48:10Z
2021-05-25T13:58:22Z
typo fix
diff --git a/setup.cfg b/setup.cfg index f39e377e50c97..6ce66a6f2bdbd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,7 @@ classifiers = Environment :: Console Intended Audience :: Science/Research License :: OSI Approved :: BSD License - Operating System :: OS Independen + Operating System :: OS Independent Programming Language :: Cython Programming Language :: Python Programming Language :: Python :: 3
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41642
2021-05-24T15:24:22Z
2021-05-24T15:43:40Z
2021-05-24T15:43:40Z
2021-05-24T15:43:47Z
[POC] implement test_arithmetic.py
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index a323e2487e356..71b2774a92612 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas as pd -from pandas._libs.tslibs import Timestamp, Timedelta +from pandas._libs.tslibs import Timestamp from pandas.tests.indexes.common import Base @@ -26,42 +26,6 @@ def full_like(array, value): return ret -class TestIndexArithmeticWithTimedeltaScalar(object): - - @pytest.mark.parametrize('index', [ - Int64Index(range(1, 11)), - UInt64Index(range(1, 11)), - Float64Index(range(1, 11)), - RangeIndex(1, 11)]) - @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), - Timedelta(days=1).to_timedelta64(), - Timedelta(days=1).to_pytimedelta()]) - def test_index_mul_timedelta(self, scalar_td, index): - # GH#19333 - expected = pd.timedelta_range('1 days', '10 days') - - result = index * scalar_td - tm.assert_index_equal(result, expected) - commute = scalar_td * index - tm.assert_index_equal(commute, expected) - - @pytest.mark.parametrize('index', [Int64Index(range(1, 3)), - UInt64Index(range(1, 3)), - Float64Index(range(1, 3)), - RangeIndex(1, 3)]) - @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), - Timedelta(days=1).to_timedelta64(), - Timedelta(days=1).to_pytimedelta()]) - def test_index_rdiv_timedelta(self, scalar_td, index): - expected = pd.TimedeltaIndex(['1 Day', '12 Hours']) - - result = scalar_td / index - tm.assert_index_equal(result, expected) - - with pytest.raises(TypeError): - index / scalar_td - - class Numeric(Base): def test_can_hold_identifiers(self): diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index bf2308cd8c097..2571498ca802c 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -891,22 +891,3 @@ def test_td64series_mul_timedeltalike_invalid(self, scalar_td): td1 * scalar_td with tm.assert_raises_regex(TypeError, pattern): scalar_td * td1 - - -class TestTimedeltaSeriesInvalidArithmeticOps(object): - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_td64series_pow_invalid(self, scalar_td): - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - pattern = 'operate|unsupported|cannot|not supported' - with tm.assert_raises_regex(TypeError, pattern): - scalar_td ** td1 - with tm.assert_raises_regex(TypeError, pattern): - td1 ** scalar_td diff --git a/pandas/tests/test_arithmetic.py b/pandas/tests/test_arithmetic.py new file mode 100644 index 0000000000000..f15b629f15ae3 --- /dev/null +++ b/pandas/tests/test_arithmetic.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Arithmetc tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+from datetime import timedelta + +import pytest +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + +from pandas import Timedelta + + +# ------------------------------------------------------------------ +# Numeric dtypes Arithmetic with Timedelta Scalar + +class TestNumericArraylikeArithmeticWithTimedeltaScalar(object): + + @pytest.mark.parametrize('box', [ + pd.Index, + pd.Series, + pytest.param(pd.DataFrame, + marks=pytest.mark.xfail(reason="block.eval incorrect", + strict=True)) + ]) + @pytest.mark.parametrize('index', [ + pd.Int64Index(range(1, 11)), + pd.UInt64Index(range(1, 11)), + pd.Float64Index(range(1, 11)), + pd.RangeIndex(1, 11)], + ids=lambda x: type(x).__name__) + @pytest.mark.parametrize('scalar_td', [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()], + ids=lambda x: type(x).__name__) + def test_index_mul_timedelta(self, scalar_td, index, box): + # GH#19333 + + if (box is pd.Series and + type(scalar_td) is timedelta and index.dtype == 'f8'): + raise pytest.xfail(reason="Cannot multiply timedelta by float") + + expected = pd.timedelta_range('1 days', '10 days') + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = index * scalar_td + tm.assert_equal(result, expected) + + commute = scalar_td * index + tm.assert_equal(commute, expected) + + @pytest.mark.parametrize('box', [pd.Index, pd.Series, pd.DataFrame]) + @pytest.mark.parametrize('index', [ + pd.Int64Index(range(1, 3)), + pd.UInt64Index(range(1, 3)), + pd.Float64Index(range(1, 3)), + pd.RangeIndex(1, 3)], + ids=lambda x: type(x).__name__) + @pytest.mark.parametrize('scalar_td', [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()], + ids=lambda x: type(x).__name__) + def test_index_rdiv_timedelta(self, scalar_td, index, box): + + if box is pd.Series and type(scalar_td) is timedelta: + raise pytest.xfail(reason="TODO: Figure out why this case fails") + if box is pd.DataFrame and isinstance(scalar_td, timedelta): + raise pytest.xfail(reason="TODO: Figure out why this case fails") + + expected = pd.TimedeltaIndex(['1 Day', '12 Hours']) + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = scalar_td / index + tm.assert_equal(result, expected) + + with pytest.raises(TypeError): + index / scalar_td + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Arithmetic Operations + + +class TestTimedeltaArraylikeInvalidArithmeticOps(object): + + @pytest.mark.parametrize('box', [ + pd.Index, + pd.Series, + pytest.param(pd.DataFrame, + marks=pytest.mark.xfail(reason="raises ValueError " + "instead of TypeError", + strict=True)) + ]) + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_td64series_pow_invalid(self, scalar_td, box): + td1 = pd.Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot|not supported' + with tm.assert_raises_regex(TypeError, pattern): + scalar_td ** td1 + + with tm.assert_raises_regex(TypeError, pattern): + td1 ** scalar_td diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 9697c991122dd..6dffbcb0b4f01 100644 --- a/pandas/util/testing.py +++ 
b/pandas/util/testing.py @@ -1478,6 +1478,50 @@ def assert_panel_equal(left, right, assert item in left, msg +def assert_equal(left, right, **kwargs): + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. + + Parameters + ---------- + left : Index, Series, or DataFrame + right : Index, Series, or DataFrame + **kwargs + """ + if isinstance(left, pd.Index): + assert_index_equal(left, right, **kwargs) + elif isinstance(left, pd.Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, pd.DataFrame): + assert_frame_equal(left, right, **kwargs) + else: + raise NotImplementedError(type(left)) + + +def box_expected(expected, box_cls): + """ + Helper function to wrap the expected output of a test in a given box_class. + + Parameters + ---------- + expected : np.ndarray, Index, Series + box_cls : {Index, Series, DataFrame} + + Returns + ------- + subclass of box_cls + """ + if box_cls is pd.Index: + expected = pd.Index(expected) + elif box_cls is pd.Series: + expected = pd.Series(expected) + elif box_cls is pd.DataFrame: + expected = pd.Series(expected).to_frame() + else: + raise NotImplementedError(box_cls) + return expected + + # ----------------------------------------------------------------------------- # Sparse
There are a ton of scattered arithmetic tests for Index/Series/DataFrame that _should_ be testing the same things, but in fact are haphazard. Fixing this given the current structure would entail an enormous amount of code duplication. This PR is a proof of concept for gathering all those tests in one test_arithmetic.py file, parametrizing them, and ensuring that the relevant behavior is identical across arraylike classes. As Datetime/Timedelta/Period EA come online, the case for de-duplication will be even stronger. In this form it is really easy to track (via xfails) what behavior needs fixing.
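The pattern in miniature (the helpers `tm.box_expected` / `tm.assert_equal` are added in this diff):

```
import pandas as pd
import pandas.util.testing as tm

box = pd.Series  # the tests parametrize over Index / Series / DataFrame
index = pd.Int64Index(range(1, 11))
expected = pd.timedelta_range('1 days', '10 days')

# box both sides the same way, then dispatch to the right assert_*_equal
result = tm.box_expected(index, box) * pd.Timedelta(days=1)
tm.assert_equal(result, tm.box_expected(expected, box))
```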
https://api.github.com/repos/pandas-dev/pandas/pulls/22033
2018-07-23T21:14:22Z
2018-07-29T16:04:58Z
2018-07-29T16:04:58Z
2018-07-29T17:12:00Z
REF/API: Stricter extension checking.
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 905073645fcb3..4a0bf67f47bae 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -9,7 +9,8 @@ from pandas.core.dtypes.dtypes import ( registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype, - IntervalDtypeType, ExtensionDtype) + IntervalDtypeType, PandasExtensionDtype, ExtensionDtype, + _pandas_registry) from pandas.core.dtypes.generic import ( ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass, @@ -1709,17 +1710,9 @@ def is_extension_array_dtype(arr_or_dtype): Third-party libraries may implement arrays or types satisfying this interface as well. """ - from pandas.core.arrays import ExtensionArray - - if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): - arr_or_dtype = arr_or_dtype._values - - try: - arr_or_dtype = pandas_dtype(arr_or_dtype) - except TypeError: - pass - - return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) + dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) + return (isinstance(dtype, ExtensionDtype) or + registry.find(dtype) is not None) def is_complex_dtype(arr_or_dtype): @@ -1999,12 +1992,12 @@ def pandas_dtype(dtype): return dtype # registered extension types - result = registry.find(dtype) + result = _pandas_registry.find(dtype) or registry.find(dtype) if result is not None: return result # un-registered extension types - elif isinstance(dtype, ExtensionDtype): + elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): return dtype # try a numpy dtype diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index cf771a127a696..f53ccc86fc4ff 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -22,9 +22,9 @@ class Registry(object): -------- registry.register(MyExtensionDtype) """ - dtypes = [] + def __init__(self): + self.dtypes = [] - @classmethod def register(self, dtype): """ Parameters @@ -50,7 +50,7 @@ def find(self, dtype): dtype_type = dtype if not isinstance(dtype, type): dtype_type = type(dtype) - if issubclass(dtype_type, (PandasExtensionDtype, ExtensionDtype)): + if issubclass(dtype_type, ExtensionDtype): return dtype return None @@ -65,6 +65,9 @@ def find(self, dtype): registry = Registry() +# TODO(Extension): remove the second registry once all internal extension +# dtypes are real extension dtypes. 
+_pandas_registry = Registry() class PandasExtensionDtype(_DtypeOpsMixin): @@ -822,7 +825,7 @@ def is_dtype(cls, dtype): # register the dtypes in search order -registry.register(DatetimeTZDtype) -registry.register(PeriodDtype) registry.register(IntervalDtype) registry.register(CategoricalDtype) +_pandas_registry.register(DatetimeTZDtype) +_pandas_registry.register(PeriodDtype) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 02ac7fc7d5ed7..55c841ba1fc46 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, registry) + IntervalDtype, CategoricalDtype, registry, _pandas_registry) from pandas.core.dtypes.common import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, @@ -775,21 +775,31 @@ def test_update_dtype_errors(self, bad_dtype): @pytest.mark.parametrize( 'dtype', - [DatetimeTZDtype, CategoricalDtype, - PeriodDtype, IntervalDtype]) + [CategoricalDtype, IntervalDtype]) def test_registry(dtype): assert dtype in registry.dtypes +@pytest.mark.parametrize('dtype', [DatetimeTZDtype, PeriodDtype]) +def test_pandas_registry(dtype): + assert dtype not in registry.dtypes + assert dtype in _pandas_registry.dtypes + + @pytest.mark.parametrize( 'dtype, expected', [('int64', None), ('interval', IntervalDtype()), ('interval[int64]', IntervalDtype()), ('interval[datetime64[ns]]', IntervalDtype('datetime64[ns]')), - ('category', CategoricalDtype()), - ('period[D]', PeriodDtype('D')), - ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))]) + ('category', CategoricalDtype())]) def test_registry_find(dtype, expected): - assert registry.find(dtype) == expected + + +@pytest.mark.parametrize( + 'dtype, expected', + [('period[D]', PeriodDtype('D')), + ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))]) +def test_pandas_registry_find(dtype, expected): + assert _pandas_registry.find(dtype) == expected
Removes is_extension_array_dtype's handling of both arrays and dtypes. Now it handles just arrays, and we provide `is_extension_dtype` for checking whether a dtype is an extension dtype. It's the caller's responsibility to know whether they have an array or dtype. Closes #22021
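From the updated tests, the split between the public registry and the interim `_pandas_registry` looks like this:

```
from pandas.core.dtypes.dtypes import registry, _pandas_registry

registry.find('category')           # CategoricalDtype()
registry.find('period[D]')          # None -- moved out of the public registry
_pandas_registry.find('period[D]')  # PeriodDtype('D')
```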
https://api.github.com/repos/pandas-dev/pandas/pulls/22031
2018-07-23T19:41:56Z
2018-07-31T13:22:48Z
2018-07-31T13:22:47Z
2018-07-31T13:22:52Z
[BLD] [CLN] Close assorted issues - bare exceptions, unused func
diff --git a/pandas/_libs/skiplist.pxd b/pandas/_libs/skiplist.pxd index 82a0862112199..78f206962bcfc 100644 --- a/pandas/_libs/skiplist.pxd +++ b/pandas/_libs/skiplist.pxd @@ -3,8 +3,6 @@ from cython cimport Py_ssize_t -from numpy cimport double_t - cdef extern from "src/skiplist.h": ctypedef struct node_t: @@ -33,7 +31,7 @@ cdef extern from "src/skiplist.h": # Node itself not intended to be exposed. cdef class Node: cdef public: - double_t value + double value list next list width diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx index 5ede31b24118d..23836ef7f4de9 100644 --- a/pandas/_libs/skiplist.pyx +++ b/pandas/_libs/skiplist.pyx @@ -9,9 +9,6 @@ from libc.math cimport log import numpy as np -cimport numpy as cnp -from numpy cimport double_t -cnp.import_array() # MSVC does not have log2! @@ -26,11 +23,11 @@ from random import random cdef class Node: # cdef public: - # double_t value + # double value # list next # list width - def __init__(self, double_t value, list next, list width): + def __init__(self, double value, list next, list width): self.value = value self.next = next self.width = width diff --git a/pandas/_libs/src/compat_helper.h b/pandas/_libs/src/compat_helper.h index bdff61d7d4150..116cd91070a60 100644 --- a/pandas/_libs/src/compat_helper.h +++ b/pandas/_libs/src/compat_helper.h @@ -11,7 +11,7 @@ The full license is in the LICENSE file, distributed with this software. #define PANDAS__LIBS_SRC_COMPAT_HELPER_H_ #include "Python.h" -#include "numpy_helper.h" +#include "helper.h" /* PySlice_GetIndicesEx changes signature in PY3 diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 63ab120833ba1..4dc4fcb00d84d 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -131,7 +131,7 @@ def _validate_timedelta_unit(arg): """ provide validation / translation for timedelta short units """ try: return _unit_map[arg] - except: + except (KeyError, TypeError): if arg is None: return 'ns' raise ValueError("invalid timedelta unit {arg} provided" diff --git a/pandas/io/s3.py b/pandas/io/s3.py index bd2286c5c8569..7d1360934fd53 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -3,7 +3,7 @@ try: import s3fs from botocore.exceptions import NoCredentialsError -except: +except ImportError: raise ImportError("The s3fs library is required to handle s3 files") if compat.PY3: diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9567c08781856..136299a4b81be 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -27,7 +27,7 @@ import scipy _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >= LooseVersion('0.19.0')) -except: +except ImportError: _is_scipy_ge_0190 = False diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index eb40e5521f7f1..aa020ba4c0623 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -303,7 +303,7 @@ def write_legacy_pickles(output_dir): # make sure we are < 0.13 compat (in py3) try: from pandas.compat import zip, cPickle as pickle # noqa - except: + except ImportError: import pickle version = pandas.__version__ diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 45cbbd43cd6a8..c71e26ae56e8e 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -218,7 +218,7 @@ def c_unpickler(path): with open(path, 'rb') as fh: fh.seek(0) return 
c_pickle.load(fh) - except: + except ImportError: c_pickler = None c_unpickler = None diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index f8f742c5980ac..4b0edfce89174 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -468,7 +468,7 @@ def _transaction_test(self): with self.pandasSQL.run_transaction() as trans: trans.execute(ins_sql) raise Exception('error') - except: + except Exception: # ignore raised exception pass res = self.pandasSQL.read_query('SELECT * FROM test_trans') diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 2bc44cb1c683f..ab3fdd8cbf84f 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -27,7 +27,7 @@ import scipy _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >= LooseVersion('0.19.0')) -except: +except ImportError: _is_scipy_ge_0190 = False diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 7d5753d03f4fc..82cd44113cb25 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -1,7 +1,6 @@ from pandas.compat import callable, signature, PY2 from pandas._libs.properties import cache_readonly # noqa import inspect -import types import warnings from textwrap import dedent, wrap from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS @@ -339,48 +338,3 @@ def make_signature(func): if spec.keywords: args.append('**' + spec.keywords) return args, spec.args - - -class docstring_wrapper(object): - """ - Decorator to wrap a function and provide - a dynamically evaluated doc-string. - - Parameters - ---------- - func : callable - creator : callable - return the doc-string - default : str, optional - return this doc-string on error - """ - _attrs = ['__module__', '__name__', - '__qualname__', '__annotations__'] - - def __init__(self, func, creator, default=None): - self.func = func - self.creator = creator - self.default = default - update_wrapper( - self, func, [attr for attr in self._attrs - if hasattr(func, attr)]) - - def __get__(self, instance, cls=None): - - # we are called with a class - if instance is None: - return self - - # we want to return the actual passed instance - return types.MethodType(self, instance) - - def __call__(self, *args, **kwargs): - return self.func(*args, **kwargs) - - @property - def __doc__(self): - try: - return self.creator() - except Exception as exc: - msg = self.default or str(exc) - return msg diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index 01198fc541e0c..5600834f3b615 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -114,7 +114,7 @@ def show_versions(as_json=False): if (as_json): try: import json - except: + except ImportError: import simplejson as json j = dict(system=dict(sys_info), dependencies=dict(deps_blob)) diff --git a/setup.py b/setup.py index d265733738425..f058c8a6e3c99 100755 --- a/setup.py +++ b/setup.py @@ -438,9 +438,12 @@ def get_tag(self): # enable coverage by building cython files by setting the environment variable -# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) +# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext +# with `--with-cython-coverage`enabled linetrace = os.environ.get('PANDAS_CYTHON_COVERAGE', False) -CYTHON_TRACE = str(int(bool(linetrace))) +if '--with-cython-coverage' in sys.argv: + linetrace = True + sys.argv.remove('--with-cython-coverage') # Note: if not using `cythonize`, coverage can be enabled by # pinning 
`ext.cython_directives = directives` to each ext in extensions.
<s>- Removing numpy cimport from cython modules where feasible fixes the npy_deprecated1.7 ... warning.</s> <b>update</b>: Nope!

- Catch specific exceptions in a handful of places.
- Removes unused function #19676
- Implement command-line option to enable cython coverage #21991

closes #19676
closes #21991

- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
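The pattern applied throughout, in miniature (hunk from pandas/core/tools/timedeltas.py; a bare `except:` also swallows `KeyboardInterrupt`/`SystemExit`):

```
_unit_map = {'d': 'D'}  # trimmed mapping, for illustration only

def _validate_timedelta_unit(arg):
    try:
        return _unit_map[arg]
    except (KeyError, TypeError):  # was a bare `except:`
        if arg is None:
            return 'ns'
        raise ValueError("invalid timedelta unit {arg} provided"
                         .format(arg=arg))
```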
https://api.github.com/repos/pandas-dev/pandas/pulls/22030
2018-07-23T19:17:16Z
2018-07-26T12:54:08Z
2018-07-26T12:54:08Z
2018-07-26T16:22:22Z
Separate out internals.concat, internals.managers
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 68698f45d5623..55f2e06a1a976 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -19,6 +19,9 @@ cdef extern from "compat_helper.h": Py_ssize_t *slicelength) except -1 +from algos import ensure_int64 + + cdef class BlockPlacement: # __slots__ = '_as_slice', '_as_array', '_len' cdef slice _as_slice @@ -436,3 +439,26 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): i += 1 yield blkno, result + + +def get_blkno_placements(blknos, blk_count, group=True): + """ + + Parameters + ---------- + blknos : array of int64 + blk_count : int + group : bool + + Returns + ------- + iterator + yield (BlockPlacement, blkno) + + """ + + blknos = ensure_int64(blknos) + + # FIXME: blk_count is unused, but it may avoid the use of dicts in cython + for blkno, indexer in get_blkno_indexers(blknos, group): + yield blkno, BlockPlacement(indexer) diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index a4cd301806569..22caa577c2891 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,2549 +1,14 @@ # -*- coding: utf-8 -*- -import copy -import itertools -import operator -from collections import defaultdict -from functools import partial - -import numpy as np - -from pandas._libs import internals as libinternals - -from pandas.core.base import PandasObject - -from pandas.core.dtypes.dtypes import ( - ExtensionDtype, - PandasExtensionDtype) -from pandas.core.dtypes.common import ( - _NS_DTYPE, - ensure_int64, - is_timedelta64_dtype, - is_datetime64_dtype, is_datetimetz, - is_categorical_dtype, - is_datetimelike_v_numeric, - is_float_dtype, is_numeric_dtype, - is_numeric_v_string_like, is_extension_type, - is_extension_array_dtype, - is_scalar, - _get_dtype) -from pandas.core.dtypes.cast import ( - maybe_promote, - infer_dtype_from_scalar, - find_common_type) -from pandas.core.dtypes.missing import isna -import pandas.core.dtypes.concat as _concat -from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray - -import pandas.core.algorithms as algos - -from pandas.core.index import Index, MultiIndex, ensure_index -from pandas.core.indexing import maybe_convert_indices -from pandas.io.formats.printing import pprint_thing - -from pandas.core.sparse.array import _maybe_to_sparse -from pandas._libs import lib, tslibs -from pandas._libs.internals import BlockPlacement - -from pandas.util._decorators import cache_readonly -from pandas.util._validators import validate_bool_kwarg -from pandas.compat import range, map, zip, u - -from .blocks import ( - Block, - _extend_blocks, _merge_blocks, _safe_reshape, - make_block, get_block_type) from .blocks import ( # noqa:F401 _block2d_to_blocknd, _factor_indexer, _block_shape, # io.pytables + _safe_reshape, # io.packers + make_block, # io.pytables, io.packers FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, TimeDeltaBlock, DatetimeBlock, DatetimeTZBlock, - CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock) - -# TODO: flexible with index=None and/or items=None - - -class BlockManager(PandasObject): - """ - Core internal data structure to implement DataFrame, Series, Panel, etc. - - Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a - lightweight blocked set of labeled data to be manipulated by the DataFrame - public API class - - Attributes - ---------- - shape - ndim - axes - values - items - - Methods - ------- - set_axis(axis, new_labels) - copy(deep=True) - - get_dtype_counts - get_ftype_counts - get_dtypes - get_ftypes - - apply(func, axes, block_filter_fn) - - get_bool_data - get_numeric_data - - get_slice(slice_like, axis) - get(label) - iget(loc) - get_scalar(label_tup) - - take(indexer, axis) - reindex_axis(new_labels, axis) - reindex_indexer(new_labels, indexer, axis) - - delete(label) - insert(loc, label, value) - set(label, value) - - Parameters - ---------- - - - Notes - ----- - This is *not* a public API class - """ - __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', - '_is_consolidated', '_blknos', '_blklocs'] - - def __init__(self, blocks, axes, do_integrity_check=True): - self.axes = [ensure_index(ax) for ax in axes] - self.blocks = tuple(blocks) - - for block in blocks: - if block.is_sparse: - if len(block.mgr_locs) != 1: - raise AssertionError("Sparse block refers to multiple " - "items") - else: - if self.ndim != block.ndim: - raise AssertionError( - 'Number of Block dimensions ({block}) must equal ' - 'number of axes ({self})'.format(block=block.ndim, - self=self.ndim)) - - if do_integrity_check: - self._verify_integrity() - - self._consolidate_check() - - self._rebuild_blknos_and_blklocs() - - def make_empty(self, axes=None): - """ return an empty BlockManager with the items axis of len 0 """ - if axes is None: - axes = [ensure_index([])] + [ensure_index(a) - for a in self.axes[1:]] - - # preserve dtype if possible - if self.ndim == 1: - blocks = np.array([], dtype=self.array_dtype) - else: - blocks = [] - return self.__class__(blocks, axes) - - def __nonzero__(self): - return True - - # Python3 compat - __bool__ = __nonzero__ - - @property - def shape(self): - return tuple(len(ax) for ax in self.axes) - - @property - def ndim(self): - return len(self.axes) - - def set_axis(self, axis, new_labels): - new_labels = ensure_index(new_labels) - old_len = len(self.axes[axis]) - new_len = len(new_labels) - - if new_len != old_len: - raise ValueError( - 'Length mismatch: Expected axis has {old} elements, new ' - 'values have {new} elements'.format(old=old_len, new=new_len)) - - self.axes[axis] = new_labels - - def rename_axis(self, mapper, axis, copy=True, level=None): - """ - Rename one of axes. - - Parameters - ---------- - mapper : unary callable - axis : int - copy : boolean, default True - level : int, default None - - """ - obj = self.copy(deep=copy) - obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) - return obj - - def add_prefix(self, prefix): - f = partial('{prefix}{}'.format, prefix=prefix) - return self.rename_axis(f, axis=0) - - def add_suffix(self, suffix): - f = partial('{}{suffix}'.format, suffix=suffix) - return self.rename_axis(f, axis=0) - - @property - def _is_single_block(self): - if self.ndim == 1: - return True - - if len(self.blocks) != 1: - return False - - blk = self.blocks[0] - return (blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice == slice(0, len(self), 1)) - - def _rebuild_blknos_and_blklocs(self): - """ - Update mgr._blknos / mgr._blklocs. 
- """ - new_blknos = np.empty(self.shape[0], dtype=np.int64) - new_blklocs = np.empty(self.shape[0], dtype=np.int64) - new_blknos.fill(-1) - new_blklocs.fill(-1) - - for blkno, blk in enumerate(self.blocks): - rl = blk.mgr_locs - new_blknos[rl.indexer] = blkno - new_blklocs[rl.indexer] = np.arange(len(rl)) - - if (new_blknos == -1).any(): - raise AssertionError("Gaps in blk ref_locs") - - self._blknos = new_blknos - self._blklocs = new_blklocs - - # make items read only for now - def _get_items(self): - return self.axes[0] - - items = property(fget=_get_items) - - def _get_counts(self, f): - """ return a dict of the counts of the function in BlockManager """ - self._consolidate_inplace() - counts = dict() - for b in self.blocks: - v = f(b) - counts[v] = counts.get(v, 0) + b.shape[0] - return counts - - def get_dtype_counts(self): - return self._get_counts(lambda b: b.dtype.name) - - def get_ftype_counts(self): - return self._get_counts(lambda b: b.ftype) - - def get_dtypes(self): - dtypes = np.array([blk.dtype for blk in self.blocks]) - return algos.take_1d(dtypes, self._blknos, allow_fill=False) - - def get_ftypes(self): - ftypes = np.array([blk.ftype for blk in self.blocks]) - return algos.take_1d(ftypes, self._blknos, allow_fill=False) - - def __getstate__(self): - block_values = [b.values for b in self.blocks] - block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] - axes_array = [ax for ax in self.axes] - - extra_state = { - '0.14.1': { - 'axes': axes_array, - 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) - for b in self.blocks] - } - } - - # First three elements of the state are to maintain forward - # compatibility with 0.13.1. - return axes_array, block_values, block_items, extra_state - - def __setstate__(self, state): - def unpickle_block(values, mgr_locs): - # numpy < 1.7 pickle compat - if values.dtype == 'M8[us]': - values = values.astype('M8[ns]') - return make_block(values, placement=mgr_locs) - - if (isinstance(state, tuple) and len(state) >= 4 and - '0.14.1' in state[3]): - state = state[3]['0.14.1'] - self.axes = [ensure_index(ax) for ax in state['axes']] - self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) - for b in state['blocks']) - else: - # discard anything after 3rd, support beta pickling format for a - # little while longer - ax_arrays, bvalues, bitems = state[:3] - - self.axes = [ensure_index(ax) for ax in ax_arrays] - - if len(bitems) == 1 and self.axes[0].equals(bitems[0]): - # This is a workaround for pre-0.14.1 pickles that didn't - # support unpickling multi-block frames/panels with non-unique - # columns/items, because given a manager with items ["a", "b", - # "a"] there's no way of knowing which block's "a" is where. - # - # Single-block case can be supported under the assumption that - # block items corresponded to manager items 1-to-1. 
- all_mgr_locs = [slice(0, len(bitems[0]))] - else: - all_mgr_locs = [self.axes[0].get_indexer(blk_items) - for blk_items in bitems] - - self.blocks = tuple( - unpickle_block(values, mgr_locs) - for values, mgr_locs in zip(bvalues, all_mgr_locs)) - - self._post_setstate() - - def _post_setstate(self): - self._is_consolidated = False - self._known_consolidated = False - self._rebuild_blknos_and_blklocs() - - def __len__(self): - return len(self.items) - - def __unicode__(self): - output = pprint_thing(self.__class__.__name__) - for i, ax in enumerate(self.axes): - if i == 0: - output += u('\nItems: {ax}'.format(ax=ax)) - else: - output += u('\nAxis {i}: {ax}'.format(i=i, ax=ax)) - - for block in self.blocks: - output += u('\n{block}'.format(block=pprint_thing(block))) - return output - - def _verify_integrity(self): - mgr_shape = self.shape - tot_items = sum(len(x.mgr_locs) for x in self.blocks) - for block in self.blocks: - if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: - construction_error(tot_items, block.shape[1:], self.axes) - if len(self.items) != tot_items: - raise AssertionError('Number of manager items must equal union of ' - 'block items\n# manager items: {0}, # ' - 'tot_items: {1}'.format( - len(self.items), tot_items)) - - def apply(self, f, axes=None, filter=None, do_integrity_check=False, - consolidate=True, **kwargs): - """ - iterate over the blocks, collect and create a new block manager - - Parameters - ---------- - f : the callable or function name to operate on at the block level - axes : optional (if not supplied, use self.axes) - filter : list, if supplied, only call the block if the filter is in - the block - do_integrity_check : boolean, default False. Do the block manager - integrity check - consolidate: boolean, default True. Join together blocks having same - dtype - - Returns - ------- - Block Manager (new object) - - """ - - result_blocks = [] - - # filter kwarg is used in replace-* family of methods - if filter is not None: - filter_locs = set(self.items.get_indexer_for(filter)) - if len(filter_locs) == len(self.items): - # All items are included, as if there were no filtering - filter = None - else: - kwargs['filter'] = filter_locs - - if consolidate: - self._consolidate_inplace() - - if f == 'where': - align_copy = True - if kwargs.get('align', True): - align_keys = ['other', 'cond'] - else: - align_keys = ['cond'] - elif f == 'putmask': - align_copy = False - if kwargs.get('align', True): - align_keys = ['new', 'mask'] - else: - align_keys = ['mask'] - elif f == 'eval': - align_copy = False - align_keys = ['other'] - elif f == 'fillna': - # fillna internally does putmask, maybe it's better to do this - # at mgr, not block level? - align_copy = False - align_keys = ['value'] - else: - align_keys = [] - - # TODO(EA): may interfere with ExtensionBlock.setitem for blocks - # with a .values attribute. 
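# e.g. for f == 'where' the 'other' and 'cond' kwargs may be Series or
# DataFrames; before each block's method runs they are reindexed to that
# block's own items (b_items below), which is what aligned_args collects.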
- aligned_args = dict((k, kwargs[k]) - for k in align_keys - if hasattr(kwargs[k], 'values') and - not isinstance(kwargs[k], ABCExtensionArray)) - - for b in self.blocks: - if filter is not None: - if not b.mgr_locs.isin(filter_locs).any(): - result_blocks.append(b) - continue - - if aligned_args: - b_items = self.items[b.mgr_locs.indexer] - - for k, obj in aligned_args.items(): - axis = getattr(obj, '_info_axis_number', 0) - kwargs[k] = obj.reindex(b_items, axis=axis, - copy=align_copy) - - kwargs['mgr'] = self - applied = getattr(b, f)(**kwargs) - result_blocks = _extend_blocks(applied, result_blocks) - - if len(result_blocks) == 0: - return self.make_empty(axes or self.axes) - bm = self.__class__(result_blocks, axes or self.axes, - do_integrity_check=do_integrity_check) - bm._consolidate_inplace() - return bm - - def reduction(self, f, axis=0, consolidate=True, transposed=False, - **kwargs): - """ - iterate over the blocks, collect and create a new block manager. - This routine is intended for reduction type operations and - will do inference on the generated blocks. - - Parameters - ---------- - f: the callable or function name to operate on at the block level - axis: reduction axis, default 0 - consolidate: boolean, default True. Join together blocks having same - dtype - transposed: boolean, default False - we are holding transposed data - - Returns - ------- - Block Manager (new object) - - """ - - if consolidate: - self._consolidate_inplace() - - axes, blocks = [], [] - for b in self.blocks: - kwargs['mgr'] = self - axe, block = getattr(b, f)(axis=axis, **kwargs) - - axes.append(axe) - blocks.append(block) - - # note that some DatetimeTZ, Categorical are always ndim==1 - ndim = {b.ndim for b in blocks} - - if 2 in ndim: - - new_axes = list(self.axes) - - # multiple blocks that are reduced - if len(blocks) > 1: - new_axes[1] = axes[0] - - # reset the placement to the original - for b, sb in zip(blocks, self.blocks): - b.mgr_locs = sb.mgr_locs - - else: - new_axes[axis] = Index(np.concatenate( - [ax.values for ax in axes])) - - if transposed: - new_axes = new_axes[::-1] - blocks = [b.make_block(b.values.T, - placement=np.arange(b.shape[1]) - ) for b in blocks] - - return self.__class__(blocks, new_axes) - - # 0 ndim - if 0 in ndim and 1 not in ndim: - values = np.array([b.values for b in blocks]) - if len(values) == 1: - return values.item() - blocks = [make_block(values, ndim=1)] - axes = Index([ax[0] for ax in axes]) - - # single block - values = _concat._concat_compat([b.values for b in blocks]) - - # compute the orderings of our original data - if len(self.blocks) > 1: - - indexer = np.empty(len(self.axes[0]), dtype=np.intp) - i = 0 - for b in self.blocks: - for j in b.mgr_locs: - indexer[j] = i - i = i + 1 - - values = values.take(indexer) - - return SingleBlockManager( - [make_block(values, - ndim=1, - placement=np.arange(len(values)))], - axes[0]) - - def isna(self, func, **kwargs): - return self.apply('apply', func=func, **kwargs) - - def where(self, **kwargs): - return self.apply('where', **kwargs) - - def eval(self, **kwargs): - return self.apply('eval', **kwargs) - - def quantile(self, **kwargs): - return self.reduction('quantile', **kwargs) - - def setitem(self, **kwargs): - return self.apply('setitem', **kwargs) - - def putmask(self, **kwargs): - return self.apply('putmask', **kwargs) - - def diff(self, **kwargs): - return self.apply('diff', **kwargs) - - def interpolate(self, **kwargs): - return self.apply('interpolate', **kwargs) - - def shift(self, **kwargs): - return 
self.apply('shift', **kwargs) - - def fillna(self, **kwargs): - return self.apply('fillna', **kwargs) - - def downcast(self, **kwargs): - return self.apply('downcast', **kwargs) - - def astype(self, dtype, **kwargs): - return self.apply('astype', dtype=dtype, **kwargs) - - def convert(self, **kwargs): - return self.apply('convert', **kwargs) - - def replace(self, **kwargs): - return self.apply('replace', **kwargs) - - def replace_list(self, src_list, dest_list, inplace=False, regex=False, - mgr=None): - """ do a list replace """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - - if mgr is None: - mgr = self - - # figure out our mask a-priori to avoid repeated replacements - values = self.as_array() - - def comp(s): - if isna(s): - return isna(values) - return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq) - - masks = [comp(s) for i, s in enumerate(src_list)] - - result_blocks = [] - src_len = len(src_list) - 1 - for blk in self.blocks: - - # its possible to get multiple result blocks here - # replace ALWAYS will return a list - rb = [blk if inplace else blk.copy()] - for i, (s, d) in enumerate(zip(src_list, dest_list)): - new_rb = [] - for b in rb: - if b.dtype == np.object_: - convert = i == src_len - result = b.replace(s, d, inplace=inplace, regex=regex, - mgr=mgr, convert=convert) - new_rb = _extend_blocks(result, new_rb) - else: - # get our mask for this element, sized to this - # particular block - m = masks[i][b.mgr_locs.indexer] - if m.any(): - b = b.coerce_to_target_dtype(d) - new_rb.extend(b.putmask(m, d, inplace=True)) - else: - new_rb.append(b) - rb = new_rb - result_blocks.extend(rb) - - bm = self.__class__(result_blocks, self.axes) - bm._consolidate_inplace() - return bm - - def reshape_nd(self, axes, **kwargs): - """ a 2d-nd reshape operation on a BlockManager """ - return self.apply('reshape_nd', axes=axes, **kwargs) - - def is_consolidated(self): - """ - Return True if more than one block with the same dtype - """ - if not self._known_consolidated: - self._consolidate_check() - return self._is_consolidated - - def _consolidate_check(self): - ftypes = [blk.ftype for blk in self.blocks] - self._is_consolidated = len(ftypes) == len(set(ftypes)) - self._known_consolidated = True - - @property - def is_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return len(self.blocks) > 1 - - @property - def is_numeric_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return all(block.is_numeric for block in self.blocks) - - @property - def is_datelike_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return any(block.is_datelike for block in self.blocks) - - @property - def any_extension_types(self): - """Whether any of the blocks in this manager are extension blocks""" - return any(block.is_extension for block in self.blocks) - - @property - def is_view(self): - """ return a boolean if we are a single block and are a view """ - if len(self.blocks) == 1: - return self.blocks[0].is_view - - # It is technically possible to figure out which blocks are views - # e.g. [ b.values.base is not None for b in self.blocks ] - # but then we have the case of possibly some blocks being a view - # and some blocks not. setting in theory is possible on the non-view - # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit - # complicated - - return False - - def get_bool_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_bool], copy) - - def get_numeric_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_numeric], copy) - - def combine(self, blocks, copy=True): - """ return a new manager with the blocks """ - if len(blocks) == 0: - return self.make_empty() - - # FIXME: optimization potential - indexer = np.sort(np.concatenate([b.mgr_locs.as_array - for b in blocks])) - inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) - - new_blocks = [] - for b in blocks: - b = b.copy(deep=copy) - b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, - axis=0, allow_fill=False) - new_blocks.append(b) - - axes = list(self.axes) - axes[0] = self.items.take(indexer) - - return self.__class__(new_blocks, axes, do_integrity_check=False) - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(slobj) - else: - slicer = [slice(None)] * (axis + 1) - slicer[axis] = slobj - slicer = tuple(slicer) - new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axes[axis][slobj] - - bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) - bm._consolidate_inplace() - return bm - - def __contains__(self, item): - return item in self.items - - @property - def nblocks(self): - return len(self.blocks) - - def copy(self, deep=True, mgr=None): - """ - Make deep or shallow copy of BlockManager - - Parameters - ---------- - deep : boolean o rstring, default True - If False, return shallow copy (do not copy data) - If 'all', copy data and a deep copy of the index - - Returns - ------- - copy : BlockManager - """ - - # this preserves the notion of view copying of axes - if deep: - if deep == 'all': - copy = lambda ax: ax.copy(deep=True) - else: - copy = lambda ax: ax.view() - new_axes = [copy(ax) for ax in self.axes] - else: - new_axes = list(self.axes) - return self.apply('copy', axes=new_axes, deep=deep, - do_integrity_check=False) - - def as_array(self, transpose=False, items=None): - """Convert the blockmanager data into an numpy array. - - Parameters - ---------- - transpose : boolean, default False - If True, transpose the return array - items : list of strings or None - Names of block items that will be included in the returned - array. 
``None`` means that all block items will be used - - Returns - ------- - arr : ndarray - """ - if len(self.blocks) == 0: - arr = np.empty(self.shape, dtype=float) - return arr.transpose() if transpose else arr - - if items is not None: - mgr = self.reindex_axis(items, axis=0) - else: - mgr = self - - if self._is_single_block or not self.is_mixed_type: - arr = mgr.blocks[0].get_values() - else: - arr = mgr._interleave() - - return arr.transpose() if transpose else arr - - def _interleave(self): - """ - Return ndarray from blocks with specified item order - Items must be contained in the blocks - """ - dtype = _interleaved_dtype(self.blocks) - - result = np.empty(self.shape, dtype=dtype) - - if result.shape[0] == 0: - # Workaround for numpy 1.7 bug: - # - # >>> a = np.empty((0,10)) - # >>> a[slice(0,0)] - # array([], shape=(0, 10), dtype=float64) - # >>> a[[]] - # Traceback (most recent call last): - # File "<stdin>", line 1, in <module> - # IndexError: index 0 is out of bounds for axis 0 with size 0 - return result - - itemmask = np.zeros(self.shape[0]) - - for blk in self.blocks: - rl = blk.mgr_locs - result[rl.indexer] = blk.get_values(dtype) - itemmask[rl.indexer] = 1 - - if not itemmask.all(): - raise AssertionError('Some items were not contained in blocks') - - return result - - def to_dict(self, copy=True): - """ - Return a dict of str(dtype) -> BlockManager - - Parameters - ---------- - copy : boolean, default True - - Returns - ------- - values : a dict of dtype -> BlockManager - - Notes - ----- - This consolidates based on str(dtype) - """ - self._consolidate_inplace() - - bd = {} - for b in self.blocks: - bd.setdefault(str(b.dtype), []).append(b) - - return {dtype: self.combine(blocks, copy=copy) - for dtype, blocks in bd.items()} - - def xs(self, key, axis=1, copy=True, takeable=False): - if axis < 1: - raise AssertionError( - 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) - - # take by position - if takeable: - loc = key - else: - loc = self.axes[axis].get_loc(key) - - slicer = [slice(None, None) for _ in range(self.ndim)] - slicer[axis] = loc - slicer = tuple(slicer) - - new_axes = list(self.axes) - - # could be an array indexer! 
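A short, hedged illustration of the interleaving implemented above; ``_data`` is the private BlockManager handle in this era of pandas, so this is a sketch rather than stable API:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
arr = df._data.as_array()
# the int64 and float64 blocks are interleaved to their common dtype,
# with items on axis 0 and rows on axis 1
arr.dtype   # dtype('float64')
arr.shape   # (2, 2)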
- if isinstance(loc, (slice, np.ndarray)): - new_axes[axis] = new_axes[axis][loc] - else: - new_axes.pop(axis) - - new_blocks = [] - if len(self.blocks) > 1: - # we must copy here as we are mixed type - for blk in self.blocks: - newb = make_block(values=blk.values[slicer], - klass=blk.__class__, - placement=blk.mgr_locs) - new_blocks.append(newb) - elif len(self.blocks) == 1: - block = self.blocks[0] - vals = block.values[slicer] - if copy: - vals = vals.copy() - new_blocks = [make_block(values=vals, - placement=block.mgr_locs, - klass=block.__class__)] - - return self.__class__(new_blocks, new_axes) - - def fast_xs(self, loc): - """ - get a cross sectional for a given location in the - items ; handle dups - - return the result, is *could* be a view in the case of a - single block - """ - if len(self.blocks) == 1: - return self.blocks[0].iget((slice(None), loc)) - - items = self.items - - # non-unique (GH4726) - if not items.is_unique: - result = self._interleave() - if self.ndim == 2: - result = result.T - return result[loc] - - # unique - dtype = _interleaved_dtype(self.blocks) - n = len(items) - result = np.empty(n, dtype=dtype) - for blk in self.blocks: - # Such assignment may incorrectly coerce NaT to None - # result[blk.mgr_locs] = blk._slice((slice(None), loc)) - for i, rl in enumerate(blk.mgr_locs): - result[rl] = blk._try_coerce_result(blk.iget((i, loc))) - - return result - - def consolidate(self): - """ - Join together blocks having same dtype - - Returns - ------- - y : BlockManager - """ - if self.is_consolidated(): - return self - - bm = self.__class__(self.blocks, self.axes) - bm._is_consolidated = False - bm._consolidate_inplace() - return bm - - def _consolidate_inplace(self): - if not self.is_consolidated(): - self.blocks = tuple(_consolidate(self.blocks)) - self._is_consolidated = True - self._known_consolidated = True - self._rebuild_blknos_and_blklocs() - - def get(self, item, fastpath=True): - """ - Return values for selected item (ndarray or BlockManager). - """ - if self.items.is_unique: - - if not isna(item): - loc = self.items.get_loc(item) - else: - indexer = np.arange(len(self.items))[isna(self.items)] - - # allow a single nan location indexer - if not is_scalar(indexer): - if len(indexer) == 1: - loc = indexer.item() - else: - raise ValueError("cannot label index with a null key") - - return self.iget(loc, fastpath=fastpath) - else: - - if isna(item): - raise TypeError("cannot label index with a null key") - - indexer = self.items.get_indexer_for([item]) - return self.reindex_indexer(new_axis=self.items[indexer], - indexer=indexer, axis=0, - allow_dups=True) - - def iget(self, i, fastpath=True): - """ - Return the data as a SingleBlockManager if fastpath=True and possible - - Otherwise return as a ndarray - """ - block = self.blocks[self._blknos[i]] - values = block.iget(self._blklocs[i]) - if not fastpath or not block._box_to_block_values or values.ndim != 1: - return values - - # fastpath shortcut for select a single-dim from a 2-dim BM - return SingleBlockManager( - [block.make_block_same_class(values, - placement=slice(0, len(values)), - ndim=1)], - self.axes[1]) - - def get_scalar(self, tup): - """ - Retrieve single item - """ - full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] - blk = self.blocks[self._blknos[full_loc[0]]] - values = blk.values - - # FIXME: this may return non-upcasted types? 
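A sketch of the consolidation life-cycle the methods above manage (again via the private ``_data`` attribute, so illustrative only):

import pandas as pd

df = pd.DataFrame({'a': [1.0]})
df['b'] = 2.0                    # insert() appends a second float64 block
df._data.nblocks                 # 2 -- not yet consolidated
df._data.consolidate().nblocks   # 1 -- same-dtype blocks merged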
- if values.ndim == 1: - return values[full_loc[1]] - - full_loc[0] = self._blklocs[full_loc[0]] - return values[tuple(full_loc)] - - def delete(self, item): - """ - Delete selected item (items if non-unique) in-place. - """ - indexer = self.items.get_loc(item) - - is_deleted = np.zeros(self.shape[0], dtype=np.bool_) - is_deleted[indexer] = True - ref_loc_offset = -is_deleted.cumsum() - - is_blk_deleted = [False] * len(self.blocks) - - if isinstance(indexer, int): - affected_start = indexer - else: - affected_start = is_deleted.nonzero()[0][0] - - for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): - blk = self.blocks[blkno] - bml = blk.mgr_locs - blk_del = is_deleted[bml.indexer].nonzero()[0] - - if len(blk_del) == len(bml): - is_blk_deleted[blkno] = True - continue - elif len(blk_del) != 0: - blk.delete(blk_del) - bml = blk.mgr_locs - - blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) - - # FIXME: use Index.delete as soon as it uses fastpath=True - self.axes[0] = self.items[~is_deleted] - self.blocks = tuple(b for blkno, b in enumerate(self.blocks) - if not is_blk_deleted[blkno]) - self._shape = None - self._rebuild_blknos_and_blklocs() - - def set(self, item, value, check=False): - """ - Set new item in-place. Does not consolidate. Adds new Block if not - contained in the current set of items - if check, then validate that we are not setting the same data in-place - """ - # FIXME: refactor, clearly separate broadcasting & zip-like assignment - # can prob also fix the various if tests for sparse/categorical - - # TODO(EA): Remove an is_extension_ when all extension types satisfy - # the interface - value_is_extension_type = (is_extension_type(value) or - is_extension_array_dtype(value)) - - # categorical/spares/datetimetz - if value_is_extension_type: - - def value_getitem(placement): - return value - else: - if value.ndim == self.ndim - 1: - value = _safe_reshape(value, (1,) + value.shape) - - def value_getitem(placement): - return value - else: - - def value_getitem(placement): - return value[placement.indexer] - - if value.shape[1:] != self.shape[1:]: - raise AssertionError('Shape of new values must be compatible ' - 'with manager shape') - - try: - loc = self.items.get_loc(item) - except KeyError: - # This item wasn't present, just insert at end - self.insert(len(self.items), item, value) - return - - if isinstance(loc, int): - loc = [loc] - - blknos = self._blknos[loc] - blklocs = self._blklocs[loc].copy() - - unfit_mgr_locs = [] - unfit_val_locs = [] - removed_blknos = [] - for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - blk = self.blocks[blkno] - blk_locs = blklocs[val_locs.indexer] - if blk.should_store(value): - blk.set(blk_locs, value_getitem(val_locs), check=check) - else: - unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) - unfit_val_locs.append(val_locs) - - # If all block items are unfit, schedule the block for removal. 
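# e.g. assigning float values into an item currently held by an int64
# block: should_store fails above, so the item is pulled out of its old
# block and re-created below in a new block with the values' dtype.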
- if len(val_locs) == len(blk.mgr_locs): - removed_blknos.append(blkno) - else: - self._blklocs[blk.mgr_locs.indexer] = -1 - blk.delete(blk_locs) - self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) - - if len(removed_blknos): - # Remove blocks & update blknos accordingly - is_deleted = np.zeros(self.nblocks, dtype=np.bool_) - is_deleted[removed_blknos] = True - - new_blknos = np.empty(self.nblocks, dtype=np.int64) - new_blknos.fill(-1) - new_blknos[~is_deleted] = np.arange(self.nblocks - - len(removed_blknos)) - self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, - allow_fill=False) - self.blocks = tuple(blk for i, blk in enumerate(self.blocks) - if i not in set(removed_blknos)) - - if unfit_val_locs: - unfit_mgr_locs = np.concatenate(unfit_mgr_locs) - unfit_count = len(unfit_mgr_locs) - - new_blocks = [] - if value_is_extension_type: - # This code (ab-)uses the fact that sparse blocks contain only - # one item. - new_blocks.extend( - make_block(values=value.copy(), ndim=self.ndim, - placement=slice(mgr_loc, mgr_loc + 1)) - for mgr_loc in unfit_mgr_locs) - - self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + - len(self.blocks)) - self._blklocs[unfit_mgr_locs] = 0 - - else: - # unfit_val_locs contains BlockPlacement objects - unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) - - new_blocks.append( - make_block(values=value_getitem(unfit_val_items), - ndim=self.ndim, placement=unfit_mgr_locs)) - - self._blknos[unfit_mgr_locs] = len(self.blocks) - self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) - - self.blocks += tuple(new_blocks) - - # Newly created block's dtype may already be present. - self._known_consolidated = False - - def insert(self, loc, item, value, allow_duplicates=False): - """ - Insert item at selected position. - - Parameters - ---------- - loc : int - item : hashable - value : array_like - allow_duplicates: bool - If False, trying to insert non-unique item will raise - - """ - if not allow_duplicates and item in self.items: - # Should this be a different kind of error?? - raise ValueError('cannot insert {}, already exists'.format(item)) - - if not isinstance(loc, int): - raise TypeError("loc must be int") - - # insert to the axis; this could possibly raise a TypeError - new_axis = self.items.insert(loc, item) - - block = make_block(values=value, ndim=self.ndim, - placement=slice(loc, loc + 1)) - - for blkno, count in _fast_count_smallints(self._blknos[loc:]): - blk = self.blocks[blkno] - if count == len(blk.mgr_locs): - blk.mgr_locs = blk.mgr_locs.add(1) - else: - new_mgr_locs = blk.mgr_locs.as_array.copy() - new_mgr_locs[new_mgr_locs >= loc] += 1 - blk.mgr_locs = new_mgr_locs - - if loc == self._blklocs.shape[0]: - # np.append is a lot faster (at least in numpy 1.7.1), let's use it - # if we can. - self._blklocs = np.append(self._blklocs, 0) - self._blknos = np.append(self._blknos, len(self.blocks)) - else: - self._blklocs = np.insert(self._blklocs, loc, 0) - self._blknos = np.insert(self._blknos, loc, len(self.blocks)) - - self.axes[0] = new_axis - self.blocks += (block,) - self._shape = None - - self._known_consolidated = False - - if len(self.blocks) > 100: - self._consolidate_inplace() - - def reindex_axis(self, new_index, axis, method=None, limit=None, - fill_value=None, copy=True): - """ - Conform block manager to new index. 
- """ - new_index = ensure_index(new_index) - new_index, indexer = self.axes[axis].reindex(new_index, method=method, - limit=limit) - - return self.reindex_indexer(new_index, indexer, axis=axis, - fill_value=fill_value, copy=copy) - - def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, - allow_dups=False, copy=True): - """ - Parameters - ---------- - new_axis : Index - indexer : ndarray of int64 or None - axis : int - fill_value : object - allow_dups : bool - - pandas-indexer with -1's only. - """ - if indexer is None: - if new_axis is self.axes[axis] and not copy: - return self - - result = self.copy(deep=copy) - result.axes = list(self.axes) - result.axes[axis] = new_axis - return result - - self._consolidate_inplace() - - # some axes don't allow reindexing with dups - if not allow_dups: - self.axes[axis]._can_reindex(indexer) - - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(indexer, - fill_tuple=(fill_value,)) - else: - new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( - fill_value if fill_value is not None else blk.fill_value,)) - for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axis - return self.__class__(new_blocks, new_axes) - - def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): - """ - Slice/take blocks along axis=0. - - Overloaded for SingleBlock - - Returns - ------- - new_blocks : list of Block - - """ - - allow_fill = fill_tuple is not None - - sl_type, slobj, sllen = _preprocess_slice_or_indexer( - slice_or_indexer, self.shape[0], allow_fill=allow_fill) - - if self._is_single_block: - blk = self.blocks[0] - - if sl_type in ('slice', 'mask'): - return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] - elif not allow_fill or self.ndim == 1: - if allow_fill and fill_tuple[0] is None: - _, fill_value = maybe_promote(blk.dtype) - fill_tuple = (fill_value, ) - - return [blk.take_nd(slobj, axis=0, - new_mgr_locs=slice(0, sllen), - fill_tuple=fill_tuple)] - - if sl_type in ('slice', 'mask'): - blknos = self._blknos[slobj] - blklocs = self._blklocs[slobj] - else: - blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, - allow_fill=allow_fill) - blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, - allow_fill=allow_fill) - - # When filling blknos, make sure blknos is updated before appending to - # blocks list, that way new blkno is exactly len(blocks). - # - # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, - # pytables serialization will break otherwise. - blocks = [] - for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - if blkno == -1: - # If we've got here, fill_tuple was not None. - fill_value = fill_tuple[0] - - blocks.append(self._make_na_block(placement=mgr_locs, - fill_value=fill_value)) - else: - blk = self.blocks[blkno] - - # Otherwise, slicing along items axis is necessary. - if not blk._can_consolidate: - # A non-consolidatable block, it's easy, because there's - # only one item and each mgr loc is a copy of that single - # item. 
- for mgr_loc in mgr_locs: - newblk = blk.copy(deep=True) - newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) - blocks.append(newblk) - - else: - blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], - axis=0, new_mgr_locs=mgr_locs, - fill_tuple=None)) - - return blocks - - def _make_na_block(self, placement, fill_value=None): - # TODO: infer dtypes other than float64 from fill_value - - if fill_value is None: - fill_value = np.nan - block_shape = list(self.shape) - block_shape[0] = len(placement) - - dtype, fill_value = infer_dtype_from_scalar(fill_value) - block_values = np.empty(block_shape, dtype=dtype) - block_values.fill(fill_value) - return make_block(block_values, placement=placement) - - def take(self, indexer, axis=1, verify=True, convert=True): - """ - Take items along any axis. - """ - self._consolidate_inplace() - indexer = (np.arange(indexer.start, indexer.stop, indexer.step, - dtype='int64') - if isinstance(indexer, slice) - else np.asanyarray(indexer, dtype='int64')) - - n = self.shape[axis] - if convert: - indexer = maybe_convert_indices(indexer, n) - - if verify: - if ((indexer == -1) | (indexer >= n)).any(): - raise Exception('Indices must be nonzero and less than ' - 'the axis length') - - new_labels = self.axes[axis].take(indexer) - return self.reindex_indexer(new_axis=new_labels, indexer=indexer, - axis=axis, allow_dups=True) - - def merge(self, other, lsuffix='', rsuffix=''): - if not self._is_indexed_like(other): - raise AssertionError('Must have same axes to merge managers') - - l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, - right=other.items, rsuffix=rsuffix) - new_items = _concat_indexes([l, r]) - - new_blocks = [blk.copy(deep=False) for blk in self.blocks] - - offset = self.shape[0] - for blk in other.blocks: - blk = blk.copy(deep=False) - blk.mgr_locs = blk.mgr_locs.add(offset) - new_blocks.append(blk) - - new_axes = list(self.axes) - new_axes[0] = new_items - - return self.__class__(_consolidate(new_blocks), new_axes) - - def _is_indexed_like(self, other): - """ - Check all axes except items - """ - if self.ndim != other.ndim: - raise AssertionError( - 'Number of dimensions must agree got {ndim} and ' - '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) - for ax, oax in zip(self.axes[1:], other.axes[1:]): - if not ax.equals(oax): - return False - return True - - def equals(self, other): - self_axes, other_axes = self.axes, other.axes - if len(self_axes) != len(other_axes): - return False - if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): - return False - self._consolidate_inplace() - other._consolidate_inplace() - if len(self.blocks) != len(other.blocks): - return False - - # canonicalize block order, using a tuple combining the type - # name and then mgr_locs because there might be unconsolidated - # blocks (say, Categorical) which can only be distinguished by - # the iteration order - def canonicalize(block): - return (block.dtype.name, block.mgr_locs.as_array.tolist()) - - self_blocks = sorted(self.blocks, key=canonicalize) - other_blocks = sorted(other.blocks, key=canonicalize) - return all(block.equals(oblock) - for block, oblock in zip(self_blocks, other_blocks)) - - def unstack(self, unstacker_func): - """Return a blockmanager with all blocks unstacked. - - Parameters - ---------- - unstacker_func : callable - A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
- - Returns - ------- - unstacked : BlockManager - """ - dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) - new_columns = dummy.get_new_columns() - new_index = dummy.get_new_index() - new_blocks = [] - columns_mask = [] - - for blk in self.blocks: - blocks, mask = blk._unstack( - partial(unstacker_func, - value_columns=self.items[blk.mgr_locs.indexer]), - new_columns) - - new_blocks.extend(blocks) - columns_mask.extend(mask) - - new_columns = new_columns[columns_mask] - - bm = BlockManager(new_blocks, [new_columns, new_index]) - return bm - - -class SingleBlockManager(BlockManager): - """ manage a single block with """ - - ndim = 1 - _is_consolidated = True - _known_consolidated = True - __slots__ = () - - def __init__(self, block, axis, do_integrity_check=False, fastpath=False): - - if isinstance(axis, list): - if len(axis) != 1: - raise ValueError("cannot create SingleBlockManager with more " - "than 1 axis") - axis = axis[0] - - # passed from constructor, single block, single axis - if fastpath: - self.axes = [axis] - if isinstance(block, list): - - # empty block - if len(block) == 0: - block = [np.array([])] - elif len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - else: - self.axes = [ensure_index(axis)] - - # create the block here - if isinstance(block, list): - - # provide consolidation to the interleaved_dtype - if len(block) > 1: - dtype = _interleaved_dtype(block) - block = [b.astype(dtype) for b in block] - block = _consolidate(block) - - if len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - - if not isinstance(block, Block): - block = make_block(block, placement=slice(0, len(axis)), ndim=1) - - self.blocks = [block] - - def _post_setstate(self): - pass - - @property - def _block(self): - return self.blocks[0] - - @property - def _values(self): - return self._block.values - - @property - def _blknos(self): - """ compat with BlockManager """ - return None - - @property - def _blklocs(self): - """ compat with BlockManager """ - return None - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - return self.__class__(self._block._slice(slobj), - self.index[slobj], fastpath=True) - - @property - def index(self): - return self.axes[0] - - def convert(self, **kwargs): - """ convert the whole block as one """ - kwargs['by_item'] = False - return self.apply('convert', **kwargs) - - @property - def dtype(self): - return self._block.dtype - - @property - def array_dtype(self): - return self._block.array_dtype - - @property - def ftype(self): - return self._block.ftype - - def get_dtype_counts(self): - return {self.dtype.name: 1} - - def get_ftype_counts(self): - return {self.ftype: 1} - - def get_dtypes(self): - return np.array([self._block.dtype]) - - def get_ftypes(self): - return np.array([self._block.ftype]) - - def external_values(self): - return self._block.external_values() - - def internal_values(self): - return self._block.internal_values() - - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - - def get_values(self): - """ return a dense type view """ - return np.array(self._block.to_dense(), copy=False) - - @property - def asobject(self): - """ - return a object dtype array. datetime/timedelta like values are boxed - to Timestamp/Timedelta instances. 
- """ - return self._block.get_values(dtype=object) - - @property - def _can_hold_na(self): - return self._block._can_hold_na - - def is_consolidated(self): - return True - - def _consolidate_check(self): - pass - - def _consolidate_inplace(self): - pass - - def delete(self, item): - """ - Delete single item from SingleBlockManager. - - Ensures that self.blocks doesn't become empty. - """ - loc = self.items.get_loc(item) - self._block.delete(loc) - self.axes[0] = self.axes[0].delete(loc) - - def fast_xs(self, loc): - """ - fast path for getting a cross-section - return a view of the data - """ - return self._block.values[loc] - - def concat(self, to_concat, new_axis): - """ - Concatenate a list of SingleBlockManagers into a single - SingleBlockManager. - - Used for pd.concat of Series objects with axis=0. - - Parameters - ---------- - to_concat : list of SingleBlockManagers - new_axis : Index of the result - - Returns - ------- - SingleBlockManager - - """ - non_empties = [x for x in to_concat if len(x) > 0] - - # check if all series are of the same block type: - if len(non_empties) > 0: - blocks = [obj.blocks[0] for obj in non_empties] - - if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa - new_block = blocks[0].concat_same_type(blocks) - else: - values = [x.values for x in blocks] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - else: - values = [x._block.values for x in to_concat] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - - mgr = SingleBlockManager(new_block, new_axis) - return mgr - - -def construction_error(tot_items, block_shape, axes, e=None): - """ raise a helpful message about our construction """ - passed = tuple(map(int, [tot_items] + list(block_shape))) - implied = tuple(map(int, [len(ax) for ax in axes])) - if passed == implied and e is not None: - raise e - if block_shape[0] == 0: - raise ValueError("Empty data passed with indices specified.") - raise ValueError("Shape of passed values is {0}, indices imply {1}".format( - passed, implied)) - - -def create_block_manager_from_blocks(blocks, axes): - try: - if len(blocks) == 1 and not isinstance(blocks[0], Block): - # if blocks[0] is of length 0, return empty blocks - if not len(blocks[0]): - blocks = [] - else: - # It's OK if a single block is passed as values, its placement - # is basically "all items", but if there're many, don't bother - # converting, it's an error anyway. - blocks = [make_block(values=blocks[0], - placement=slice(0, len(axes[0])))] - - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - - except (ValueError) as e: - blocks = [getattr(b, 'values', b) for b in blocks] - tot_items = sum(b.shape[0] for b in blocks) - construction_error(tot_items, blocks[0].shape[1:], axes, e) - - -def create_block_manager_from_arrays(arrays, names, axes): - - try: - blocks = form_blocks(arrays, names, axes) - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - except ValueError as e: - construction_error(len(arrays), arrays[0].shape, axes, e) - - -def form_blocks(arrays, names, axes): - # put "leftover" items in float bucket, where else? - # generalize? 
- items_dict = defaultdict(list) - extra_locs = [] - - names_idx = ensure_index(names) - if names_idx.equals(axes[0]): - names_indexer = np.arange(len(names_idx)) - else: - assert names_idx.intersection(axes[0]).is_unique - names_indexer = names_idx.get_indexer_for(axes[0]) - - for i, name_idx in enumerate(names_indexer): - if name_idx == -1: - extra_locs.append(i) - continue - - k = names[name_idx] - v = arrays[name_idx] - - block_type = get_block_type(v) - items_dict[block_type.__name__].append((i, k, v)) - - blocks = [] - if len(items_dict['FloatBlock']): - float_blocks = _multi_blockify(items_dict['FloatBlock']) - blocks.extend(float_blocks) - - if len(items_dict['ComplexBlock']): - complex_blocks = _multi_blockify(items_dict['ComplexBlock']) - blocks.extend(complex_blocks) - - if len(items_dict['TimeDeltaBlock']): - timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) - blocks.extend(timedelta_blocks) - - if len(items_dict['IntBlock']): - int_blocks = _multi_blockify(items_dict['IntBlock']) - blocks.extend(int_blocks) - - if len(items_dict['DatetimeBlock']): - datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], - _NS_DTYPE) - blocks.extend(datetime_blocks) - - if len(items_dict['DatetimeTZBlock']): - dttz_blocks = [make_block(array, - klass=DatetimeTZBlock, - placement=[i]) - for i, _, array in items_dict['DatetimeTZBlock']] - blocks.extend(dttz_blocks) - - if len(items_dict['BoolBlock']): - bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) - blocks.extend(bool_blocks) - - if len(items_dict['ObjectBlock']) > 0: - object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) - blocks.extend(object_blocks) - - if len(items_dict['SparseBlock']) > 0: - sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) - blocks.extend(sparse_blocks) - - if len(items_dict['CategoricalBlock']) > 0: - cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) - for i, _, array in items_dict['CategoricalBlock']] - blocks.extend(cat_blocks) - - if len(items_dict['ExtensionBlock']): - - external_blocks = [ - make_block(array, klass=ExtensionBlock, placement=[i]) - for i, _, array in items_dict['ExtensionBlock'] - ] - - blocks.extend(external_blocks) - - if len(extra_locs): - shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) - - # empty items -> dtype object - block_values = np.empty(shape, dtype=object) - block_values.fill(np.nan) - - na_block = make_block(block_values, placement=extra_locs) - blocks.append(na_block) - - return blocks - - -def _simple_blockify(tuples, dtype): - """ return a single array of a block that has a single dtype; if dtype is - not None, coerce to this dtype - """ - values, placement = _stack_arrays(tuples, dtype) - - # CHECK DTYPE? 
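# e.g. form_blocks passes _NS_DTYPE for the DatetimeBlock bucket above,
# so datetime64 input of any unit is coerced to datetime64[ns] here.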
- if dtype is not None and values.dtype != dtype: # pragma: no cover - values = values.astype(dtype) - - block = make_block(values, placement=placement) - return [block] - - -def _multi_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes """ - - # group by dtype - grouper = itertools.groupby(tuples, lambda x: x[2].dtype) - - new_blocks = [] - for dtype, tup_block in grouper: - - values, placement = _stack_arrays(list(tup_block), dtype) - - block = make_block(values, placement=placement) - new_blocks.append(block) - - return new_blocks - - -def _sparse_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes (and - are sparse) - """ - - new_blocks = [] - for i, names, array in tuples: - array = _maybe_to_sparse(array) - block = make_block(array, klass=SparseBlock, placement=[i]) - new_blocks.append(block) - - return new_blocks - - -def _stack_arrays(tuples, dtype): - - # fml - def _asarray_compat(x): - if isinstance(x, ABCSeries): - return x._values - else: - return np.asarray(x) - - def _shape_compat(x): - if isinstance(x, ABCSeries): - return len(x), - else: - return x.shape - - placement, names, arrays = zip(*tuples) - - first = arrays[0] - shape = (len(arrays),) + _shape_compat(first) - - stacked = np.empty(shape, dtype=dtype) - for i, arr in enumerate(arrays): - stacked[i] = _asarray_compat(arr) - - return stacked, placement - - -def _interleaved_dtype(blocks): - if not len(blocks): - return None - - dtype = find_common_type([b.dtype for b in blocks]) - - # only numpy compat - if isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): - dtype = np.object - - return dtype - - -def _consolidate(blocks): - """ - Merge blocks having same dtype, exclude non-consolidating blocks - """ - - # sort by _can_consolidate, dtype - gkey = lambda x: x._consolidate_key - grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) - - new_blocks = [] - for (_can_consolidate, dtype), group_blocks in grouper: - merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, - _can_consolidate=_can_consolidate) - new_blocks = _extend_blocks(merged_blocks, new_blocks) - return new_blocks - - -def _maybe_compare(a, b, op): - - is_a_array = isinstance(a, np.ndarray) - is_b_array = isinstance(b, np.ndarray) - - # numpy deprecation warning to have i8 vs integer comparisons - if is_datetimelike_v_numeric(a, b): - result = False - - # numpy deprecation warning if comparing numeric vs string-like - elif is_numeric_v_string_like(a, b): - result = False - - else: - result = op(a, b) - - if is_scalar(result) and (is_a_array or is_b_array): - type_names = [type(a).__name__, type(b).__name__] - - if is_a_array: - type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) - - if is_b_array: - type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) - - raise TypeError( - "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], - b=type_names[1])) - return result - - -def _concat_indexes(indexes): - return indexes[0].append(indexes[1:]) - - -def _get_blkno_placements(blknos, blk_count, group=True): - """ - - Parameters - ---------- - blknos : array of int64 - blk_count : int - group : bool - - Returns - ------- - iterator - yield (BlockPlacement, blkno) - - """ - - blknos = ensure_int64(blknos) - - # FIXME: blk_count is unused, but it may avoid the use of dicts in cython - for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): - yield blkno, BlockPlacement(indexer) - - -def 
items_overlap_with_suffix(left, lsuffix, right, rsuffix): - """ - If two indices overlap, add suffixes to overlapping entries. - - If corresponding suffix is empty, the entry is simply converted to string. - - """ - to_rename = left.intersection(right) - if len(to_rename) == 0: - return left, right - else: - if not lsuffix and not rsuffix: - raise ValueError('columns overlap but no suffix specified: ' - '{rename}'.format(rename=to_rename)) - - def lrenamer(x): - if x in to_rename: - return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) - return x - - def rrenamer(x): - if x in to_rename: - return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) - return x - - return (_transform_index(left, lrenamer), - _transform_index(right, rrenamer)) - - -def _transform_index(index, func, level=None): - """ - Apply function to all values found in index. - - This includes transforming multiindex entries separately. - Only apply function to one level of the MultiIndex if level is specified. - - """ - if isinstance(index, MultiIndex): - if level is not None: - items = [tuple(func(y) if i == level else y - for i, y in enumerate(x)) for x in index] - else: - items = [tuple(func(y) for y in x) for x in index] - return MultiIndex.from_tuples(items, names=index.names) - else: - items = [func(x) for x in index] - return Index(items, name=index.name, tupleize_cols=False) - - -def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): - """ - Concatenate block managers into one. - - Parameters - ---------- - mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples - axes : list of Index - concat_axis : int - copy : bool - - """ - concat_plan = combine_concat_plans( - [get_mgr_concatenation_plan(mgr, indexers) - for mgr, indexers in mgrs_indexers], concat_axis) - - blocks = [] - - for placement, join_units in concat_plan: - - if len(join_units) == 1 and not join_units[0].indexers: - b = join_units[0].block - values = b.values - if copy: - values = values.copy() - elif not copy: - values = values.view() - b = b.make_block_same_class(values, placement=placement) - elif is_uniform_join_units(join_units): - b = join_units[0].block.concat_same_type( - [ju.block for ju in join_units], placement=placement) - else: - b = make_block( - concatenate_join_units(join_units, concat_axis, copy=copy), - placement=placement) - blocks.append(b) - - return BlockManager(blocks, axes) - - -def is_uniform_join_units(join_units): - """ - Check if the join units consist of blocks of uniform type that can - be concatenated using Block.concat_same_type instead of the generic - concatenate_join_units (which uses `_concat._concat_compat`). - - """ - return ( - # all blocks need to have the same type - all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa - # no blocks that would get missing values (can lead to type upcasts) - # unless we're an extension dtype. - all(not ju.is_na or ju.block.is_extension for ju in join_units) and - # no blocks with indexers (as then the dimensions do not fit) - all(not ju.indexers for ju in join_units) and - # disregard Panels - all(ju.block.ndim <= 2 for ju in join_units) and - # only use this path when there is something to concatenate - len(join_units) > 1) - - -def is_uniform_reindex(join_units): - return ( - # TODO: should this be ju.block._can_hold_na? 
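A small illustration of ``items_overlap_with_suffix`` above; on this branch it is re-exported from ``pandas.core.internals``, and only the overlapping labels are renamed:

import pandas as pd
from pandas.core.internals import items_overlap_with_suffix

left = pd.Index(['key', 'value'])
right = pd.Index(['key', 'score'])
l, r = items_overlap_with_suffix(left, '_x', right, '_y')
# l: Index(['key_x', 'value'], dtype='object')
# r: Index(['key_y', 'score'], dtype='object')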
- all(ju.block and ju.block.is_extension for ju in join_units) and - len(set(ju.block.dtype.name for ju in join_units)) == 1 - ) - - -def get_empty_dtype_and_na(join_units): - """ - Return dtype and N/A values to use when concatenating specified units. - - Returned N/A value may be None which means there was no casting involved. - - Returns - ------- - dtype - na - """ - - if len(join_units) == 1: - blk = join_units[0].block - if blk is None: - return np.float64, np.nan - - if is_uniform_reindex(join_units): - # XXX: integrate property - empty_dtype = join_units[0].block.dtype - upcasted_na = join_units[0].block.fill_value - return empty_dtype, upcasted_na - - has_none_blocks = False - dtypes = [None] * len(join_units) - for i, unit in enumerate(join_units): - if unit.block is None: - has_none_blocks = True - else: - dtypes[i] = unit.dtype - - upcast_classes = defaultdict(list) - null_upcast_classes = defaultdict(list) - for dtype, unit in zip(dtypes, join_units): - if dtype is None: - continue - - if is_categorical_dtype(dtype): - upcast_cls = 'category' - elif is_datetimetz(dtype): - upcast_cls = 'datetimetz' - elif issubclass(dtype.type, np.bool_): - upcast_cls = 'bool' - elif issubclass(dtype.type, np.object_): - upcast_cls = 'object' - elif is_datetime64_dtype(dtype): - upcast_cls = 'datetime' - elif is_timedelta64_dtype(dtype): - upcast_cls = 'timedelta' - elif is_float_dtype(dtype) or is_numeric_dtype(dtype): - upcast_cls = dtype.name - else: - upcast_cls = 'float' - - # Null blocks should not influence upcast class selection, unless there - # are only null blocks, when same upcasting rules must be applied to - # null upcast classes. - if unit.is_na: - null_upcast_classes[upcast_cls].append(dtype) - else: - upcast_classes[upcast_cls].append(dtype) - - if not upcast_classes: - upcast_classes = null_upcast_classes - - # create the result - if 'object' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'bool' in upcast_classes: - if has_none_blocks: - return np.dtype(np.object_), np.nan - else: - return np.dtype(np.bool_), None - elif 'category' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'datetimetz' in upcast_classes: - dtype = upcast_classes['datetimetz'] - return dtype[0], tslibs.iNaT - elif 'datetime' in upcast_classes: - return np.dtype('M8[ns]'), tslibs.iNaT - elif 'timedelta' in upcast_classes: - return np.dtype('m8[ns]'), tslibs.iNaT - else: # pragma - g = np.find_common_type(upcast_classes, []) - if is_float_dtype(g): - return g, g.type(np.nan) - elif is_numeric_dtype(g): - if has_none_blocks: - return np.float64, np.nan - else: - return g, None - - msg = "invalid dtype determination in get_concat_dtype" - raise AssertionError(msg) - - -def concatenate_join_units(join_units, concat_axis, copy): - """ - Concatenate values from several join units along selected axis. - """ - if concat_axis == 0 and len(join_units) > 1: - # Concatenating join units along ax0 is handled in _merge_blocks. - raise AssertionError("Concatenating join units along axis0") - - empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) - - to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, - upcasted_na=upcasted_na) - for ju in join_units] - - if len(to_concat) == 1: - # Only one block, nothing to concatenate. 
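# Two examples of the (dtype, NA) pairs chosen above: an int64 block
# concatenated with a fully missing (block is None) join unit gives
# (np.float64, np.nan), since ints cannot hold NaN; a datetime64[ns]
# block with missing rows keeps 'M8[ns]' and is filled with iNaT.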
- concat_values = to_concat[0] - if copy: - if isinstance(concat_values, np.ndarray): - # non-reindexed (=not yet copied) arrays are made into a view - # in JoinUnit.get_reindexed_values - if concat_values.base is not None: - concat_values = concat_values.copy() - else: - concat_values = concat_values.copy() - else: - concat_values = _concat._concat_compat(to_concat, axis=concat_axis) - - return concat_values - - -def get_mgr_concatenation_plan(mgr, indexers): - """ - Construct concatenation plan for given block manager and indexers. - - Parameters - ---------- - mgr : BlockManager - indexers : dict of {axis: indexer} - - Returns - ------- - plan : list of (BlockPlacement, JoinUnit) tuples - - """ - # Calculate post-reindex shape , save for item axis which will be separate - # for each block anyway. - mgr_shape = list(mgr.shape) - for ax, indexer in indexers.items(): - mgr_shape[ax] = len(indexer) - mgr_shape = tuple(mgr_shape) - - if 0 in indexers: - ax0_indexer = indexers.pop(0) - blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) - blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) - else: - - if mgr._is_single_block: - blk = mgr.blocks[0] - return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] - - ax0_indexer = None - blknos = mgr._blknos - blklocs = mgr._blklocs - - plan = [] - for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), - group=False): - - assert placements.is_slice_like - - join_unit_indexers = indexers.copy() - - shape = list(mgr_shape) - shape[0] = len(placements) - shape = tuple(shape) - - if blkno == -1: - unit = JoinUnit(None, shape) - else: - blk = mgr.blocks[blkno] - ax0_blk_indexer = blklocs[placements.indexer] - - unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and - # Fastpath detection of join unit not - # needing to reindex its block: no ax0 - # reindexing took place and block - # placement was sequential before. - ((ax0_indexer is None and - blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice.step == 1) or - # Slow-ish detection: all indexer locs - # are sequential (and length match is - # checked above). - (np.diff(ax0_blk_indexer) == 1).all())) - - # Omit indexer if no item reindexing is required. - if unit_no_ax0_reindexing: - join_unit_indexers.pop(0, None) - else: - join_unit_indexers[0] = ax0_blk_indexer - - unit = JoinUnit(blk, shape, join_unit_indexers) - - plan.append((placements, unit)) - - return plan - - -def combine_concat_plans(plans, concat_axis): - """ - Combine multiple concatenation plans into one. - - existing_plan is updated in-place. 
- """ - if len(plans) == 1: - for p in plans[0]: - yield p[0], [p[1]] - - elif concat_axis == 0: - offset = 0 - for plan in plans: - last_plc = None - - for plc, unit in plan: - yield plc.add(offset), [unit] - last_plc = plc - - if last_plc is not None: - offset += last_plc.as_slice.stop - - else: - num_ended = [0] - - def _next_or_none(seq): - retval = next(seq, None) - if retval is None: - num_ended[0] += 1 - return retval - - plans = list(map(iter, plans)) - next_items = list(map(_next_or_none, plans)) - - while num_ended[0] != len(next_items): - if num_ended[0] > 0: - raise ValueError("Plan shapes are not aligned") - - placements, units = zip(*next_items) - - lengths = list(map(len, placements)) - min_len, max_len = min(lengths), max(lengths) - - if min_len == max_len: - yield placements[0], units - next_items[:] = map(_next_or_none, plans) - else: - yielded_placement = None - yielded_units = [None] * len(next_items) - for i, (plc, unit) in enumerate(next_items): - yielded_units[i] = unit - if len(plc) > min_len: - # trim_join_unit updates unit in place, so only - # placement needs to be sliced to skip min_len. - next_items[i] = (plc[min_len:], - trim_join_unit(unit, min_len)) - else: - yielded_placement = plc - next_items[i] = _next_or_none(plans[i]) - - yield yielded_placement, yielded_units - - -def trim_join_unit(join_unit, length): - """ - Reduce join_unit's shape along item axis to length. - - Extra items that didn't fit are returned as a separate block. - """ - - if 0 not in join_unit.indexers: - extra_indexers = join_unit.indexers - - if join_unit.block is None: - extra_block = None - else: - extra_block = join_unit.block.getitem_block(slice(length, None)) - join_unit.block = join_unit.block.getitem_block(slice(length)) - else: - extra_block = join_unit.block - - extra_indexers = copy.copy(join_unit.indexers) - extra_indexers[0] = extra_indexers[0][length:] - join_unit.indexers[0] = join_unit.indexers[0][:length] - - extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] - join_unit.shape = (length,) + join_unit.shape[1:] - - return JoinUnit(block=extra_block, indexers=extra_indexers, - shape=extra_shape) - - -class JoinUnit(object): - - def __init__(self, block, shape, indexers=None): - # Passing shape explicitly is required for cases when block is None. - if indexers is None: - indexers = {} - self.block = block - self.indexers = indexers - self.shape = shape - - def __repr__(self): - return '{name}({block!r}, {indexers})'.format( - name=self.__class__.__name__, block=self.block, - indexers=self.indexers) - - @cache_readonly - def needs_filling(self): - for indexer in self.indexers.values(): - # FIXME: cache results of indexer == -1 checks. - if (indexer == -1).any(): - return True - - return False - - @cache_readonly - def dtype(self): - if self.block is None: - raise AssertionError("Block is None, no dtype") - - if not self.needs_filling: - return self.block.dtype - else: - return _get_dtype(maybe_promote(self.block.dtype, - self.block.fill_value)[0]) - - @cache_readonly - def is_na(self): - if self.block is None: - return True - - if not self.block._can_hold_na: - return False - - # Usually it's enough to check but a small fraction of values to see if - # a block is NOT null, chunks should help in such cases. 1000 value - # was chosen rather arbitrarily. 
- values = self.block.values - if self.block.is_categorical: - values_flat = values.categories - elif self.block.is_sparse: - # fill_value is not NaN and have holes - if not values._null_fill_value and values.sp_index.ngaps > 0: - return False - values_flat = values.ravel(order='K') - elif isinstance(self.block, ExtensionBlock): - values_flat = values - else: - values_flat = values.ravel(order='K') - total_len = values_flat.shape[0] - chunk_len = max(total_len // 40, 1000) - for i in range(0, total_len, chunk_len): - if not isna(values_flat[i:i + chunk_len]).all(): - return False - - return True - - def get_reindexed_values(self, empty_dtype, upcasted_na): - if upcasted_na is None: - # No upcasting is necessary - fill_value = self.block.fill_value - values = self.block.get_values() - else: - fill_value = upcasted_na - - if self.is_na: - if getattr(self.block, 'is_object', False): - # we want to avoid filling with np.nan if we are - # using None; we already know that we are all - # nulls - values = self.block.values.ravel(order='K') - if len(values) and values[0] is None: - fill_value = None - - if getattr(self.block, 'is_datetimetz', False) or \ - is_datetimetz(empty_dtype): - pass - elif getattr(self.block, 'is_categorical', False): - pass - elif getattr(self.block, 'is_sparse', False): - pass - else: - missing_arr = np.empty(self.shape, dtype=empty_dtype) - missing_arr.fill(fill_value) - return missing_arr - - if not self.indexers: - if not self.block._can_consolidate: - # preserve these for validation in _concat_compat - return self.block.values - - if self.block.is_bool and not self.block.is_categorical: - # External code requested filling/upcasting, bool values must - # be upcasted to object to avoid being upcasted to numeric. - values = self.block.astype(np.object_).values - elif self.block.is_extension: - values = self.block.values - else: - # No dtype upcasting is done here, it will be performed during - # concatenation itself. - values = self.block.get_values() - - if not self.indexers: - # If there's no indexing to be done, we want to signal outside - # code that this array must be copied explicitly. This is done - # by returning a view and checking `retval.base`. - values = values.view() - - else: - for ax, indexer in self.indexers.items(): - values = algos.take_nd(values, indexer, axis=ax, - fill_value=fill_value) - - return values - - -def _fast_count_smallints(arr): - """Faster version of set(arr) for sequences of small numbers.""" - if len(arr) == 0: - # Handle empty arr case separately: numpy 1.6 chokes on that. 
- return np.empty((0, 2), dtype=arr.dtype) - else: - counts = np.bincount(arr.astype(np.int_)) - nz = counts.nonzero()[0] - return np.c_[nz, counts[nz]] - - -def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): - if isinstance(slice_or_indexer, slice): - return ('slice', slice_or_indexer, - libinternals.slice_len(slice_or_indexer, length)) - elif (isinstance(slice_or_indexer, np.ndarray) and - slice_or_indexer.dtype == np.bool_): - return 'mask', slice_or_indexer, slice_or_indexer.sum() - else: - indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) - if not allow_fill: - indexer = maybe_convert_indices(indexer, length) - return 'fancy', indexer, len(indexer) + CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock, + Block) +from .managers import ( # noqa:F401 + BlockManager, SingleBlockManager, + create_block_manager_from_arrays, create_block_manager_from_blocks, + items_overlap_with_suffix, # reshape.merge + concatenate_block_managers) # reshape.concat, reshape.merge diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py new file mode 100644 index 0000000000000..4eeeb069d7142 --- /dev/null +++ b/pandas/core/internals/concat.py @@ -0,0 +1,474 @@ +# -*- coding: utf-8 -*- +# TODO: Needs a better name; too many modules are already called "concat" +import copy +from collections import defaultdict + +import numpy as np + +from pandas._libs import tslibs, internals as libinternals +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.common import ( + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, + is_categorical_dtype, + is_float_dtype, is_numeric_dtype, + _get_dtype) +from pandas.core.dtypes.cast import maybe_promote +import pandas.core.dtypes.concat as _concat + +import pandas.core.algorithms as algos + + +def get_mgr_concatenation_plan(mgr, indexers): + """ + Construct concatenation plan for given block manager and indexers. + + Parameters + ---------- + mgr : BlockManager + indexers : dict of {axis: indexer} + + Returns + ------- + plan : list of (BlockPlacement, JoinUnit) tuples + + """ + # Calculate post-reindex shape , save for item axis which will be separate + # for each block anyway. + mgr_shape = list(mgr.shape) + for ax, indexer in indexers.items(): + mgr_shape[ax] = len(indexer) + mgr_shape = tuple(mgr_shape) + + if 0 in indexers: + ax0_indexer = indexers.pop(0) + blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) + blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) + else: + + if mgr._is_single_block: + blk = mgr.blocks[0] + return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] + + ax0_indexer = None + blknos = mgr._blknos + blklocs = mgr._blklocs + + plan = [] + for blkno, placements in libinternals.get_blkno_placements(blknos, + mgr.nblocks, + group=False): + + assert placements.is_slice_like + + join_unit_indexers = indexers.copy() + + shape = list(mgr_shape) + shape[0] = len(placements) + shape = tuple(shape) + + if blkno == -1: + unit = JoinUnit(None, shape) + else: + blk = mgr.blocks[blkno] + ax0_blk_indexer = blklocs[placements.indexer] + + unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and + # Fastpath detection of join unit not + # needing to reindex its block: no ax0 + # reindexing took place and block + # placement was sequential before. 
+ ((ax0_indexer is None and
+ blk.mgr_locs.is_slice_like and
+ blk.mgr_locs.as_slice.step == 1) or
+ # Slow-ish detection: all indexer locs
+ # are sequential (and length match is
+ # checked above).
+ (np.diff(ax0_blk_indexer) == 1).all()))
+
+ # Omit indexer if no item reindexing is required.
+ if unit_no_ax0_reindexing:
+ join_unit_indexers.pop(0, None)
+ else:
+ join_unit_indexers[0] = ax0_blk_indexer
+
+ unit = JoinUnit(blk, shape, join_unit_indexers)
+
+ plan.append((placements, unit))
+
+ return plan
+
+
+class JoinUnit(object):
+
+ def __init__(self, block, shape, indexers=None):
+ # Passing shape explicitly is required for cases when block is None.
+ if indexers is None:
+ indexers = {}
+ self.block = block
+ self.indexers = indexers
+ self.shape = shape
+
+ def __repr__(self):
+ return '{name}({block!r}, {indexers})'.format(
+ name=self.__class__.__name__, block=self.block,
+ indexers=self.indexers)
+
+ @cache_readonly
+ def needs_filling(self):
+ for indexer in self.indexers.values():
+ # FIXME: cache results of indexer == -1 checks.
+ if (indexer == -1).any():
+ return True
+
+ return False
+
+ @cache_readonly
+ def dtype(self):
+ if self.block is None:
+ raise AssertionError("Block is None, no dtype")
+
+ if not self.needs_filling:
+ return self.block.dtype
+ else:
+ return _get_dtype(maybe_promote(self.block.dtype,
+ self.block.fill_value)[0])
+
+ @cache_readonly
+ def is_na(self):
+ if self.block is None:
+ return True
+
+ if not self.block._can_hold_na:
+ return False
+
+ # Usually it's enough to check only a small fraction of values to see
+ # if a block is NOT null; chunks should help in such cases. The value
+ # 1000 was chosen rather arbitrarily.
+ values = self.block.values
+ if self.block.is_categorical:
+ values_flat = values.categories
+ elif self.block.is_sparse:
+ # fill_value is not NaN and there are holes
+ if not values._null_fill_value and values.sp_index.ngaps > 0:
+ return False
+ values_flat = values.ravel(order='K')
+ elif self.block.is_extension:
+ values_flat = values
+ else:
+ values_flat = values.ravel(order='K')
+ total_len = values_flat.shape[0]
+ chunk_len = max(total_len // 40, 1000)
+ for i in range(0, total_len, chunk_len):
+ if not isna(values_flat[i:i + chunk_len]).all():
+ return False
+
+ return True
+
+ def get_reindexed_values(self, empty_dtype, upcasted_na):
+ if upcasted_na is None:
+ # No upcasting is necessary
+ fill_value = self.block.fill_value
+ values = self.block.get_values()
+ else:
+ fill_value = upcasted_na
+
+ if self.is_na:
+ if getattr(self.block, 'is_object', False):
+ # we want to avoid filling with np.nan if we are
+ # using None; we already know that we are all
+ # nulls
+ values = self.block.values.ravel(order='K')
+ if len(values) and values[0] is None:
+ fill_value = None
+
+ if getattr(self.block, 'is_datetimetz', False) or \
+ is_datetimetz(empty_dtype):
+ pass
+ elif getattr(self.block, 'is_categorical', False):
+ pass
+ elif getattr(self.block, 'is_sparse', False):
+ pass
+ else:
+ missing_arr = np.empty(self.shape, dtype=empty_dtype)
+ missing_arr.fill(fill_value)
+ return missing_arr
+
+ if not self.indexers:
+ if not self.block._can_consolidate:
+ # preserve these for validation in _concat_compat
+ return self.block.values
+
+ if self.block.is_bool and not self.block.is_categorical:
+ # External code requested filling/upcasting, bool values must
+ # be upcasted to object to avoid being upcasted to numeric.
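+ # For instance, np.concatenate([array([True]), array([2.5])])
+ # yields the float64 array([1., 2.5]); casting to object first
+ # keeps the original booleans intact next to the fill value.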
+ values = self.block.astype(np.object_).values + elif self.block.is_extension: + values = self.block.values + else: + # No dtype upcasting is done here, it will be performed during + # concatenation itself. + values = self.block.get_values() + + if not self.indexers: + # If there's no indexing to be done, we want to signal outside + # code that this array must be copied explicitly. This is done + # by returning a view and checking `retval.base`. + values = values.view() + + else: + for ax, indexer in self.indexers.items(): + values = algos.take_nd(values, indexer, axis=ax, + fill_value=fill_value) + + return values + + +def concatenate_join_units(join_units, concat_axis, copy): + """ + Concatenate values from several join units along selected axis. + """ + if concat_axis == 0 and len(join_units) > 1: + # Concatenating join units along ax0 is handled in _merge_blocks. + raise AssertionError("Concatenating join units along axis0") + + empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) + + to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, + upcasted_na=upcasted_na) + for ju in join_units] + + if len(to_concat) == 1: + # Only one block, nothing to concatenate. + concat_values = to_concat[0] + if copy: + if isinstance(concat_values, np.ndarray): + # non-reindexed (=not yet copied) arrays are made into a view + # in JoinUnit.get_reindexed_values + if concat_values.base is not None: + concat_values = concat_values.copy() + else: + concat_values = concat_values.copy() + else: + concat_values = _concat._concat_compat(to_concat, axis=concat_axis) + + return concat_values + + +def get_empty_dtype_and_na(join_units): + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + na + """ + + if len(join_units) == 1: + blk = join_units[0].block + if blk is None: + return np.float64, np.nan + + if is_uniform_reindex(join_units): + # XXX: integrate property + empty_dtype = join_units[0].block.dtype + upcasted_na = join_units[0].block.fill_value + return empty_dtype, upcasted_na + + has_none_blocks = False + dtypes = [None] * len(join_units) + for i, unit in enumerate(join_units): + if unit.block is None: + has_none_blocks = True + else: + dtypes[i] = unit.dtype + + upcast_classes = defaultdict(list) + null_upcast_classes = defaultdict(list) + for dtype, unit in zip(dtypes, join_units): + if dtype is None: + continue + + if is_categorical_dtype(dtype): + upcast_cls = 'category' + elif is_datetimetz(dtype): + upcast_cls = 'datetimetz' + elif issubclass(dtype.type, np.bool_): + upcast_cls = 'bool' + elif issubclass(dtype.type, np.object_): + upcast_cls = 'object' + elif is_datetime64_dtype(dtype): + upcast_cls = 'datetime' + elif is_timedelta64_dtype(dtype): + upcast_cls = 'timedelta' + elif is_float_dtype(dtype) or is_numeric_dtype(dtype): + upcast_cls = dtype.name + else: + upcast_cls = 'float' + + # Null blocks should not influence upcast class selection, unless there + # are only null blocks, when same upcasting rules must be applied to + # null upcast classes. 
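+ # For example, an all-NaN unit introduced by reindexing lands in
+ # null_upcast_classes only, so it cannot force an upcast by itself.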
+ if unit.is_na: + null_upcast_classes[upcast_cls].append(dtype) + else: + upcast_classes[upcast_cls].append(dtype) + + if not upcast_classes: + upcast_classes = null_upcast_classes + + # create the result + if 'object' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'bool' in upcast_classes: + if has_none_blocks: + return np.dtype(np.object_), np.nan + else: + return np.dtype(np.bool_), None + elif 'category' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'datetimetz' in upcast_classes: + dtype = upcast_classes['datetimetz'] + return dtype[0], tslibs.iNaT + elif 'datetime' in upcast_classes: + return np.dtype('M8[ns]'), tslibs.iNaT + elif 'timedelta' in upcast_classes: + return np.dtype('m8[ns]'), tslibs.iNaT + else: # pragma + g = np.find_common_type(upcast_classes, []) + if is_float_dtype(g): + return g, g.type(np.nan) + elif is_numeric_dtype(g): + if has_none_blocks: + return np.float64, np.nan + else: + return g, None + + msg = "invalid dtype determination in get_concat_dtype" + raise AssertionError(msg) + + +def is_uniform_join_units(join_units): + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + concatenate_join_units (which uses `_concat._concat_compat`). + + """ + return ( + # all blocks need to have the same type + all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa + # no blocks that would get missing values (can lead to type upcasts) + # unless we're an extension dtype. + all(not ju.is_na or ju.block.is_extension for ju in join_units) and + # no blocks with indexers (as then the dimensions do not fit) + all(not ju.indexers for ju in join_units) and + # disregard Panels + all(ju.block.ndim <= 2 for ju in join_units) and + # only use this path when there is something to concatenate + len(join_units) > 1) + + +def is_uniform_reindex(join_units): + return ( + # TODO: should this be ju.block._can_hold_na? + all(ju.block and ju.block.is_extension for ju in join_units) and + len(set(ju.block.dtype.name for ju in join_units)) == 1 + ) + + +def trim_join_unit(join_unit, length): + """ + Reduce join_unit's shape along item axis to length. + + Extra items that didn't fit are returned as a separate block. + """ + + if 0 not in join_unit.indexers: + extra_indexers = join_unit.indexers + + if join_unit.block is None: + extra_block = None + else: + extra_block = join_unit.block.getitem_block(slice(length, None)) + join_unit.block = join_unit.block.getitem_block(slice(length)) + else: + extra_block = join_unit.block + + extra_indexers = copy.copy(join_unit.indexers) + extra_indexers[0] = extra_indexers[0][length:] + join_unit.indexers[0] = join_unit.indexers[0][:length] + + extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] + join_unit.shape = (length,) + join_unit.shape[1:] + + return JoinUnit(block=extra_block, indexers=extra_indexers, + shape=extra_shape) + + +def combine_concat_plans(plans, concat_axis): + """ + Combine multiple concatenation plans into one. + + existing_plan is updated in-place. 
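+
+ Plans are consumed as iterators; when placements from different
+ plans have unequal lengths, trim_join_unit splits the longer units
+ so the yielded placements stay aligned across all plans.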
+ """ + if len(plans) == 1: + for p in plans[0]: + yield p[0], [p[1]] + + elif concat_axis == 0: + offset = 0 + for plan in plans: + last_plc = None + + for plc, unit in plan: + yield plc.add(offset), [unit] + last_plc = plc + + if last_plc is not None: + offset += last_plc.as_slice.stop + + else: + num_ended = [0] + + def _next_or_none(seq): + retval = next(seq, None) + if retval is None: + num_ended[0] += 1 + return retval + + plans = list(map(iter, plans)) + next_items = list(map(_next_or_none, plans)) + + while num_ended[0] != len(next_items): + if num_ended[0] > 0: + raise ValueError("Plan shapes are not aligned") + + placements, units = zip(*next_items) + + lengths = list(map(len, placements)) + min_len, max_len = min(lengths), max(lengths) + + if min_len == max_len: + yield placements[0], units + next_items[:] = map(_next_or_none, plans) + else: + yielded_placement = None + yielded_units = [None] * len(next_items) + for i, (plc, unit) in enumerate(next_items): + yielded_units[i] = unit + if len(plc) > min_len: + # trim_join_unit updates unit in place, so only + # placement needs to be sliced to skip min_len. + next_items[i] = (plc[min_len:], + trim_join_unit(unit, min_len)) + else: + yielded_placement = plc + next_items[i] = _next_or_none(plans[i]) + + yield yielded_placement, yielded_units diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py new file mode 100644 index 0000000000000..8ad569003a43a --- /dev/null +++ b/pandas/core/internals/managers.py @@ -0,0 +1,2068 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +from functools import partial +import itertools +import operator + +import numpy as np + +from pandas._libs import lib, internals as libinternals + +from pandas.util._validators import validate_bool_kwarg +from pandas.compat import range, map, zip + +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + PandasExtensionDtype) +from pandas.core.dtypes.common import ( + _NS_DTYPE, + is_datetimelike_v_numeric, + is_numeric_v_string_like, is_extension_type, + is_extension_array_dtype, + is_scalar) +from pandas.core.dtypes.cast import ( + maybe_promote, + infer_dtype_from_scalar, + find_common_type) +from pandas.core.dtypes.missing import isna +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray + +from pandas.core.base import PandasObject +import pandas.core.algorithms as algos +from pandas.core.sparse.array import _maybe_to_sparse + +from pandas.core.index import Index, MultiIndex, ensure_index +from pandas.core.indexing import maybe_convert_indices + +from pandas.io.formats.printing import pprint_thing + +from .blocks import ( + Block, DatetimeTZBlock, CategoricalBlock, ExtensionBlock, SparseBlock, + _extend_blocks, _merge_blocks, _safe_reshape, + make_block, get_block_type) +from .concat import ( # all for concatenate_block_managers + concatenate_join_units, is_uniform_join_units, + get_mgr_concatenation_plan, combine_concat_plans) + +# TODO: flexible with index=None and/or items=None + + +class BlockManager(PandasObject): + """ + Core internal data structure to implement DataFrame, Series, Panel, etc. + + Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a + lightweight blocked set of labeled data to be manipulated by the DataFrame + public API class + + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtype_counts + get_ftype_counts + get_dtypes + get_ftypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + get_scalar(label_tup) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + + Parameters + ---------- + + + Notes + ----- + This is *not* a public API class + """ + __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', + '_is_consolidated', '_blknos', '_blklocs'] + + def __init__(self, blocks, axes, do_integrity_check=True): + self.axes = [ensure_index(ax) for ax in axes] + self.blocks = tuple(blocks) + + for block in blocks: + if block.is_sparse: + if len(block.mgr_locs) != 1: + raise AssertionError("Sparse block refers to multiple " + "items") + else: + if self.ndim != block.ndim: + raise AssertionError( + 'Number of Block dimensions ({block}) must equal ' + 'number of axes ({self})'.format(block=block.ndim, + self=self.ndim)) + + if do_integrity_check: + self._verify_integrity() + + self._consolidate_check() + + self._rebuild_blknos_and_blklocs() + + def make_empty(self, axes=None): + """ return an empty BlockManager with the items axis of len 0 """ + if axes is None: + axes = [ensure_index([])] + [ensure_index(a) + for a in self.axes[1:]] + + # preserve dtype if possible + if self.ndim == 1: + blocks = np.array([], dtype=self.array_dtype) + else: + blocks = [] + return self.__class__(blocks, axes) + + def __nonzero__(self): + return True + + # Python3 compat + __bool__ = __nonzero__ + + @property + def shape(self): + return tuple(len(ax) for ax in self.axes) + + @property + def ndim(self): + return len(self.axes) + + def set_axis(self, axis, new_labels): + new_labels = ensure_index(new_labels) + old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if new_len != old_len: + raise ValueError( + 'Length mismatch: Expected axis has {old} elements, new ' + 'values have {new} elements'.format(old=old_len, new=new_len)) + + self.axes[axis] = new_labels + + def rename_axis(self, mapper, axis, copy=True, level=None): + """ + Rename one of axes. + + Parameters + ---------- + mapper : unary callable + axis : int + copy : boolean, default True + level : int, default None + + """ + obj = self.copy(deep=copy) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) + return obj + + def add_prefix(self, prefix): + f = partial('{prefix}{}'.format, prefix=prefix) + return self.rename_axis(f, axis=0) + + def add_suffix(self, suffix): + f = partial('{}{suffix}'.format, suffix=suffix) + return self.rename_axis(f, axis=0) + + @property + def _is_single_block(self): + if self.ndim == 1: + return True + + if len(self.blocks) != 1: + return False + + blk = self.blocks[0] + return (blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice == slice(0, len(self), 1)) + + def _rebuild_blknos_and_blklocs(self): + """ + Update mgr._blknos / mgr._blklocs. 
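+
+ For item position i along axis 0, _blknos[i] is the index of the
+ block holding that item and _blklocs[i] is the item's position
+ within that block's own values.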
+ """ + new_blknos = np.empty(self.shape[0], dtype=np.int64) + new_blklocs = np.empty(self.shape[0], dtype=np.int64) + new_blknos.fill(-1) + new_blklocs.fill(-1) + + for blkno, blk in enumerate(self.blocks): + rl = blk.mgr_locs + new_blknos[rl.indexer] = blkno + new_blklocs[rl.indexer] = np.arange(len(rl)) + + if (new_blknos == -1).any(): + raise AssertionError("Gaps in blk ref_locs") + + self._blknos = new_blknos + self._blklocs = new_blklocs + + # make items read only for now + def _get_items(self): + return self.axes[0] + + items = property(fget=_get_items) + + def _get_counts(self, f): + """ return a dict of the counts of the function in BlockManager """ + self._consolidate_inplace() + counts = dict() + for b in self.blocks: + v = f(b) + counts[v] = counts.get(v, 0) + b.shape[0] + return counts + + def get_dtype_counts(self): + return self._get_counts(lambda b: b.dtype.name) + + def get_ftype_counts(self): + return self._get_counts(lambda b: b.ftype) + + def get_dtypes(self): + dtypes = np.array([blk.dtype for blk in self.blocks]) + return algos.take_1d(dtypes, self._blknos, allow_fill=False) + + def get_ftypes(self): + ftypes = np.array([blk.ftype for blk in self.blocks]) + return algos.take_1d(ftypes, self._blknos, allow_fill=False) + + def __getstate__(self): + block_values = [b.values for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] + axes_array = [ax for ax in self.axes] + + extra_state = { + '0.14.1': { + 'axes': axes_array, + 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) + for b in self.blocks] + } + } + + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. + return axes_array, block_values, block_items, extra_state + + def __setstate__(self, state): + def unpickle_block(values, mgr_locs): + # numpy < 1.7 pickle compat + if values.dtype == 'M8[us]': + values = values.astype('M8[ns]') + return make_block(values, placement=mgr_locs) + + if (isinstance(state, tuple) and len(state) >= 4 and + '0.14.1' in state[3]): + state = state[3]['0.14.1'] + self.axes = [ensure_index(ax) for ax in state['axes']] + self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) + for b in state['blocks']) + else: + # discard anything after 3rd, support beta pickling format for a + # little while longer + ax_arrays, bvalues, bitems = state[:3] + + self.axes = [ensure_index(ax) for ax in ax_arrays] + + if len(bitems) == 1 and self.axes[0].equals(bitems[0]): + # This is a workaround for pre-0.14.1 pickles that didn't + # support unpickling multi-block frames/panels with non-unique + # columns/items, because given a manager with items ["a", "b", + # "a"] there's no way of knowing which block's "a" is where. + # + # Single-block case can be supported under the assumption that + # block items corresponded to manager items 1-to-1. 
+ all_mgr_locs = [slice(0, len(bitems[0]))] + else: + all_mgr_locs = [self.axes[0].get_indexer(blk_items) + for blk_items in bitems] + + self.blocks = tuple( + unpickle_block(values, mgr_locs) + for values, mgr_locs in zip(bvalues, all_mgr_locs)) + + self._post_setstate() + + def _post_setstate(self): + self._is_consolidated = False + self._known_consolidated = False + self._rebuild_blknos_and_blklocs() + + def __len__(self): + return len(self.items) + + def __unicode__(self): + output = pprint_thing(self.__class__.__name__) + for i, ax in enumerate(self.axes): + if i == 0: + output += u'\nItems: {ax}'.format(ax=ax) + else: + output += u'\nAxis {i}: {ax}'.format(i=i, ax=ax) + + for block in self.blocks: + output += u'\n{block}'.format(block=pprint_thing(block)) + return output + + def _verify_integrity(self): + mgr_shape = self.shape + tot_items = sum(len(x.mgr_locs) for x in self.blocks) + for block in self.blocks: + if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: + construction_error(tot_items, block.shape[1:], self.axes) + if len(self.items) != tot_items: + raise AssertionError('Number of manager items must equal union of ' + 'block items\n# manager items: {0}, # ' + 'tot_items: {1}'.format( + len(self.items), tot_items)) + + def apply(self, f, axes=None, filter=None, do_integrity_check=False, + consolidate=True, **kwargs): + """ + iterate over the blocks, collect and create a new block manager + + Parameters + ---------- + f : the callable or function name to operate on at the block level + axes : optional (if not supplied, use self.axes) + filter : list, if supplied, only call the block if the filter is in + the block + do_integrity_check : boolean, default False. Do the block manager + integrity check + consolidate: boolean, default True. Join together blocks having same + dtype + + Returns + ------- + Block Manager (new object) + + """ + + result_blocks = [] + + # filter kwarg is used in replace-* family of methods + if filter is not None: + filter_locs = set(self.items.get_indexer_for(filter)) + if len(filter_locs) == len(self.items): + # All items are included, as if there were no filtering + filter = None + else: + kwargs['filter'] = filter_locs + + if consolidate: + self._consolidate_inplace() + + if f == 'where': + align_copy = True + if kwargs.get('align', True): + align_keys = ['other', 'cond'] + else: + align_keys = ['cond'] + elif f == 'putmask': + align_copy = False + if kwargs.get('align', True): + align_keys = ['new', 'mask'] + else: + align_keys = ['mask'] + elif f == 'eval': + align_copy = False + align_keys = ['other'] + elif f == 'fillna': + # fillna internally does putmask, maybe it's better to do this + # at mgr, not block level? + align_copy = False + align_keys = ['value'] + else: + align_keys = [] + + # TODO(EA): may interfere with ExtensionBlock.setitem for blocks + # with a .values attribute. 
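+ # aligned_args collects the keyword values named in align_keys that
+ # look like pandas objects; each one is reindexed to a block's items
+ # below so that, e.g., 'cond' in a where() call lines up per block.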
+ aligned_args = dict((k, kwargs[k]) + for k in align_keys + if hasattr(kwargs[k], 'values') and + not isinstance(kwargs[k], ABCExtensionArray)) + + for b in self.blocks: + if filter is not None: + if not b.mgr_locs.isin(filter_locs).any(): + result_blocks.append(b) + continue + + if aligned_args: + b_items = self.items[b.mgr_locs.indexer] + + for k, obj in aligned_args.items(): + axis = getattr(obj, '_info_axis_number', 0) + kwargs[k] = obj.reindex(b_items, axis=axis, + copy=align_copy) + + kwargs['mgr'] = self + applied = getattr(b, f)(**kwargs) + result_blocks = _extend_blocks(applied, result_blocks) + + if len(result_blocks) == 0: + return self.make_empty(axes or self.axes) + bm = self.__class__(result_blocks, axes or self.axes, + do_integrity_check=do_integrity_check) + bm._consolidate_inplace() + return bm + + def reduction(self, f, axis=0, consolidate=True, transposed=False, + **kwargs): + """ + iterate over the blocks, collect and create a new block manager. + This routine is intended for reduction type operations and + will do inference on the generated blocks. + + Parameters + ---------- + f: the callable or function name to operate on at the block level + axis: reduction axis, default 0 + consolidate: boolean, default True. Join together blocks having same + dtype + transposed: boolean, default False + we are holding transposed data + + Returns + ------- + Block Manager (new object) + + """ + + if consolidate: + self._consolidate_inplace() + + axes, blocks = [], [] + for b in self.blocks: + kwargs['mgr'] = self + axe, block = getattr(b, f)(axis=axis, **kwargs) + + axes.append(axe) + blocks.append(block) + + # note that some DatetimeTZ, Categorical are always ndim==1 + ndim = {b.ndim for b in blocks} + + if 2 in ndim: + + new_axes = list(self.axes) + + # multiple blocks that are reduced + if len(blocks) > 1: + new_axes[1] = axes[0] + + # reset the placement to the original + for b, sb in zip(blocks, self.blocks): + b.mgr_locs = sb.mgr_locs + + else: + new_axes[axis] = Index(np.concatenate( + [ax.values for ax in axes])) + + if transposed: + new_axes = new_axes[::-1] + blocks = [b.make_block(b.values.T, + placement=np.arange(b.shape[1]) + ) for b in blocks] + + return self.__class__(blocks, new_axes) + + # 0 ndim + if 0 in ndim and 1 not in ndim: + values = np.array([b.values for b in blocks]) + if len(values) == 1: + return values.item() + blocks = [make_block(values, ndim=1)] + axes = Index([ax[0] for ax in axes]) + + # single block + values = _concat._concat_compat([b.values for b in blocks]) + + # compute the orderings of our original data + if len(self.blocks) > 1: + + indexer = np.empty(len(self.axes[0]), dtype=np.intp) + i = 0 + for b in self.blocks: + for j in b.mgr_locs: + indexer[j] = i + i = i + 1 + + values = values.take(indexer) + + return SingleBlockManager( + [make_block(values, + ndim=1, + placement=np.arange(len(values)))], + axes[0]) + + def isna(self, func, **kwargs): + return self.apply('apply', func=func, **kwargs) + + def where(self, **kwargs): + return self.apply('where', **kwargs) + + def eval(self, **kwargs): + return self.apply('eval', **kwargs) + + def quantile(self, **kwargs): + return self.reduction('quantile', **kwargs) + + def setitem(self, **kwargs): + return self.apply('setitem', **kwargs) + + def putmask(self, **kwargs): + return self.apply('putmask', **kwargs) + + def diff(self, **kwargs): + return self.apply('diff', **kwargs) + + def interpolate(self, **kwargs): + return self.apply('interpolate', **kwargs) + + def shift(self, **kwargs): + return 
self.apply('shift', **kwargs)
+
+ def fillna(self, **kwargs):
+ return self.apply('fillna', **kwargs)
+
+ def downcast(self, **kwargs):
+ return self.apply('downcast', **kwargs)
+
+ def astype(self, dtype, **kwargs):
+ return self.apply('astype', dtype=dtype, **kwargs)
+
+ def convert(self, **kwargs):
+ return self.apply('convert', **kwargs)
+
+ def replace(self, **kwargs):
+ return self.apply('replace', **kwargs)
+
+ def replace_list(self, src_list, dest_list, inplace=False, regex=False,
+ mgr=None):
+ """ do a list replace """
+
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
+ if mgr is None:
+ mgr = self
+
+ # figure out our mask a-priori to avoid repeated replacements
+ values = self.as_array()
+
+ def comp(s):
+ if isna(s):
+ return isna(values)
+ return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq)
+
+ masks = [comp(s) for i, s in enumerate(src_list)]
+
+ result_blocks = []
+ src_len = len(src_list) - 1
+ for blk in self.blocks:
+
+ # it's possible to get multiple result blocks here
+ # replace ALWAYS will return a list
+ rb = [blk if inplace else blk.copy()]
+ for i, (s, d) in enumerate(zip(src_list, dest_list)):
+ new_rb = []
+ for b in rb:
+ if b.dtype == np.object_:
+ convert = i == src_len
+ result = b.replace(s, d, inplace=inplace, regex=regex,
+ mgr=mgr, convert=convert)
+ new_rb = _extend_blocks(result, new_rb)
+ else:
+ # get our mask for this element, sized to this
+ # particular block
+ m = masks[i][b.mgr_locs.indexer]
+ if m.any():
+ b = b.coerce_to_target_dtype(d)
+ new_rb.extend(b.putmask(m, d, inplace=True))
+ else:
+ new_rb.append(b)
+ rb = new_rb
+ result_blocks.extend(rb)
+
+ bm = self.__class__(result_blocks, self.axes)
+ bm._consolidate_inplace()
+ return bm
+
+ def reshape_nd(self, axes, **kwargs):
+ """ a 2d-nd reshape operation on a BlockManager """
+ return self.apply('reshape_nd', axes=axes, **kwargs)
+
+ def is_consolidated(self):
+ """
+ Return True if the manager is consolidated, i.e. there is no more
+ than one block with the same ftype
+ """
+ if not self._known_consolidated:
+ self._consolidate_check()
+ return self._is_consolidated
+
+ def _consolidate_check(self):
+ ftypes = [blk.ftype for blk in self.blocks]
+ self._is_consolidated = len(ftypes) == len(set(ftypes))
+ self._known_consolidated = True
+
+ @property
+ def is_mixed_type(self):
+ # Warning, consolidation needs to get checked upstairs
+ self._consolidate_inplace()
+ return len(self.blocks) > 1
+
+ @property
+ def is_numeric_mixed_type(self):
+ # Warning, consolidation needs to get checked upstairs
+ self._consolidate_inplace()
+ return all(block.is_numeric for block in self.blocks)
+
+ @property
+ def is_datelike_mixed_type(self):
+ # Warning, consolidation needs to get checked upstairs
+ self._consolidate_inplace()
+ return any(block.is_datelike for block in self.blocks)
+
+ @property
+ def any_extension_types(self):
+ """Whether any of the blocks in this manager are extension blocks"""
+ return any(block.is_extension for block in self.blocks)
+
+ @property
+ def is_view(self):
+ """ return a boolean if we are a single block and are a view """
+ if len(self.blocks) == 1:
+ return self.blocks[0].is_view
+
+ # It is technically possible to figure out which blocks are views
+ # e.g. [ b.values.base is not None for b in self.blocks ]
+ # but then we have the case of possibly some blocks being a view
+ # and some blocks not. Setting in theory is possible on the non-view
+ # blocks w/o causing a SettingWithCopy raise/warn.
But this is a bit
+ # complicated
+
+ return False
+
+ def get_bool_data(self, copy=False):
+ """
+ Parameters
+ ----------
+ copy : boolean, default False
+ Whether to copy the blocks
+ """
+ self._consolidate_inplace()
+ return self.combine([b for b in self.blocks if b.is_bool], copy)
+
+ def get_numeric_data(self, copy=False):
+ """
+ Parameters
+ ----------
+ copy : boolean, default False
+ Whether to copy the blocks
+ """
+ self._consolidate_inplace()
+ return self.combine([b for b in self.blocks if b.is_numeric], copy)
+
+ def combine(self, blocks, copy=True):
+ """ return a new manager with the blocks """
+ if len(blocks) == 0:
+ return self.make_empty()
+
+ # FIXME: optimization potential
+ indexer = np.sort(np.concatenate([b.mgr_locs.as_array
+ for b in blocks]))
+ inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
+
+ new_blocks = []
+ for b in blocks:
+ b = b.copy(deep=copy)
+ b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
+ axis=0, allow_fill=False)
+ new_blocks.append(b)
+
+ axes = list(self.axes)
+ axes[0] = self.items.take(indexer)
+
+ return self.__class__(new_blocks, axes, do_integrity_check=False)
+
+ def get_slice(self, slobj, axis=0):
+ if axis >= self.ndim:
+ raise IndexError("Requested axis not found in manager")
+
+ if axis == 0:
+ new_blocks = self._slice_take_blocks_ax0(slobj)
+ else:
+ slicer = [slice(None)] * (axis + 1)
+ slicer[axis] = slobj
+ slicer = tuple(slicer)
+ new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
+
+ new_axes = list(self.axes)
+ new_axes[axis] = new_axes[axis][slobj]
+
+ bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
+ bm._consolidate_inplace()
+ return bm
+
+ def __contains__(self, item):
+ return item in self.items
+
+ @property
+ def nblocks(self):
+ return len(self.blocks)
+
+ def copy(self, deep=True, mgr=None):
+ """
+ Make deep or shallow copy of BlockManager
+
+ Parameters
+ ----------
+ deep : boolean or string, default True
+ If False, return shallow copy (do not copy data)
+ If 'all', copy data and a deep copy of the index
+
+ Returns
+ -------
+ copy : BlockManager
+ """
+
+ # this preserves the notion of view copying of axes
+ if deep:
+ if deep == 'all':
+ copy = lambda ax: ax.copy(deep=True)
+ else:
+ copy = lambda ax: ax.view()
+ new_axes = [copy(ax) for ax in self.axes]
+ else:
+ new_axes = list(self.axes)
+ return self.apply('copy', axes=new_axes, deep=deep,
+ do_integrity_check=False)
+
+ def as_array(self, transpose=False, items=None):
+ """Convert the blockmanager data into a numpy array.
+
+ Parameters
+ ----------
+ transpose : boolean, default False
+ If True, transpose the return array
+ items : list of strings or None
+ Names of block items that will be included in the returned
+ array.
``None`` means that all block items will be used + + Returns + ------- + arr : ndarray + """ + if len(self.blocks) == 0: + arr = np.empty(self.shape, dtype=float) + return arr.transpose() if transpose else arr + + if items is not None: + mgr = self.reindex_axis(items, axis=0) + else: + mgr = self + + if self._is_single_block or not self.is_mixed_type: + arr = mgr.blocks[0].get_values() + else: + arr = mgr._interleave() + + return arr.transpose() if transpose else arr + + def _interleave(self): + """ + Return ndarray from blocks with specified item order + Items must be contained in the blocks + """ + dtype = _interleaved_dtype(self.blocks) + + result = np.empty(self.shape, dtype=dtype) + + if result.shape[0] == 0: + # Workaround for numpy 1.7 bug: + # + # >>> a = np.empty((0,10)) + # >>> a[slice(0,0)] + # array([], shape=(0, 10), dtype=float64) + # >>> a[[]] + # Traceback (most recent call last): + # File "<stdin>", line 1, in <module> + # IndexError: index 0 is out of bounds for axis 0 with size 0 + return result + + itemmask = np.zeros(self.shape[0]) + + for blk in self.blocks: + rl = blk.mgr_locs + result[rl.indexer] = blk.get_values(dtype) + itemmask[rl.indexer] = 1 + + if not itemmask.all(): + raise AssertionError('Some items were not contained in blocks') + + return result + + def to_dict(self, copy=True): + """ + Return a dict of str(dtype) -> BlockManager + + Parameters + ---------- + copy : boolean, default True + + Returns + ------- + values : a dict of dtype -> BlockManager + + Notes + ----- + This consolidates based on str(dtype) + """ + self._consolidate_inplace() + + bd = {} + for b in self.blocks: + bd.setdefault(str(b.dtype), []).append(b) + + return {dtype: self.combine(blocks, copy=copy) + for dtype, blocks in bd.items()} + + def xs(self, key, axis=1, copy=True, takeable=False): + if axis < 1: + raise AssertionError( + 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) + + # take by position + if takeable: + loc = key + else: + loc = self.axes[axis].get_loc(key) + + slicer = [slice(None, None) for _ in range(self.ndim)] + slicer[axis] = loc + slicer = tuple(slicer) + + new_axes = list(self.axes) + + # could be an array indexer! 
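+ # (get_loc returns an integer for a unique label, but a slice or a
+ # boolean array when the axis contains duplicates)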
+ if isinstance(loc, (slice, np.ndarray)):
+ new_axes[axis] = new_axes[axis][loc]
+ else:
+ new_axes.pop(axis)
+
+ new_blocks = []
+ if len(self.blocks) > 1:
+ # we must copy here as we are mixed type
+ for blk in self.blocks:
+ newb = make_block(values=blk.values[slicer],
+ klass=blk.__class__,
+ placement=blk.mgr_locs)
+ new_blocks.append(newb)
+ elif len(self.blocks) == 1:
+ block = self.blocks[0]
+ vals = block.values[slicer]
+ if copy:
+ vals = vals.copy()
+ new_blocks = [make_block(values=vals,
+ placement=block.mgr_locs,
+ klass=block.__class__)]
+
+ return self.__class__(new_blocks, new_axes)
+
+ def fast_xs(self, loc):
+ """
+ get a cross section for a given location in the
+ items; handle dups
+
+ return the result, it *could* be a view in the case of a
+ single block
+ """
+ if len(self.blocks) == 1:
+ return self.blocks[0].iget((slice(None), loc))
+
+ items = self.items
+
+ # non-unique (GH4726)
+ if not items.is_unique:
+ result = self._interleave()
+ if self.ndim == 2:
+ result = result.T
+ return result[loc]
+
+ # unique
+ dtype = _interleaved_dtype(self.blocks)
+ n = len(items)
+ result = np.empty(n, dtype=dtype)
+ for blk in self.blocks:
+ # Such assignment may incorrectly coerce NaT to None
+ # result[blk.mgr_locs] = blk._slice((slice(None), loc))
+ for i, rl in enumerate(blk.mgr_locs):
+ result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
+
+ return result
+
+ def consolidate(self):
+ """
+ Join together blocks having same dtype
+
+ Returns
+ -------
+ y : BlockManager
+ """
+ if self.is_consolidated():
+ return self
+
+ bm = self.__class__(self.blocks, self.axes)
+ bm._is_consolidated = False
+ bm._consolidate_inplace()
+ return bm
+
+ def _consolidate_inplace(self):
+ if not self.is_consolidated():
+ self.blocks = tuple(_consolidate(self.blocks))
+ self._is_consolidated = True
+ self._known_consolidated = True
+ self._rebuild_blknos_and_blklocs()
+
+ def get(self, item, fastpath=True):
+ """
+ Return values for selected item (ndarray or BlockManager).
+ """
+ if self.items.is_unique:
+
+ if not isna(item):
+ loc = self.items.get_loc(item)
+ else:
+ indexer = np.arange(len(self.items))[isna(self.items)]
+
+ # allow a single nan location indexer
+ if not is_scalar(indexer):
+ if len(indexer) == 1:
+ loc = indexer.item()
+ else:
+ raise ValueError("cannot label index with a null key")
+
+ return self.iget(loc, fastpath=fastpath)
+ else:
+
+ if isna(item):
+ raise TypeError("cannot label index with a null key")
+
+ indexer = self.items.get_indexer_for([item])
+ return self.reindex_indexer(new_axis=self.items[indexer],
+ indexer=indexer, axis=0,
+ allow_dups=True)
+
+ def iget(self, i, fastpath=True):
+ """
+ Return the data as a SingleBlockManager if fastpath=True and possible
+
+ Otherwise return as a ndarray
+ """
+ block = self.blocks[self._blknos[i]]
+ values = block.iget(self._blklocs[i])
+ if not fastpath or not block._box_to_block_values or values.ndim != 1:
+ return values
+
+ # fastpath shortcut for selecting a single-dim from a 2-dim BM
+ return SingleBlockManager(
+ [block.make_block_same_class(values,
+ placement=slice(0, len(values)),
+ ndim=1)],
+ self.axes[1])
+
+ def get_scalar(self, tup):
+ """
+ Retrieve single item
+ """
+ full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)]
+ blk = self.blocks[self._blknos[full_loc[0]]]
+ values = blk.values
+
+ # FIXME: this may return non-upcasted types?
+ if values.ndim == 1:
+ return values[full_loc[1]]
+
+ full_loc[0] = self._blklocs[full_loc[0]]
+ return values[tuple(full_loc)]
+
+ def delete(self, item):
+ """
+ Delete selected item (items if non-unique) in-place.
+ """
+ indexer = self.items.get_loc(item)
+
+ is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
+ is_deleted[indexer] = True
+ ref_loc_offset = -is_deleted.cumsum()
+
+ is_blk_deleted = [False] * len(self.blocks)
+
+ if isinstance(indexer, int):
+ affected_start = indexer
+ else:
+ affected_start = is_deleted.nonzero()[0][0]
+
+ for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
+ blk = self.blocks[blkno]
+ bml = blk.mgr_locs
+ blk_del = is_deleted[bml.indexer].nonzero()[0]
+
+ if len(blk_del) == len(bml):
+ is_blk_deleted[blkno] = True
+ continue
+ elif len(blk_del) != 0:
+ blk.delete(blk_del)
+ bml = blk.mgr_locs
+
+ blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
+
+ # FIXME: use Index.delete as soon as it uses fastpath=True
+ self.axes[0] = self.items[~is_deleted]
+ self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
+ if not is_blk_deleted[blkno])
+ self._shape = None
+ self._rebuild_blknos_and_blklocs()
+
+ def set(self, item, value, check=False):
+ """
+ Set new item in-place. Does not consolidate. Adds new Block if not
+ contained in the current set of items.
+ If check, then validate that we are not setting the same data
+ in-place.
+ """
+ # FIXME: refactor, clearly separate broadcasting & zip-like assignment
+ # can prob also fix the various if tests for sparse/categorical
+
+ # TODO(EA): Remove an is_extension_ when all extension types satisfy
+ # the interface
+ value_is_extension_type = (is_extension_type(value) or
+ is_extension_array_dtype(value))
+
+ # categorical/sparse/datetimetz
+ if value_is_extension_type:
+
+ def value_getitem(placement):
+ return value
+ else:
+ if value.ndim == self.ndim - 1:
+ value = _safe_reshape(value, (1,) + value.shape)
+
+ def value_getitem(placement):
+ return value
+ else:
+
+ def value_getitem(placement):
+ return value[placement.indexer]
+
+ if value.shape[1:] != self.shape[1:]:
+ raise AssertionError('Shape of new values must be compatible '
+ 'with manager shape')
+
+ try:
+ loc = self.items.get_loc(item)
+ except KeyError:
+ # This item wasn't present, just insert at end
+ self.insert(len(self.items), item, value)
+ return
+
+ if isinstance(loc, int):
+ loc = [loc]
+
+ blknos = self._blknos[loc]
+ blklocs = self._blklocs[loc].copy()
+
+ unfit_mgr_locs = []
+ unfit_val_locs = []
+ removed_blknos = []
+ for blkno, val_locs in libinternals.get_blkno_placements(blknos,
+ self.nblocks,
+ group=True):
+ blk = self.blocks[blkno]
+ blk_locs = blklocs[val_locs.indexer]
+ if blk.should_store(value):
+ blk.set(blk_locs, value_getitem(val_locs), check=check)
+ else:
+ unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
+ unfit_val_locs.append(val_locs)
+
+ # If all block items are unfit, schedule the block for removal.
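+ # ("unfit" means blk.should_store(value) was False: the block's
+ # dtype cannot hold the new values, so those items move to fresh
+ # blocks)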
+ if len(val_locs) == len(blk.mgr_locs): + removed_blknos.append(blkno) + else: + self._blklocs[blk.mgr_locs.indexer] = -1 + blk.delete(blk_locs) + self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) + + if len(removed_blknos): + # Remove blocks & update blknos accordingly + is_deleted = np.zeros(self.nblocks, dtype=np.bool_) + is_deleted[removed_blknos] = True + + new_blknos = np.empty(self.nblocks, dtype=np.int64) + new_blknos.fill(-1) + new_blknos[~is_deleted] = np.arange(self.nblocks - + len(removed_blknos)) + self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, + allow_fill=False) + self.blocks = tuple(blk for i, blk in enumerate(self.blocks) + if i not in set(removed_blknos)) + + if unfit_val_locs: + unfit_mgr_locs = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_mgr_locs) + + new_blocks = [] + if value_is_extension_type: + # This code (ab-)uses the fact that sparse blocks contain only + # one item. + new_blocks.extend( + make_block(values=value.copy(), ndim=self.ndim, + placement=slice(mgr_loc, mgr_loc + 1)) + for mgr_loc in unfit_mgr_locs) + + self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + + len(self.blocks)) + self._blklocs[unfit_mgr_locs] = 0 + + else: + # unfit_val_locs contains BlockPlacement objects + unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) + + new_blocks.append( + make_block(values=value_getitem(unfit_val_items), + ndim=self.ndim, placement=unfit_mgr_locs)) + + self._blknos[unfit_mgr_locs] = len(self.blocks) + self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) + + self.blocks += tuple(new_blocks) + + # Newly created block's dtype may already be present. + self._known_consolidated = False + + def insert(self, loc, item, value, allow_duplicates=False): + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : array_like + allow_duplicates: bool + If False, trying to insert non-unique item will raise + + """ + if not allow_duplicates and item in self.items: + # Should this be a different kind of error?? + raise ValueError('cannot insert {}, already exists'.format(item)) + + if not isinstance(loc, int): + raise TypeError("loc must be int") + + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + block = make_block(values=value, ndim=self.ndim, + placement=slice(loc, loc + 1)) + + for blkno, count in _fast_count_smallints(self._blknos[loc:]): + blk = self.blocks[blkno] + if count == len(blk.mgr_locs): + blk.mgr_locs = blk.mgr_locs.add(1) + else: + new_mgr_locs = blk.mgr_locs.as_array.copy() + new_mgr_locs[new_mgr_locs >= loc] += 1 + blk.mgr_locs = new_mgr_locs + + if loc == self._blklocs.shape[0]: + # np.append is a lot faster (at least in numpy 1.7.1), let's use it + # if we can. + self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + else: + self._blklocs = np.insert(self._blklocs, loc, 0) + self._blknos = np.insert(self._blknos, loc, len(self.blocks)) + + self.axes[0] = new_axis + self.blocks += (block,) + self._shape = None + + self._known_consolidated = False + + if len(self.blocks) > 100: + self._consolidate_inplace() + + def reindex_axis(self, new_index, axis, method=None, limit=None, + fill_value=None, copy=True): + """ + Conform block manager to new index. 
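+
+ This resolves new_index against the existing axis labels (producing
+ an indexer) and then delegates to reindex_indexer.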
+ """ + new_index = ensure_index(new_index) + new_index, indexer = self.axes[axis].reindex(new_index, method=method, + limit=limit) + + return self.reindex_indexer(new_index, indexer, axis=axis, + fill_value=fill_value, copy=copy) + + def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, + allow_dups=False, copy=True): + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray of int64 or None + axis : int + fill_value : object + allow_dups : bool + + pandas-indexer with -1's only. + """ + if indexer is None: + if new_axis is self.axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result + + self._consolidate_inplace() + + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(indexer, + fill_tuple=(fill_value,)) + else: + new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( + fill_value if fill_value is not None else blk.fill_value,)) + for blk in self.blocks] + + new_axes = list(self.axes) + new_axes[axis] = new_axis + return self.__class__(new_blocks, new_axes) + + def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): + """ + Slice/take blocks along axis=0. + + Overloaded for SingleBlock + + Returns + ------- + new_blocks : list of Block + + """ + + allow_fill = fill_tuple is not None + + sl_type, slobj, sllen = _preprocess_slice_or_indexer( + slice_or_indexer, self.shape[0], allow_fill=allow_fill) + + if self._is_single_block: + blk = self.blocks[0] + + if sl_type in ('slice', 'mask'): + return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] + elif not allow_fill or self.ndim == 1: + if allow_fill and fill_tuple[0] is None: + _, fill_value = maybe_promote(blk.dtype) + fill_tuple = (fill_value, ) + + return [blk.take_nd(slobj, axis=0, + new_mgr_locs=slice(0, sllen), + fill_tuple=fill_tuple)] + + if sl_type in ('slice', 'mask'): + blknos = self._blknos[slobj] + blklocs = self._blklocs[slobj] + else: + blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, + allow_fill=allow_fill) + blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, + allow_fill=allow_fill) + + # When filling blknos, make sure blknos is updated before appending to + # blocks list, that way new blkno is exactly len(blocks). + # + # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, + # pytables serialization will break otherwise. + blocks = [] + for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, + self.nblocks, + group=True): + if blkno == -1: + # If we've got here, fill_tuple was not None. + fill_value = fill_tuple[0] + + blocks.append(self._make_na_block(placement=mgr_locs, + fill_value=fill_value)) + else: + blk = self.blocks[blkno] + + # Otherwise, slicing along items axis is necessary. + if not blk._can_consolidate: + # A non-consolidatable block, it's easy, because there's + # only one item and each mgr loc is a copy of that single + # item. 
+ for mgr_loc in mgr_locs: + newblk = blk.copy(deep=True) + newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) + blocks.append(newblk) + + else: + blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], + axis=0, new_mgr_locs=mgr_locs, + fill_tuple=None)) + + return blocks + + def _make_na_block(self, placement, fill_value=None): + # TODO: infer dtypes other than float64 from fill_value + + if fill_value is None: + fill_value = np.nan + block_shape = list(self.shape) + block_shape[0] = len(placement) + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + block_values = np.empty(block_shape, dtype=dtype) + block_values.fill(fill_value) + return make_block(block_values, placement=placement) + + def take(self, indexer, axis=1, verify=True, convert=True): + """ + Take items along any axis. + """ + self._consolidate_inplace() + indexer = (np.arange(indexer.start, indexer.stop, indexer.step, + dtype='int64') + if isinstance(indexer, slice) + else np.asanyarray(indexer, dtype='int64')) + + n = self.shape[axis] + if convert: + indexer = maybe_convert_indices(indexer, n) + + if verify: + if ((indexer == -1) | (indexer >= n)).any(): + raise Exception('Indices must be nonzero and less than ' + 'the axis length') + + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer(new_axis=new_labels, indexer=indexer, + axis=axis, allow_dups=True) + + def merge(self, other, lsuffix='', rsuffix=''): + if not self._is_indexed_like(other): + raise AssertionError('Must have same axes to merge managers') + + l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, + right=other.items, rsuffix=rsuffix) + new_items = _concat_indexes([l, r]) + + new_blocks = [blk.copy(deep=False) for blk in self.blocks] + + offset = self.shape[0] + for blk in other.blocks: + blk = blk.copy(deep=False) + blk.mgr_locs = blk.mgr_locs.add(offset) + new_blocks.append(blk) + + new_axes = list(self.axes) + new_axes[0] = new_items + + return self.__class__(_consolidate(new_blocks), new_axes) + + def _is_indexed_like(self, other): + """ + Check all axes except items + """ + if self.ndim != other.ndim: + raise AssertionError( + 'Number of dimensions must agree got {ndim} and ' + '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) + for ax, oax in zip(self.axes[1:], other.axes[1:]): + if not ax.equals(oax): + return False + return True + + def equals(self, other): + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + self._consolidate_inplace() + other._consolidate_inplace() + if len(self.blocks) != len(other.blocks): + return False + + # canonicalize block order, using a tuple combining the type + # name and then mgr_locs because there might be unconsolidated + # blocks (say, Categorical) which can only be distinguished by + # the iteration order + def canonicalize(block): + return (block.dtype.name, block.mgr_locs.as_array.tolist()) + + self_blocks = sorted(self.blocks, key=canonicalize) + other_blocks = sorted(other.blocks, key=canonicalize) + return all(block.equals(oblock) + for block, oblock in zip(self_blocks, other_blocks)) + + def unstack(self, unstacker_func): + """Return a blockmanager with all blocks unstacked. + + Parameters + ---------- + unstacker_func : callable + A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
+
+ Returns
+ -------
+ unstacked : BlockManager
+ """
+ dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
+ new_columns = dummy.get_new_columns()
+ new_index = dummy.get_new_index()
+ new_blocks = []
+ columns_mask = []
+
+ for blk in self.blocks:
+ blocks, mask = blk._unstack(
+ partial(unstacker_func,
+ value_columns=self.items[blk.mgr_locs.indexer]),
+ new_columns)
+
+ new_blocks.extend(blocks)
+ columns_mask.extend(mask)
+
+ new_columns = new_columns[columns_mask]
+
+ bm = BlockManager(new_blocks, [new_columns, new_index])
+ return bm
+
+
+class SingleBlockManager(BlockManager):
+ """ manage a single block with a single axis """
+
+ ndim = 1
+ _is_consolidated = True
+ _known_consolidated = True
+ __slots__ = ()
+
+ def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
+
+ if isinstance(axis, list):
+ if len(axis) != 1:
+ raise ValueError("cannot create SingleBlockManager with more "
+ "than 1 axis")
+ axis = axis[0]
+
+ # passed from constructor, single block, single axis
+ if fastpath:
+ self.axes = [axis]
+ if isinstance(block, list):
+
+ # empty block
+ if len(block) == 0:
+ block = [np.array([])]
+ elif len(block) != 1:
+ raise ValueError('Cannot create SingleBlockManager with '
+ 'more than 1 block')
+ block = block[0]
+ else:
+ self.axes = [ensure_index(axis)]
+
+ # create the block here
+ if isinstance(block, list):
+
+ # provide consolidation to the interleaved_dtype
+ if len(block) > 1:
+ dtype = _interleaved_dtype(block)
+ block = [b.astype(dtype) for b in block]
+ block = _consolidate(block)
+
+ if len(block) != 1:
+ raise ValueError('Cannot create SingleBlockManager with '
+ 'more than 1 block')
+ block = block[0]
+
+ if not isinstance(block, Block):
+ block = make_block(block, placement=slice(0, len(axis)), ndim=1)
+
+ self.blocks = [block]
+
+ def _post_setstate(self):
+ pass
+
+ @property
+ def _block(self):
+ return self.blocks[0]
+
+ @property
+ def _values(self):
+ return self._block.values
+
+ @property
+ def _blknos(self):
+ """ compat with BlockManager """
+ return None
+
+ @property
+ def _blklocs(self):
+ """ compat with BlockManager """
+ return None
+
+ def get_slice(self, slobj, axis=0):
+ if axis >= self.ndim:
+ raise IndexError("Requested axis not found in manager")
+
+ return self.__class__(self._block._slice(slobj),
+ self.index[slobj], fastpath=True)
+
+ @property
+ def index(self):
+ return self.axes[0]
+
+ def convert(self, **kwargs):
+ """ convert the whole block as one """
+ kwargs['by_item'] = False
+ return self.apply('convert', **kwargs)
+
+ @property
+ def dtype(self):
+ return self._block.dtype
+
+ @property
+ def array_dtype(self):
+ return self._block.array_dtype
+
+ @property
+ def ftype(self):
+ return self._block.ftype
+
+ def get_dtype_counts(self):
+ return {self.dtype.name: 1}
+
+ def get_ftype_counts(self):
+ return {self.ftype: 1}
+
+ def get_dtypes(self):
+ return np.array([self._block.dtype])
+
+ def get_ftypes(self):
+ return np.array([self._block.ftype])
+
+ def external_values(self):
+ return self._block.external_values()
+
+ def internal_values(self):
+ return self._block.internal_values()
+
+ def formatting_values(self):
+ """Return the internal values used by the DataFrame/SeriesFormatter"""
+ return self._block.formatting_values()
+
+ def get_values(self):
+ """ return a dense type view """
+ return np.array(self._block.to_dense(), copy=False)
+
+ @property
+ def asobject(self):
+ """
+ return an object dtype array. datetime/timedelta like values are boxed
+ to Timestamp/Timedelta instances.
+ """ + return self._block.get_values(dtype=object) + + @property + def _can_hold_na(self): + return self._block._can_hold_na + + def is_consolidated(self): + return True + + def _consolidate_check(self): + pass + + def _consolidate_inplace(self): + pass + + def delete(self, item): + """ + Delete single item from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. + """ + loc = self.items.get_loc(item) + self._block.delete(loc) + self.axes[0] = self.axes[0].delete(loc) + + def fast_xs(self, loc): + """ + fast path for getting a cross-section + return a view of the data + """ + return self._block.values[loc] + + def concat(self, to_concat, new_axis): + """ + Concatenate a list of SingleBlockManagers into a single + SingleBlockManager. + + Used for pd.concat of Series objects with axis=0. + + Parameters + ---------- + to_concat : list of SingleBlockManagers + new_axis : Index of the result + + Returns + ------- + SingleBlockManager + + """ + non_empties = [x for x in to_concat if len(x) > 0] + + # check if all series are of the same block type: + if len(non_empties) > 0: + blocks = [obj.blocks[0] for obj in non_empties] + + if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa + new_block = blocks[0].concat_same_type(blocks) + else: + values = [x.values for x in blocks] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + else: + values = [x._block.values for x in to_concat] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + + mgr = SingleBlockManager(new_block, new_axis) + return mgr + + +# -------------------------------------------------------------------- +# Constructor Helpers + +def create_block_manager_from_blocks(blocks, axes): + try: + if len(blocks) == 1 and not isinstance(blocks[0], Block): + # if blocks[0] is of length 0, return empty blocks + if not len(blocks[0]): + blocks = [] + else: + # It's OK if a single block is passed as values, its placement + # is basically "all items", but if there're many, don't bother + # converting, it's an error anyway. + blocks = [make_block(values=blocks[0], + placement=slice(0, len(axes[0])))] + + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + + except (ValueError) as e: + blocks = [getattr(b, 'values', b) for b in blocks] + tot_items = sum(b.shape[0] for b in blocks) + construction_error(tot_items, blocks[0].shape[1:], axes, e) + + +def create_block_manager_from_arrays(arrays, names, axes): + + try: + blocks = form_blocks(arrays, names, axes) + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + except ValueError as e: + construction_error(len(arrays), arrays[0].shape, axes, e) + + +def construction_error(tot_items, block_shape, axes, e=None): + """ raise a helpful message about our construction """ + passed = tuple(map(int, [tot_items] + list(block_shape))) + implied = tuple(map(int, [len(ax) for ax in axes])) + if passed == implied and e is not None: + raise e + if block_shape[0] == 0: + raise ValueError("Empty data passed with indices specified.") + raise ValueError("Shape of passed values is {0}, indices imply {1}".format( + passed, implied)) + + +# ----------------------------------------------------------------------- + +def form_blocks(arrays, names, axes): + # put "leftover" items in float bucket, where else? + # generalize? 
+ items_dict = defaultdict(list) + extra_locs = [] + + names_idx = ensure_index(names) + if names_idx.equals(axes[0]): + names_indexer = np.arange(len(names_idx)) + else: + assert names_idx.intersection(axes[0]).is_unique + names_indexer = names_idx.get_indexer_for(axes[0]) + + for i, name_idx in enumerate(names_indexer): + if name_idx == -1: + extra_locs.append(i) + continue + + k = names[name_idx] + v = arrays[name_idx] + + block_type = get_block_type(v) + items_dict[block_type.__name__].append((i, k, v)) + + blocks = [] + if len(items_dict['FloatBlock']): + float_blocks = _multi_blockify(items_dict['FloatBlock']) + blocks.extend(float_blocks) + + if len(items_dict['ComplexBlock']): + complex_blocks = _multi_blockify(items_dict['ComplexBlock']) + blocks.extend(complex_blocks) + + if len(items_dict['TimeDeltaBlock']): + timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) + blocks.extend(timedelta_blocks) + + if len(items_dict['IntBlock']): + int_blocks = _multi_blockify(items_dict['IntBlock']) + blocks.extend(int_blocks) + + if len(items_dict['DatetimeBlock']): + datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], + _NS_DTYPE) + blocks.extend(datetime_blocks) + + if len(items_dict['DatetimeTZBlock']): + dttz_blocks = [make_block(array, + klass=DatetimeTZBlock, + placement=[i]) + for i, _, array in items_dict['DatetimeTZBlock']] + blocks.extend(dttz_blocks) + + if len(items_dict['BoolBlock']): + bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) + blocks.extend(bool_blocks) + + if len(items_dict['ObjectBlock']) > 0: + object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) + blocks.extend(object_blocks) + + if len(items_dict['SparseBlock']) > 0: + sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) + blocks.extend(sparse_blocks) + + if len(items_dict['CategoricalBlock']) > 0: + cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) + for i, _, array in items_dict['CategoricalBlock']] + blocks.extend(cat_blocks) + + if len(items_dict['ExtensionBlock']): + + external_blocks = [ + make_block(array, klass=ExtensionBlock, placement=[i]) + for i, _, array in items_dict['ExtensionBlock'] + ] + + blocks.extend(external_blocks) + + if len(extra_locs): + shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) + + # empty items -> dtype object + block_values = np.empty(shape, dtype=object) + block_values.fill(np.nan) + + na_block = make_block(block_values, placement=extra_locs) + blocks.append(na_block) + + return blocks + + +def _simple_blockify(tuples, dtype): + """ return a single array of a block that has a single dtype; if dtype is + not None, coerce to this dtype + """ + values, placement = _stack_arrays(tuples, dtype) + + # CHECK DTYPE? 
+ if dtype is not None and values.dtype != dtype: # pragma: no cover + values = values.astype(dtype) + + block = make_block(values, placement=placement) + return [block] + + +def _multi_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes """ + + # group by dtype + grouper = itertools.groupby(tuples, lambda x: x[2].dtype) + + new_blocks = [] + for dtype, tup_block in grouper: + + values, placement = _stack_arrays(list(tup_block), dtype) + + block = make_block(values, placement=placement) + new_blocks.append(block) + + return new_blocks + + +def _sparse_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes (and + are sparse) + """ + + new_blocks = [] + for i, names, array in tuples: + array = _maybe_to_sparse(array) + block = make_block(array, klass=SparseBlock, placement=[i]) + new_blocks.append(block) + + return new_blocks + + +def _stack_arrays(tuples, dtype): + + # fml + def _asarray_compat(x): + if isinstance(x, ABCSeries): + return x._values + else: + return np.asarray(x) + + def _shape_compat(x): + if isinstance(x, ABCSeries): + return len(x), + else: + return x.shape + + placement, names, arrays = zip(*tuples) + + first = arrays[0] + shape = (len(arrays),) + _shape_compat(first) + + stacked = np.empty(shape, dtype=dtype) + for i, arr in enumerate(arrays): + stacked[i] = _asarray_compat(arr) + + return stacked, placement + + +def _interleaved_dtype(blocks): + if not len(blocks): + return None + + dtype = find_common_type([b.dtype for b in blocks]) + + # only numpy compat + if isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): + dtype = np.object + + return dtype + + +def _consolidate(blocks): + """ + Merge blocks having same dtype, exclude non-consolidating blocks + """ + + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) + + new_blocks = [] + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, + _can_consolidate=_can_consolidate) + new_blocks = _extend_blocks(merged_blocks, new_blocks) + return new_blocks + + +def _maybe_compare(a, b, op): + + is_a_array = isinstance(a, np.ndarray) + is_b_array = isinstance(b, np.ndarray) + + # numpy deprecation warning to have i8 vs integer comparisons + if is_datetimelike_v_numeric(a, b): + result = False + + # numpy deprecation warning if comparing numeric vs string-like + elif is_numeric_v_string_like(a, b): + result = False + + else: + result = op(a, b) + + if is_scalar(result) and (is_a_array or is_b_array): + type_names = [type(a).__name__, type(b).__name__] + + if is_a_array: + type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) + + if is_b_array: + type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) + + raise TypeError( + "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], + b=type_names[1])) + return result + + +def _concat_indexes(indexes): + return indexes[0].append(indexes[1:]) + + +def items_overlap_with_suffix(left, lsuffix, right, rsuffix): + """ + If two indices overlap, add suffixes to overlapping entries. + + If corresponding suffix is empty, the entry is simply converted to string. 
+ + """ + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + else: + if not lsuffix and not rsuffix: + raise ValueError('columns overlap but no suffix specified: ' + '{rename}'.format(rename=to_rename)) + + def lrenamer(x): + if x in to_rename: + return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) + return x + + def rrenamer(x): + if x in to_rename: + return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) + return x + + return (_transform_index(left, lrenamer), + _transform_index(right, rrenamer)) + + +def _transform_index(index, func, level=None): + """ + Apply function to all values found in index. + + This includes transforming multiindex entries separately. + Only apply function to one level of the MultiIndex if level is specified. + + """ + if isinstance(index, MultiIndex): + if level is not None: + items = [tuple(func(y) if i == level else y + for i, y in enumerate(x)) for x in index] + else: + items = [tuple(func(y) for y in x) for x in index] + return MultiIndex.from_tuples(items, names=index.names) + else: + items = [func(x) for x in index] + return Index(items, name=index.name, tupleize_cols=False) + + +def _fast_count_smallints(arr): + """Faster version of set(arr) for sequences of small numbers.""" + if len(arr) == 0: + # Handle empty arr case separately: numpy 1.6 chokes on that. + return np.empty((0, 2), dtype=arr.dtype) + else: + counts = np.bincount(arr.astype(np.int_)) + nz = counts.nonzero()[0] + return np.c_[nz, counts[nz]] + + +def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): + if isinstance(slice_or_indexer, slice): + return ('slice', slice_or_indexer, + libinternals.slice_len(slice_or_indexer, length)) + elif (isinstance(slice_or_indexer, np.ndarray) and + slice_or_indexer.dtype == np.bool_): + return 'mask', slice_or_indexer, slice_or_indexer.sum() + else: + indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) + if not allow_fill: + indexer = maybe_convert_indices(indexer, length) + return 'fancy', indexer, len(indexer) + + +def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): + """ + Concatenate block managers into one. + + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + """ + concat_plan = combine_concat_plans( + [get_mgr_concatenation_plan(mgr, indexers) + for mgr, indexers in mgrs_indexers], concat_axis) + + blocks = [] + + for placement, join_units in concat_plan: + + if len(join_units) == 1 and not join_units[0].indexers: + b = join_units[0].block + values = b.values + if copy: + values = values.copy() + elif not copy: + values = values.view() + b = b.make_block_same_class(values, placement=placement) + elif is_uniform_join_units(join_units): + b = join_units[0].block.concat_same_type( + [ju.block for ju in join_units], placement=placement) + else: + b = make_block( + concatenate_join_units(join_units, concat_axis, copy=copy), + placement=placement) + blocks.append(b) + + return BlockManager(blocks, axes)
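The suffix-renaming helper in this hunk, `items_overlap_with_suffix`, is what `DataFrame.join` goes through when column labels collide. A minimal usage sketch against the public API (not part of the diff) showing the behavior the helper implements:

```python
import pandas as pd

left = pd.DataFrame({'key': [1, 2], 'value': [10, 20]})
right = pd.DataFrame({'value': [30, 40]})

# With no suffixes, the overlap on 'value' raises:
# ValueError: columns overlap but no suffix specified: ...
joined = left.join(right, lsuffix='_l', rsuffix='_r')
print(joined.columns.tolist())  # ['key', 'value_l', 'value_r']
```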
Follow-up to #22014. Moved `get_blockno_placements` to libinternals since a) it's a natural fit anyway and b) it is needed in both concat and managers and I didn't want to runtime-import it. The only non-cut/paste edit is a change of `isinstance(self.block, ExtensionBlock)` to `self.block.is_extension`. The `__init__` namespace is big in part because `io.packers` uses `getattr(internals, name)` and is really tightly intertwined with internals.
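The `isinstance` to `is_extension` change mentioned above amounts to checking a class-level flag rather than importing the block class at the call site. A toy sketch of the pattern (the class names mirror the real blocks, but this is illustrative only):

```python
class Block(object):
    is_extension = False  # plain numpy-backed blocks


class ExtensionBlock(Block):
    is_extension = True


# Call sites can test the flag without having ExtensionBlock in scope,
# which is what lets the helper move into libinternals:
blk = ExtensionBlock()
assert blk.is_extension  # replaces isinstance(blk, ExtensionBlock)
```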
https://api.github.com/repos/pandas-dev/pandas/pulls/22028
2018-07-23T16:44:24Z
2018-07-24T00:03:35Z
2018-07-24T00:03:35Z
2018-07-26T16:22:30Z
TST: restructure internal extension arrays tests (split between /arrays and /extension)
diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/arrays/test_integer.py similarity index 70% rename from pandas/tests/extension/integer/test_integer.py rename to pandas/tests/arrays/test_integer.py index 3af127091d2d8..349a6aee5701e 100644 --- a/pandas/tests/extension/integer/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -1,11 +1,10 @@ +# -*- coding: utf-8 -*- import numpy as np import pandas as pd import pandas.util.testing as tm import pytest -from pandas.tests.extension import base -from pandas.api.types import ( - is_integer, is_scalar, is_float, is_float_dtype) +from pandas.api.types import is_integer, is_float, is_float_dtype, is_scalar from pandas.core.dtypes.generic import ABCIndexClass from pandas.core.arrays import ( @@ -14,6 +13,8 @@ Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) +from pandas.tests.extension.base import BaseOpsUtil + def make_data(): return (list(range(8)) + @@ -39,42 +40,13 @@ def data_missing(dtype): return integer_array([np.nan, 1], dtype=dtype) -@pytest.fixture -def data_repeated(data): - def gen(count): - for _ in range(count): - yield data - yield gen - - -@pytest.fixture -def data_for_sorting(dtype): - return integer_array([1, 2, 0], dtype=dtype) - - -@pytest.fixture -def data_missing_for_sorting(dtype): - return integer_array([1, np.nan, 0], dtype=dtype) - - -@pytest.fixture -def na_cmp(): - # we are np.nan - return lambda x, y: np.isnan(x) and np.isnan(y) - - -@pytest.fixture -def na_value(): - return np.nan - - -@pytest.fixture -def data_for_grouping(dtype): - b = 1 - a = 0 - c = 2 - na = np.nan - return integer_array([b, b, na, na, a, a, b, c], dtype=dtype) +@pytest.fixture(params=['data', 'data_missing']) +def all_data(request, data, data_missing): + """Parametrized fixture giving 'data' and 'data_missing'""" + if request.param == 'data': + return data + elif request.param == 'data_missing': + return data_missing def test_dtypes(dtype): @@ -87,61 +59,50 @@ def test_dtypes(dtype): assert dtype.name is not None -class BaseInteger(object): - - def assert_index_equal(self, left, right, *args, **kwargs): - - left_na = left.isna() - right_na = right.isna() +class TestInterface(object): - tm.assert_numpy_array_equal(left_na, right_na) - return tm.assert_index_equal(left[~left_na], - right[~right_na], - *args, **kwargs) - - def assert_series_equal(self, left, right, *args, **kwargs): + def test_repr_array(self, data): + result = repr(data) - left_na = left.isna() - right_na = right.isna() + # not long + assert '...' not in result - tm.assert_series_equal(left_na, right_na) - return tm.assert_series_equal(left[~left_na], - right[~right_na], - *args, **kwargs) + assert 'dtype=' in result + assert 'IntegerArray' in result - def assert_frame_equal(self, left, right, *args, **kwargs): - # TODO(EA): select_dtypes - tm.assert_index_equal( - left.columns, right.columns, - exact=kwargs.get('check_column_type', 'equiv'), - check_names=kwargs.get('check_names', True), - check_exact=kwargs.get('check_exact', False), - check_categorical=kwargs.get('check_categorical', True), - obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame'))) + def test_repr_array_long(self, data): + # some arrays may be able to assert a ... in the repr + with pd.option_context('display.max_seq_items', 1): + result = repr(data) - integers = (left.dtypes == 'integer').index + assert '...' 
in result + assert 'length' in result - for col in integers: - self.assert_series_equal(left[col], right[col], - *args, **kwargs) - left = left.drop(columns=integers) - right = right.drop(columns=integers) - tm.assert_frame_equal(left, right, *args, **kwargs) +class TestConstructors(object): + def test_from_dtype_from_float(self, data): + # construct from our dtype & string dtype + dtype = data.dtype -class TestDtype(BaseInteger, base.BaseDtypeTests): + # from float + expected = pd.Series(data) + result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) + tm.assert_series_equal(result, expected) - @pytest.mark.skip(reason="using multiple dtypes") - def test_is_dtype_unboxes_dtype(self): - # we have multiple dtypes, so skip - pass + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) - def test_array_type_with_arg(self, data, dtype): - assert dtype.construct_array_type() is IntegerArray + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + result = pd.Series(dropped, dtype=str(dtype)) + tm.assert_series_equal(result, expected) -class TestArithmeticOps(BaseInteger, base.BaseArithmeticOpsTests): +class TestArithmeticOps(BaseOpsUtil): def _check_divmod_op(self, s, op, other, exc=None): super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) @@ -178,7 +139,7 @@ def _check_op_float(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in float dtypes expected[mask] = np.nan - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def _check_op_integer(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in integer dtypes @@ -231,10 +192,10 @@ def _check_op_integer(self, result, expected, mask, s, op_name, other): original = original.astype('float') original[mask] = np.nan - self.assert_series_equal(original, expected.astype('float')) + tm.assert_series_equal(original, expected.astype('float')) # assert our expected result - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_arith_integer_array(self, data, all_arithmetic_operators): # we operate with a rhs of an integer array @@ -319,7 +280,7 @@ def test_error(self, data, all_arithmetic_operators): opa(np.arange(len(s)).reshape(-1, len(s))) -class TestComparisonOps(BaseInteger, base.BaseComparisonOpsTests): +class TestComparisonOps(BaseOpsUtil): def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) @@ -345,144 +306,21 @@ def _compare_other(self, s, data, op_name, other): tm.assert_series_equal(result, expected) + def test_compare_scalar(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + self._compare_other(s, data, op_name, 0) -class TestInterface(BaseInteger, base.BaseInterfaceTests): - - def test_repr_array(self, data): - result = repr(data) - - # not long - assert '...' not in result - - assert 'dtype=' in result - assert 'IntegerArray' in result - - def test_repr_array_long(self, data): - # some arrays may be able to assert a ... in the repr - with pd.option_context('display.max_seq_items', 1): - result = repr(data) - - assert '...' 
in result - assert 'length' in result - - -class TestConstructors(BaseInteger, base.BaseConstructorsTests): - - def test_from_dtype_from_float(self, data): - # construct from our dtype & string dtype - dtype = data.dtype - - # from float - expected = pd.Series(data) - result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) - self.assert_series_equal(result, expected) - - # from int / list - expected = pd.Series(data) - result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) - self.assert_series_equal(result, expected) - - # from int / array - expected = pd.Series(data).dropna().reset_index(drop=True) - dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) - result = pd.Series(dropped, dtype=str(dtype)) - self.assert_series_equal(result, expected) - - -class TestReshaping(BaseInteger, base.BaseReshapingTests): - - def test_concat_mixed_dtypes(self, data): - # https://github.com/pandas-dev/pandas/issues/20762 - df1 = pd.DataFrame({'A': data[:3]}) - df2 = pd.DataFrame({"A": [1, 2, 3]}) - df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') - df4 = pd.DataFrame({"A": pd.SparseArray([1, 2, 3])}) - dfs = [df1, df2, df3, df4] - - # dataframes - result = pd.concat(dfs) - expected = pd.concat([x.astype(object) for x in dfs]) - self.assert_frame_equal(result, expected) - - # series - result = pd.concat([x['A'] for x in dfs]) - expected = pd.concat([x['A'].astype(object) for x in dfs]) - self.assert_series_equal(result, expected) - - result = pd.concat([df1, df2]) - expected = pd.concat([df1.astype('object'), df2.astype('object')]) - self.assert_frame_equal(result, expected) - - # concat of an Integer and Int coerces to object dtype - # TODO(jreback) once integrated this would - # be a result of Integer - result = pd.concat([df1['A'], df2['A']]) - expected = pd.concat([df1['A'].astype('object'), - df2['A'].astype('object')]) - self.assert_series_equal(result, expected) - - -class TestGetitem(BaseInteger, base.BaseGetitemTests): - pass + def test_compare_array(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + other = pd.Series([0] * len(data)) + self._compare_other(s, data, op_name, other) -class TestMissing(BaseInteger, base.BaseMissingTests): +class TestCasting(object): pass - -class TestMethods(BaseInteger, base.BaseMethodsTests): - - @pytest.mark.parametrize('dropna', [True, False]) - def test_value_counts(self, all_data, dropna): - all_data = all_data[:10] - if dropna: - other = np.array(all_data[~all_data.isna()]) - else: - other = all_data - - result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() - expected = pd.Series(other).value_counts( - dropna=dropna).sort_index() - expected.index = expected.index.astype(all_data.dtype) - - self.assert_series_equal(result, expected) - - def test_combine_add(self, data_repeated): - # GH 20825 - orig_data1, orig_data2 = data_repeated(2) - s1 = pd.Series(orig_data1) - s2 = pd.Series(orig_data2) - - # fundamentally this is not a great operation - # as overflow / underflow can easily happen here - # e.g. 
int8 + int8 - def scalar_add(a, b): - - # TODO; should really be a type specific NA - if pd.isna(a) or pd.isna(b): - return np.nan - if is_integer(a): - a = int(a) - elif is_integer(b): - b = int(b) - return a + b - - result = s1.combine(s2, scalar_add) - expected = pd.Series( - orig_data1._from_sequence([scalar_add(a, b) for (a, b) in - zip(orig_data1, - orig_data2)])) - self.assert_series_equal(result, expected) - - val = s1.iloc[0] - result = s1.combine(val, lambda x1, x2: x1 + x2) - expected = pd.Series( - orig_data1._from_sequence([a + val for a in list(orig_data1)])) - self.assert_series_equal(result, expected) - - -class TestCasting(BaseInteger, base.BaseCastingTests): - @pytest.mark.parametrize('dropna', [True, False]) def test_construct_index(self, all_data, dropna): # ensure that we do not coerce to Float64Index, rather @@ -497,7 +335,7 @@ def test_construct_index(self, all_data, dropna): result = pd.Index(integer_array(other, dtype=all_data.dtype)) expected = pd.Index(other, dtype=object) - self.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize('dropna', [True, False]) def test_astype_index(self, all_data, dropna): @@ -515,7 +353,7 @@ def test_astype_index(self, all_data, dropna): result = idx.astype(dtype) expected = idx.astype(object).astype(dtype) - self.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) def test_astype(self, all_data): all_data = all_data[:10] @@ -528,13 +366,13 @@ def test_astype(self, all_data): s = pd.Series(ints) result = s.astype(all_data.dtype) expected = pd.Series(ints) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same other - ints s = pd.Series(ints) result = s.astype(dtype) expected = pd.Series(ints, dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - ints s = pd.Series(ints) @@ -547,13 +385,13 @@ def test_astype(self, all_data): s = pd.Series(mixed) result = s.astype(all_data.dtype) expected = pd.Series(mixed) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same other - mixed s = pd.Series(mixed) result = s.astype(dtype) expected = pd.Series(mixed, dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - mixed s = pd.Series(mixed) @@ -572,12 +410,12 @@ def test_astype_specific_casting(self, dtype): s = pd.Series([1, 2, 3], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3], dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) s = pd.Series([1, 2, 3, None], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3, None], dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_construct_cast_invalid(self, dtype): @@ -597,24 +435,6 @@ def test_construct_cast_invalid(self, dtype): pd.Series(arr).astype(dtype) -class TestGroupby(BaseInteger, base.BaseGroupbyTests): - - @pytest.mark.xfail(reason="groupby not working", strict=True) - def test_groupby_extension_no_sort(self, data_for_grouping): - super(TestGroupby, self).test_groupby_extension_no_sort( - data_for_grouping) - - @pytest.mark.parametrize('as_index', [ - pytest.param(True, - marks=pytest.mark.xfail(reason="groupby not working", - strict=True)), - False - ]) - def test_groupby_extension_agg(self, as_index, 
data_for_grouping): - super(TestGroupby, self).test_groupby_extension_agg( - as_index, data_for_grouping) - - def test_frame_repr(data_missing): df = pd.DataFrame({'A': data_missing}) diff --git a/pandas/tests/arrays/test_interval.py b/pandas/tests/arrays/test_interval.py new file mode 100644 index 0000000000000..bcf4cea795978 --- /dev/null +++ b/pandas/tests/arrays/test_interval.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +import pytest +import numpy as np + +from pandas import Index, IntervalIndex, date_range, timedelta_range +from pandas.core.arrays import IntervalArray +import pandas.util.testing as tm + + +@pytest.fixture(params=[ + (Index([0, 2, 4]), Index([1, 3, 5])), + (Index([0., 1., 2.]), Index([1., 2., 3.])), + (timedelta_range('0 days', periods=3), + timedelta_range('1 day', periods=3)), + (date_range('20170101', periods=3), date_range('20170102', periods=3)), + (date_range('20170101', periods=3, tz='US/Eastern'), + date_range('20170102', periods=3, tz='US/Eastern'))], + ids=lambda x: str(x[0].dtype)) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +class TestMethods(object): + + @pytest.mark.parametrize('repeats', [0, 1, 5]) + def test_repeat(self, left_right_dtypes, repeats): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right).repeat(repeats) + expected = IntervalArray.from_arrays( + left.repeat(repeats), right.repeat(repeats)) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize('bad_repeats, msg', [ + (-1, 'negative dimensions are not allowed'), + ('foo', r'invalid literal for (int|long)\(\) with base 10')]) + def test_repeat_errors(self, bad_repeats, msg): + array = IntervalArray.from_breaks(range(4)) + with tm.assert_raises_regex(ValueError, msg): + array.repeat(bad_repeats) + + @pytest.mark.parametrize('new_closed', [ + 'left', 'right', 'both', 'neither']) + def test_set_closed(self, closed, new_closed): + # GH 21670 + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) + tm.assert_extension_array_equal(result, expected) + + +class TestSetitem(object): + + def test_set_na(self, left_right_dtypes): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right) + result[0] = np.nan + + expected_left = Index([left._na_value] + list(left[1:])) + expected_right = Index([right._na_value] + list(right[1:])) + expected = IntervalArray.from_arrays(expected_left, expected_right) + + tm.assert_extension_array_equal(result, expected) + + +def test_repr_matches(): + idx = IntervalIndex.from_breaks([1, 2, 3]) + a = repr(idx) + b = repr(idx.values) + assert a.replace("Index", "Array") == b diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index c8656808739c4..4e7886dd2e943 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -127,10 +127,11 @@ def test_combine_add(self, data_repeated): s1 = pd.Series(orig_data1) s2 = pd.Series(orig_data2) result = s1.combine(s2, lambda x1, x2: x1 + x2) - expected = pd.Series( - orig_data1._from_sequence([a + b for (a, b) in - zip(list(orig_data1), - list(orig_data2))])) + with np.errstate(over='ignore'): + expected = pd.Series( + orig_data1._from_sequence([a + b for (a, b) in + zip(list(orig_data1), + list(orig_data2))])) self.assert_series_equal(result, expected) val = s1.iloc[0] diff 
--git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index f7bfdb8ec218a..05351c56862b8 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -23,9 +23,9 @@ def get_op_from_name(self, op_name): def check_opname(self, s, op_name, other, exc=NotImplementedError): op = self.get_op_from_name(op_name) - self._check_op(s, op, other, exc) + self._check_op(s, op, other, op_name, exc) - def _check_op(self, s, op, other, exc=NotImplementedError): + def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: result = op(s, other) expected = s.combine(other, op) @@ -69,7 +69,8 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) - self.check_opname(s, op_name, [s.iloc[0]] * len(s), exc=TypeError) + self.check_opname(s, op_name, pd.Series([s.iloc[0]] * len(s)), + exc=TypeError) def test_divmod(self, data): s = pd.Series(data) @@ -113,5 +114,5 @@ def test_compare_scalar(self, data, all_compare_operators): def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) - other = [0] * len(data) + other = pd.Series([data[0]] * len(data)) self._compare_other(s, data, op_name, other) diff --git a/pandas/tests/extension/category/__init__.py b/pandas/tests/extension/category/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/integer/__init__.py b/pandas/tests/extension/integer/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/interval/__init__.py b/pandas/tests/extension/interval/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/test_categorical.py similarity index 85% rename from pandas/tests/extension/category/test_categorical.py rename to pandas/tests/extension/test_categorical.py index 76f6b03907ef8..b8c73a9efdae8 100644 --- a/pandas/tests/extension/category/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -1,3 +1,18 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" import string import pytest @@ -204,10 +219,14 @@ class TestComparisonOps(base.BaseComparisonOpsTests): def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) if op_name == '__eq__': - assert not op(data, other).all() + result = op(s, other) + expected = s.combine(other, lambda x, y: x == y) + assert (result == expected).all() elif op_name == '__ne__': - assert op(data, other).all() + result = op(s, other) + expected = s.combine(other, lambda x, y: x != y) + assert (result == expected).all() else: with pytest.raises(TypeError): diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py new file mode 100644 index 0000000000000..50c0e6dd8b347 --- /dev/null +++ b/pandas/tests/extension/test_integer.py @@ -0,0 +1,229 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import numpy as np +import pandas as pd +import pytest + +from pandas.tests.extension import base +from pandas.core.dtypes.common import is_extension_array_dtype + +from pandas.core.arrays import IntegerArray, integer_array +from pandas.core.arrays.integer import ( + Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) + + +def make_data(): + return (list(range(1, 9)) + [np.nan] + list(range(10, 98)) + + [np.nan] + [99, 100]) + + +@pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype]) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + return integer_array(make_data(), dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return integer_array([np.nan, 1], dtype=dtype) + + +@pytest.fixture +def data_repeated(data): + def gen(count): + for _ in range(count): + yield data + yield gen + + +@pytest.fixture +def data_for_sorting(dtype): + return integer_array([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return integer_array([1, np.nan, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are np.nan + return lambda x, y: np.isnan(x) and np.isnan(y) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def data_for_grouping(dtype): + b = 1 + a = 0 + c = 2 + na = np.nan + return integer_array([b, b, na, na, a, a, b, c], dtype=dtype) + + +class TestDtype(base.BaseDtypeTests): + + @pytest.mark.skip(reason="using multiple dtypes") + def test_is_dtype_unboxes_dtype(self): + # we have multiple dtypes, so skip + pass + + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is IntegerArray + + +class TestArithmeticOps(base.BaseArithmeticOpsTests): + + def check_opname(self, s, op_name, other, exc=None): + # overwriting to indicate ops don't raise an error + super(TestArithmeticOps, self).check_opname(s, op_name, + other, exc=None) + + def _check_op(self, s, op, other, op_name, exc=NotImplementedError): + if exc is None: + if 
s.dtype.is_unsigned_integer and (op_name == '__rsub__'): + # TODO see https://github.com/pandas-dev/pandas/issues/22023 + pytest.skip("unsigned subtraction gives negative values") + + if (hasattr(other, 'dtype') + and not is_extension_array_dtype(other.dtype) + and pd.api.types.is_integer_dtype(other.dtype)): + # other is np.int64 and would therefore always result in + # upcasting, so keeping other as same numpy_dtype + other = other.astype(s.dtype.numpy_dtype) + + result = op(s, other) + expected = s.combine(other, op) + + if op_name == '__rdiv__': + # combine is not giving the correct result for this case + pytest.skip("skipping reverse div in python 2") + elif op_name in ('__rtruediv__', '__truediv__', '__div__'): + expected = expected.astype(float) + if op_name == '__rtruediv__': + # TODO reverse operators result in object dtype + result = result.astype(float) + elif op_name.startswith('__r'): + # TODO reverse operators result in object dtype + # see https://github.com/pandas-dev/pandas/issues/22024 + expected = expected.astype(s.dtype) + result = result.astype(s.dtype) + else: + # combine method result in 'biggest' (int64) dtype + expected = expected.astype(s.dtype) + pass + if (op_name == '__rpow__') and isinstance(other, pd.Series): + # TODO pow on Int arrays gives different result with NA + # see https://github.com/pandas-dev/pandas/issues/22022 + result = result.fillna(1) + + self.assert_series_equal(result, expected) + else: + with pytest.raises(exc): + op(s, other) + + def _check_divmod_op(self, s, op, other, exc=None): + super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + + @pytest.mark.skip(reason="intNA does not error on ops") + def test_error(self, data, all_arithmetic_operators): + # other specific errors tested in the integer array specific tests + pass + + +class TestComparisonOps(base.BaseComparisonOpsTests): + + def check_opname(self, s, op_name, other, exc=None): + super(TestComparisonOps, self).check_opname(s, op_name, + other, exc=None) + + def _compare_other(self, s, data, op_name, other): + self.check_opname(s, op_name, other) + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + # for test_concat_mixed_dtypes test + # concat of an Integer and Int coerces to object dtype + # TODO(jreback) once integrated this would + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts( + dropna=dropna).sort_index() + expected.index = expected.index.astype(all_data.dtype) + + self.assert_series_equal(result, expected) + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestGroupby(base.BaseGroupbyTests): + + @pytest.mark.xfail(reason="groupby not working", strict=True) + def test_groupby_extension_no_sort(self, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_no_sort( + data_for_grouping) + + @pytest.mark.parametrize('as_index', [ + pytest.param(True, + marks=pytest.mark.xfail(reason="groupby not working", + strict=True)), + False + ]) + def test_groupby_extension_agg(self, 
as_index, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_agg( + as_index, data_for_grouping) diff --git a/pandas/tests/extension/interval/test_interval.py b/pandas/tests/extension/test_interval.py similarity index 54% rename from pandas/tests/extension/interval/test_interval.py rename to pandas/tests/extension/test_interval.py index a10a56ddfdfac..625619a90ed4c 100644 --- a/pandas/tests/extension/interval/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -1,7 +1,22 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" import pytest import numpy as np -from pandas import Index, Interval, IntervalIndex, date_range, timedelta_range +from pandas import Interval from pandas.core.arrays import IntervalArray from pandas.core.dtypes.dtypes import IntervalDtype from pandas.tests.extension import base @@ -15,22 +30,6 @@ def make_data(): return [Interval(l, r) for l, r in zip(left, right)] -@pytest.fixture(params=[ - (Index([0, 2, 4]), Index([1, 3, 5])), - (Index([0., 1., 2.]), Index([1., 2., 3.])), - (timedelta_range('0 days', periods=3), - timedelta_range('1 day', periods=3)), - (date_range('20170101', periods=3), date_range('20170102', periods=3)), - (date_range('20170101', periods=3, tz='US/Eastern'), - date_range('20170102', periods=3, tz='US/Eastern'))], - ids=lambda x: str(x[0].dtype)) -def left_right_dtypes(request): - """ - Fixture for building an IntervalArray from various dtypes - """ - return request.param - - @pytest.fixture def dtype(): return IntervalDtype() @@ -111,30 +110,6 @@ class TestInterface(BaseInterval, base.BaseInterfaceTests): class TestMethods(BaseInterval, base.BaseMethodsTests): - @pytest.mark.parametrize('repeats', [0, 1, 5]) - def test_repeat(self, left_right_dtypes, repeats): - left, right = left_right_dtypes - result = IntervalArray.from_arrays(left, right).repeat(repeats) - expected = IntervalArray.from_arrays( - left.repeat(repeats), right.repeat(repeats)) - tm.assert_extension_array_equal(result, expected) - - @pytest.mark.parametrize('bad_repeats, msg', [ - (-1, 'negative dimensions are not allowed'), - ('foo', r'invalid literal for (int|long)\(\) with base 10')]) - def test_repeat_errors(self, bad_repeats, msg): - array = IntervalArray.from_breaks(range(4)) - with tm.assert_raises_regex(ValueError, msg): - array.repeat(bad_repeats) - - @pytest.mark.parametrize('new_closed', [ - 'left', 'right', 'both', 'neither']) - def test_set_closed(self, closed, new_closed): - # GH 21670 - array = IntervalArray.from_breaks(range(10), closed=closed) - result = array.set_closed(new_closed) - expected = IntervalArray.from_breaks(range(10), closed=new_closed) - tm.assert_extension_array_equal(result, expected) @pytest.mark.skip(reason='addition is not defined for intervals') def test_combine_add(self, data_repeated): @@ -173,21 +148,4 @@ class TestReshaping(BaseInterval, base.BaseReshapingTests): class TestSetitem(BaseInterval, base.BaseSetitemTests): - - def 
test_set_na(self, left_right_dtypes): - left, right = left_right_dtypes - result = IntervalArray.from_arrays(left, right) - result[0] = np.nan - - expected_left = Index([left._na_value] + list(left[1:])) - expected_right = Index([right._na_value] + list(right[1:])) - expected = IntervalArray.from_arrays(expected_left, expected_right) - - self.assert_extension_array_equal(result, expected) - - -def test_repr_matches(): - idx = IntervalIndex.from_breaks([1, 2, 3]) - a = repr(idx) - b = repr(idx.values) - assert a.replace("Index", "Array") == b + pass
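As a concrete example of the array-specific tests being relocated here, `IntervalArray.set_closed` (now covered in `tests/arrays/test_interval.py` above) can be exercised directly. A small sketch of the behavior under test, written against the API shown in the diff:

```python
import pandas.util.testing as tm
from pandas.core.arrays import IntervalArray

arr = IntervalArray.from_breaks(range(4), closed='right')
result = arr.set_closed('left')  # GH 21670
expected = IntervalArray.from_breaks(range(4), closed='left')
tm.assert_extension_array_equal(result, expected)
```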
Pull request to discuss what to do with the tests for internal EAs (and one of the comments I still had in https://github.com/pandas-dev/pandas/pull/21160). Basically, I would keep `tests/extension/..` only for subclassing the base extension array test suite, and any array-specific functionality is tested in `tests/arrays/..` (e.g. the closed attribute for IntervalArray, specific arithmetic behaviour for IntegerArray, ...). This means that when adding a test related to EAs, we need to think about: is this testing something that is applicable to all EAs? (-> add a base test to `tests/extension/base` so this is tested for all internal and external EAs) Or is this testing something specific to a particular EA? (-> add a test in `tests/arrays/EAtype/..`) Of course there can often be some ambiguity here. The main reason I would split them is that over time we will probably add a lot of EA-type-specific tests, and keeping the general ones mixed with the specific ones will only make it confusing / hard to see what is going on. The drawback is of course that things end up tested in two places. In practice what I propose in this PR is also what we already do for Categorical at the moment: Categorical has its own tests in `tests/arrays/categorical` (and probably also some in `indexes` and `frame`, ..), but we also run the base extension tests for Categorical in `tests/extension/`
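A sketch of the layout being proposed (file and fixture names here are hypothetical; the concrete version for integers is the `test_integer.py` shown in the diff):

```python
# tests/extension/test_myarray.py: thin subclasses of the shared base
# suite, with fixtures supplying the array under test.
import pytest

from pandas.tests.extension import base


@pytest.fixture
def data():
    """Return a length-100 MyArray instance for the base suite."""
    raise NotImplementedError  # supplied per array type


class TestDtype(base.BaseDtypeTests):
    pass


class TestMethods(base.BaseMethodsTests):
    pass

# Anything specific to MyArray (extra methods, dtype quirks) would live
# in tests/arrays/test_myarray.py instead, outside the shared suite.
```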
https://api.github.com/repos/pandas-dev/pandas/pulls/22026
2018-07-23T14:59:48Z
2018-09-06T10:11:30Z
2018-09-06T10:11:30Z
2018-09-06T10:11:33Z
[CLN] Dispatch (some) Frame ops to Series, avoiding _data.eval
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9b71ab656920d..700916ba6066e 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -532,6 +532,35 @@ Current Behavior: ... OverflowError: Trying to coerce negative values to unsigned integers +.. _whatsnew_0240.api.crosstab_dtypes + +Crosstab Preserves Dtypes +^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`crosstab` will preserve now dtypes in some cases that previously would +cast from integer dtype to floating dtype (:issue:`22019`) + +Previous Behavior: + +.. code-block:: ipython + + In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4], + ...: 'c': [1, 1, np.nan, 1, 1]}) + In [4]: pd.crosstab(df.a, df.b, normalize='columns') + Out[4]: + b 3 4 + a + 1 0.5 0.0 + 2 0.5 1.0 + +Current Behavior: + +.. code-block:: ipython + + In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4], + ...: 'c': [1, 1, np.nan, 1, 1]}) + In [4]: pd.crosstab(df.a, df.b, normalize='columns') + Datetimelike API Changes ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 138d1017aa43d..ff7590f6d5358 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4899,7 +4899,6 @@ def _arith_op(left, right): copy=False) def _combine_match_index(self, other, func, level=None): - assert isinstance(other, Series) left, right = self.align(other, join='outer', axis=0, level=level, copy=False) assert left.index.equals(right.index) @@ -4919,11 +4918,7 @@ def _combine_match_columns(self, other, func, level=None, try_cast=True): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) assert left.columns.equals(right.index) - - new_data = left._data.eval(func=func, other=right, - axes=[left.columns, self.index], - try_cast=try_cast) - return self._constructor(new_data) + return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func, errors='raise', try_cast=True): if lib.is_scalar(other) or np.ndim(other) == 0: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 8171840c96b6e..a05b2bad9bd3e 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1666,7 +1666,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # ----------------------------------------------------------------------------- # DataFrame -def dispatch_to_series(left, right, func, str_rep=None): +def dispatch_to_series(left, right, func, str_rep=None, axis=None): """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. 
@@ -1677,6 +1677,7 @@ def dispatch_to_series(left, right, func, str_rep=None): right : scalar or DataFrame func : arithmetic or comparison operator str_rep : str or None, default None + axis : {None, 0, 1, "index", "columns"} Returns ------- @@ -1700,6 +1701,15 @@ def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} + elif isinstance(right, ABCSeries) and axis == "columns": + # We only get here if called via left._combine_match_columns, + # in which case we specifically want to operate row-by-row + assert right.index.equals(left.columns) + + def column_op(a, b): + return {i: func(a.iloc[:, i], b.iloc[i]) + for i in range(len(a.columns))} + elif isinstance(right, ABCSeries): assert right.index.equals(left.index) # Handle other cases later @@ -1844,7 +1854,10 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): pass_op = op if should_series_dispatch(self, other, op) else na_op return self._combine_frame(other, pass_op, fill_value, level) elif isinstance(other, ABCSeries): - return _combine_series_frame(self, other, na_op, + # For these values of `axis`, we end up dispatching to Series op, + # so do not want the masked op. + pass_op = op if axis in [0, "columns", None] else na_op + return _combine_series_frame(self, other, pass_op, fill_value=fill_value, axis=axis, level=level, try_cast=True) else: diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 5050922173564..a09efe6d4761c 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -505,33 +505,25 @@ def test_tdi_add_dt64_array(self, box_df_broadcast_failure): # ------------------------------------------------------------------ # Operations with int-like others - def test_td64arr_add_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_add_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser + Series([2, 3, 4]) - def test_td64arr_radd_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_radd_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): Series([2, 3, 4]) + tdser - def test_td64arr_sub_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_sub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser - Series([2, 3, 4]) - def test_td64arr_rsub_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_rsub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): @@ -605,9 +597,10 @@ def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser): Series([1, 2, 3]) # TODO: Add DataFrame in here? 
], ids=lambda x: type(x).__name__) - def test_td64arr_add_sub_numeric_arr_invalid( - self, box_df_broadcast_failure, vec, dtype, tdser): - box = box_df_broadcast_failure + def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser): + if box is pd.DataFrame and not isinstance(vec, Series): + raise pytest.xfail(reason="Tries to broadcast incorrectly") + tdser = tm.box_expected(tdser, box) err = TypeError if box is pd.Index and not dtype.startswith('float'): @@ -930,9 +923,9 @@ def test_td64arr_sub_offset_array(self, box_df_broadcast_failure): @pytest.mark.parametrize('names', [(None, None, None), ('foo', 'bar', None), ('foo', 'foo', 'foo')]) - def test_td64arr_with_offset_series(self, names, box_df_broadcast_failure): + def test_td64arr_with_offset_series(self, names, box_df_fail): # GH#18849 - box = box_df_broadcast_failure + box = box_df_fail box2 = Series if box is pd.Index else box tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], @@ -963,10 +956,11 @@ def test_td64arr_with_offset_series(self, names, box_df_broadcast_failure): tm.assert_equal(res3, expected_sub) @pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series]) - def test_td64arr_addsub_anchored_offset_arraylike( - self, obox, box_df_broadcast_failure): + def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box): # GH#18824 - box = box_df_broadcast_failure + if box is pd.DataFrame and obox is not pd.Series: + raise pytest.xfail(reason="Attempts to broadcast incorrectly") + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) tdi = tm.box_expected(tdi, box) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 0bc74c6890ee9..6186ce4d45ef2 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -721,7 +721,7 @@ def test_align_int_fill_bug(self): result = df1 - df1.mean() expected = df2 - df2.mean() - assert_frame_equal(result, expected) + assert_frame_equal(result.astype('f8'), expected) def test_align_multiindex(self): # GH 10665 diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 1ee48d0120c7d..1cb036dccf23c 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1566,8 +1566,9 @@ def test_crosstab_normalize(self): full_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'), row_normal) - tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'), - col_normal) + tm.assert_frame_equal( + pd.crosstab(df.a, df.b, normalize='columns').astype('f8'), + col_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1), pd.crosstab(df.a, df.b, normalize='columns')) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0), @@ -1600,7 +1601,8 @@ def test_crosstab_normalize(self): tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index', margins=True), row_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns', - margins=True), col_normal_margins) + margins=True).astype('f8'), + col_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 601e251d45b4b..f3ab197771d53 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -758,9 +758,6 @@ def test_operators_bitwise(self): def test_scalar_na_cmp_corners(self): s = Series([2, 3, 4, 5, 6, 7, 
8, 9, 10]) - def tester(a, b): - return a & b - with pytest.raises(TypeError): s & datetime(2005, 1, 1) @@ -780,8 +777,11 @@ def tester(a, b): # this is an alignment issue; these are equivalent # https://github.com/pandas-dev/pandas/issues/5284 - pytest.raises(ValueError, lambda: d.__and__(s, axis='columns')) - pytest.raises(ValueError, tester, s, d) + with pytest.raises(TypeError): + d.__and__(s, axis='columns') + + with pytest.raises(TypeError): + s & d # this is wrong as its not a boolean result # result = d.__and__(s,axis='index')
At the moment there are exactly two places where `BlockManager.eval` is called: `DataFrame._combine_match_columns` and `DataFrame._combine_const`. This replaces the usage in `_combine_match_columns` with a dispatch-to-Series implementation. Some output dtypes change (see edits in `test_axis_select_reindex`, `test_pivot`), and some errors change from `ValueError` to `TypeError` (see `test_operators`). The other usage of `_data.eval` will be removed separately; that turns out to be a lot more trouble because a bunch of `DataFrame` behavior is currently incorrect (see #22017). This PR also:
- Simplifies some of the special-casing in SparseDataFrame, moving towards not having separate implementations for these methods
- Dispatches `_combine_match_index` to avoid calling `self.values` when doing so would require coercing to object dtype
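The dispatch itself is just a column-by-column application of the Series op. A stripped-down sketch of the `axis="columns"` case added in this diff (`dispatch_columns` is an illustrative stand-in, not the actual `dispatch_to_series` helper, which handles more right-hand types):

```python
import pandas as pd


def dispatch_columns(left, right, func):
    # right is a Series indexed by left's columns; apply element i of
    # right to column i of left, as column_op does in the patch above
    assert right.index.equals(left.columns)
    data = {i: func(left.iloc[:, i], right.iloc[i])
            for i in range(len(left.columns))}
    result = pd.DataFrame(data, index=left.index)
    result.columns = left.columns
    return result


df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
s = pd.Series({'a': 10, 'b': 100})
print(dispatch_columns(df, s, lambda x, y: x * y))
#     a    b
# 0  10  300
# 1  20  400
```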
https://api.github.com/repos/pandas-dev/pandas/pulls/22019
2018-07-22T22:44:29Z
2018-10-03T11:32:35Z
2018-10-03T11:32:35Z
2018-10-03T11:32:41Z
REF: move range-generation functions to EA mixin classes
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 7bb1c45998eb2..eb8821382037d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from datetime import datetime, timedelta import operator import warnings @@ -8,7 +9,7 @@ from pandas._libs.tslibs import timezones from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta from pandas._libs.tslibs.period import ( - DIFFERENT_FREQ_INDEX, IncompatibleFrequency) + Period, DIFFERENT_FREQ_INDEX, IncompatibleFrequency) from pandas.errors import NullFrequencyError, PerformanceWarning from pandas import compat @@ -19,6 +20,13 @@ from pandas.core.dtypes.common import ( needs_i8_conversion, is_list_like, + is_offsetlike, + is_extension_array_dtype, + is_datetime64_dtype, + is_datetime64_any_dtype, + is_datetime64tz_dtype, + is_float_dtype, + is_integer_dtype, is_bool_dtype, is_period_dtype, is_timedelta64_dtype, @@ -100,7 +108,7 @@ class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin): _freq and that the inheriting class has methods: - _validate_frequency + _generate_range """ @property @@ -132,6 +140,14 @@ def asi8(self): # ------------------------------------------------------------------ # Array-like Methods + @property + def shape(self): + return (len(self),) + + @property + def size(self): + return np.prod(self.shape) + def __len__(self): return len(self._data) @@ -296,6 +312,34 @@ def resolution(self): """ return frequencies.Resolution.get_str(self._resolution) + @classmethod + def _validate_frequency(cls, index, freq, **kwargs): + """ + Validate that a frequency is compatible with the values of a given + Datetime Array/Index or Timedelta Array/Index + + Parameters + ---------- + index : DatetimeIndex or TimedeltaIndex + The index on which to determine if the given frequency is valid + freq : DateOffset + The frequency to validate + """ + if is_period_dtype(cls): + # Frequency validation is not meaningful for Period Array/Index + return None + + inferred = index.inferred_freq + if index.size == 0 or inferred == freq.freqstr: + return None + + on_freq = cls._generate_range(start=index[0], end=None, + periods=len(index), freq=freq, **kwargs) + if not np.array_equal(index.asi8, on_freq.asi8): + raise ValueError('Inferred frequency {infer} from passed values ' + 'does not conform to passed frequency {passed}' + .format(infer=inferred, passed=freq.freqstr)) + # ------------------------------------------------------------------ # Arithmetic Methods @@ -477,6 +521,188 @@ def _addsub_offset_array(self, other, op): kwargs['freq'] = 'infer' return type(self)(res_values, **kwargs) + def shift(self, n, freq=None): + """ + Specialized shift which produces a Datetime/Timedelta Array/Index + + Parameters + ---------- + n : int + Periods to shift by + freq : DateOffset or timedelta-like, optional + + Returns + ------- + shifted : same type as self + """ + if freq is not None and freq != self.freq: + if isinstance(freq, compat.string_types): + freq = frequencies.to_offset(freq) + offset = n * freq + result = self + offset + + if hasattr(self, 'tz'): + result._tz = self.tz + + return result + + if n == 0: + # immutable so OK + return self + + if self.freq is None: + raise NullFrequencyError("Cannot shift with no freq") + + start = self[0] + n * self.freq + end = self[-1] + n * self.freq + attribs = self._get_attributes_dict() + return self._generate_range(start=start, end=end, periods=None, + **attribs) + + @classmethod + 
def _add_datetimelike_methods(cls): + """ + add in the datetimelike methods (as we may have to override the + superclass) + """ + + def __add__(self, other): + other = lib.item_from_zerodim(other) + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + + # scalar others + elif other is NaT: + result = self._add_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_delta(other) + elif isinstance(other, DateOffset): + # specifically _not_ a Tick + result = self._add_offset(other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._add_datelike(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = self.shift(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_delta(other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + result = self._addsub_offset_array(other, operator.add) + elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): + # DatetimeIndex, ndarray[datetime64] + return self._add_datelike(other) + elif is_integer_dtype(other): + result = self._addsub_int_array(other, operator.add) + elif is_float_dtype(other) or is_period_dtype(other): + # Explicitly catch invalid dtypes + raise TypeError("cannot add {dtype}-dtype to {cls}" + .format(dtype=other.dtype, + cls=type(self).__name__)) + elif is_extension_array_dtype(other): + # Categorical op will raise; defer explicitly + return NotImplemented + else: # pragma: no cover + return NotImplemented + + return result + + cls.__add__ = __add__ + + def __radd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__radd__ = __radd__ + + def __sub__(self, other): + other = lib.item_from_zerodim(other) + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + + # scalar others + elif other is NaT: + result = self._sub_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_delta(-other) + elif isinstance(other, DateOffset): + # specifically _not_ a Tick + result = self._add_offset(-other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._sub_datelike(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = self.shift(-other) + elif isinstance(other, Period): + result = self._sub_period(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_delta(-other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + result = self._addsub_offset_array(other, operator.sub) + elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): + # DatetimeIndex, ndarray[datetime64] + result = self._sub_datelike(other) + elif is_period_dtype(other): + # PeriodIndex + result = self._sub_period_array(other) + elif is_integer_dtype(other): + result = self._addsub_int_array(other, operator.sub) + elif isinstance(other, ABCIndexClass): + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + elif is_float_dtype(other): + # Explicitly catch invalid dtypes + raise TypeError("cannot subtract {dtype}-dtype from {cls}" + .format(dtype=other.dtype, + cls=type(self).__name__)) + elif is_extension_array_dtype(other): + # Categorical op will raise; defer explicitly + return 
NotImplemented + else: # pragma: no cover + return NotImplemented + + return result + + cls.__sub__ = __sub__ + + def __rsub__(self, other): + if is_datetime64_dtype(other) and is_timedelta64_dtype(self): + # ndarray[datetime64] cannot be subtracted from self, so + # we need to wrap in DatetimeArray/Index and flip the operation + if not isinstance(other, DatetimeLikeArrayMixin): + # Avoid down-casting DatetimeIndex + from pandas.core.arrays import DatetimeArrayMixin + other = DatetimeArrayMixin(other) + return other - self + elif (is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and + not is_datetime64_any_dtype(other)): + # GH#19959 datetime - datetime is well-defined as timedelta, + # but any other type - datetime is not well-defined. + raise TypeError("cannot subtract {cls} from {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + return -(self - other) + cls.__rsub__ = __rsub__ + + def __iadd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__iadd__ = __iadd__ + + def __isub__(self, other): + # alias for __sub__ + return self.__sub__(other) + cls.__isub__ = __isub__ + # -------------------------------------------------------------- # Comparison Methods diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 29f97b344f267..00d53ad82b2dc 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from datetime import datetime, timedelta +from datetime import datetime, timedelta, time import warnings import numpy as np @@ -8,11 +8,12 @@ from pandas._libs import tslib from pandas._libs.tslib import Timestamp, NaT, iNaT from pandas._libs.tslibs import ( + normalize_date, conversion, fields, timezones, resolution as libresolution) from pandas.util._decorators import cache_readonly -from pandas.errors import PerformanceWarning +from pandas.errors import PerformanceWarning, AbstractMethodError from pandas import compat from pandas.core.dtypes.common import ( @@ -30,11 +31,14 @@ from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.frequencies import to_offset -from pandas.tseries.offsets import Tick +from pandas.tseries.offsets import Tick, Day, generate_range from pandas.core.arrays import datetimelike as dtl +_midnight = time(0, 0) + + def _to_m8(key, tz=None): """ Timestamp-like => dt64 @@ -177,13 +181,16 @@ def _simple_new(cls, values, freq=None, tz=None, **kwargs): result._tz = timezones.tz_standardize(tz) return result - def __new__(cls, values, freq=None, tz=None): + def __new__(cls, values, freq=None, tz=None, dtype=None): if tz is None and hasattr(values, 'tz'): # e.g. 
DatetimeIndex tz = values.tz freq, freq_infer = dtl.maybe_infer_freq(freq) + # if dtype has an embedded tz, capture it + tz = dtl.validate_tz_from_dtype(dtype, tz) + result = cls._simple_new(values, freq=freq, tz=tz) if freq_infer: inferred = result.inferred_freq @@ -194,6 +201,117 @@ def __new__(cls, values, freq=None, tz=None): # constructor, this does not call _deepcopy_if_needed return result + @classmethod + def _generate_range(cls, start, end, periods, freq, tz=None, + normalize=False, ambiguous='raise', closed=None): + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') + freq = to_offset(freq) + + if start is not None: + start = Timestamp(start) + + if end is not None: + end = Timestamp(end) + + if start is None and end is None: + if closed is not None: + raise ValueError("Closed has to be None if not both of start" + "and end are defined") + + left_closed, right_closed = dtl.validate_endpoints(closed) + + start, end, _normalized = _maybe_normalize_endpoints(start, end, + normalize) + + tz, inferred_tz = _infer_tz_from_endpoints(start, end, tz) + + if hasattr(freq, 'delta') and freq != Day(): + # sub-Day Tick + if inferred_tz is None and tz is not None: + # naive dates + if start is not None and start.tz is None: + start = start.tz_localize(tz, ambiguous=False) + + if end is not None and end.tz is None: + end = end.tz_localize(tz, ambiguous=False) + + if start and end: + if start.tz is None and end.tz is not None: + start = start.tz_localize(end.tz, ambiguous=False) + + if end.tz is None and start.tz is not None: + end = end.tz_localize(start.tz, ambiguous=False) + + if cls._use_cached_range(freq, _normalized, start, end): + index = cls._cached_range(start, end, periods=periods, + freq=freq) + else: + index = _generate_regular_range(cls, start, end, periods, freq) + + else: + + if tz is not None: + # naive dates + if start is not None and start.tz is not None: + start = start.replace(tzinfo=None) + + if end is not None and end.tz is not None: + end = end.replace(tzinfo=None) + + if start and end: + if start.tz is None and end.tz is not None: + end = end.replace(tzinfo=None) + + if end.tz is None and start.tz is not None: + start = start.replace(tzinfo=None) + + if freq is not None: + if cls._use_cached_range(freq, _normalized, start, end): + index = cls._cached_range(start, end, periods=periods, + freq=freq) + else: + index = _generate_regular_range(cls, start, end, + periods, freq) + + if tz is not None and getattr(index, 'tz', None) is None: + arr = conversion.tz_localize_to_utc( + ensure_int64(index.values), + tz, ambiguous=ambiguous) + + index = cls(arr) + + # index is localized datetime64 array -> have to convert + # start/end as well to compare + if start is not None: + start = start.tz_localize(tz).asm8 + if end is not None: + end = end.tz_localize(tz).asm8 + else: + # Create a linearly spaced date_range in local time + start = start.tz_localize(tz) + end = end.tz_localize(tz) + arr = np.linspace(start.value, end.value, periods) + index = cls._simple_new(arr.astype('M8[ns]'), freq=None, tz=tz) + + if not left_closed and len(index) and index[0] == start: + index = index[1:] + if not right_closed and len(index) and index[-1] == end: + index = index[:-1] + + return cls._simple_new(index.values, freq=freq, tz=tz) + + @classmethod + def _use_cached_range(cls, freq, _normalized, start, end): + # DatetimeArray is mutable, so is not cached + return False + + 
@classmethod + def _cached_range(cls, start=None, end=None, + periods=None, freq=None, **kwargs): + raise AbstractMethodError(cls) + # ----------------------------------------------------------------- # Descriptive Properties @@ -1085,3 +1203,109 @@ def to_julian_date(self): DatetimeArrayMixin._add_comparison_ops() +DatetimeArrayMixin._add_datetimelike_methods() + + +def _generate_regular_range(cls, start, end, periods, freq): + if isinstance(freq, Tick): + stride = freq.nanos + if periods is None: + b = Timestamp(start).value + # cannot just use e = Timestamp(end) + 1 because arange breaks when + # stride is too large, see GH10887 + e = (b + (Timestamp(end).value - b) // stride * stride + + stride // 2 + 1) + # end.tz == start.tz by this point due to _generate implementation + tz = start.tz + elif start is not None: + b = Timestamp(start).value + e = b + np.int64(periods) * stride + tz = start.tz + elif end is not None: + e = Timestamp(end).value + stride + b = e - np.int64(periods) * stride + tz = end.tz + else: + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") + + data = np.arange(b, e, stride, dtype=np.int64) + data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) + else: + tz = None + if isinstance(start, Timestamp): + tz = start.tz + start = start.to_pydatetime() + + if isinstance(end, Timestamp): + tz = end.tz + end = end.to_pydatetime() + + xdr = generate_range(start=start, end=end, + periods=periods, offset=freq) + + values = np.array([x.value for x in xdr]) + data = cls._simple_new(values, freq=freq, tz=tz) + + return data + + +def _infer_tz_from_endpoints(start, end, tz): + """ + If a timezone is not explicitly given via `tz`, see if one can + be inferred from the `start` and `end` endpoints. If more than one + of these inputs provides a timezone, require that they all agree. 
+ + Parameters + ---------- + start : Timestamp + end : Timestamp + tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + inferred_tz : tzinfo or None + + Raises + ------ + TypeError : if start and end timezones do not agree + """ + try: + inferred_tz = timezones.infer_tzinfo(start, end) + except Exception: + raise TypeError('Start and end cannot both be tz-aware with ' + 'different timezones') + + inferred_tz = timezones.maybe_get_tz(inferred_tz) + tz = timezones.maybe_get_tz(tz) + + if tz is not None and inferred_tz is not None: + if not timezones.tz_compare(inferred_tz, tz): + raise AssertionError("Inferred time zone not equal to passed " + "time zone") + + elif inferred_tz is not None: + tz = inferred_tz + + return tz, inferred_tz + + +def _maybe_normalize_endpoints(start, end, normalize): + _normalized = True + + if start is not None: + if normalize: + start = normalize_date(start) + _normalized = True + else: + _normalized = _normalized and start.time() == _midnight + + if end is not None: + if normalize: + end = normalize_date(end) + _normalized = True + else: + _normalized = _normalized and end.time() == _midnight + + return start, end, _normalized diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 9c98f73312dbf..481d5313f0e25 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -386,6 +386,7 @@ def _maybe_convert_timedelta(self, other): PeriodArrayMixin._add_comparison_ops() +PeriodArrayMixin._add_datetimelike_methods() # ------------------------------------------------------------------- diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index cc93644677463..df9e57cb5f0e1 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -4,7 +4,7 @@ import numpy as np from pandas._libs import tslibs -from pandas._libs.tslibs import Timedelta, NaT +from pandas._libs.tslibs import Timedelta, Timestamp, NaT, iNaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -16,6 +16,7 @@ from pandas.core.dtypes.missing import isna import pandas.core.common as com +from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.offsets import Tick from pandas.tseries.frequencies import to_offset @@ -230,6 +231,36 @@ def _add_delta(self, delta): return type(self)(new_values, freq='infer') + def _add_datelike(self, other): + # adding a timedeltaindex to a datetimelike + from pandas.core.arrays import DatetimeArrayMixin + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + # if other is an ndarray, we assume it is datetime64-dtype + # defer to implementation in DatetimeIndex + if not isinstance(other, DatetimeArrayMixin): + other = DatetimeArrayMixin(other) + return other + self + else: + assert other is not NaT + other = Timestamp(other) + i8 = self.asi8 + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, fill_value=iNaT) + return DatetimeArrayMixin(result) + + def _addsub_offset_array(self, other, op): + # Add or subtract Array-like of DateOffset objects + try: + # TimedeltaIndex can only operate with a subset of DateOffset + # subclasses. 
Incompatible classes will raise AttributeError, + # which we re-raise as TypeError + return dtl.DatetimeLikeArrayMixin._addsub_offset_array(self, other, + op) + except AttributeError: + raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" + .format(cls=type(self).__name__)) + def _evaluate_with_timedelta_like(self, other, op): if isinstance(other, ABCSeries): # GH#19042 @@ -370,6 +401,7 @@ def f(x): TimedeltaArrayMixin._add_comparison_ops() +TimedeltaArrayMixin._add_datetimelike_methods() # --------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 20926ea5163af..f09fe8c8abdcf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -274,6 +274,26 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) + elif (is_datetime64_any_dtype(data) or + (dtype is not None and is_datetime64_any_dtype(dtype)) or + 'tz' in kwargs): + from pandas import DatetimeIndex + result = DatetimeIndex(data, copy=copy, name=name, + dtype=dtype, **kwargs) + if dtype is not None and is_dtype_equal(_o_dtype, dtype): + return Index(result.to_pydatetime(), dtype=_o_dtype) + else: + return result + + elif (is_timedelta64_dtype(data) or + (dtype is not None and is_timedelta64_dtype(dtype))): + from pandas import TimedeltaIndex + result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) + if dtype is not None and _o_dtype == dtype: + return Index(result.to_pytimedelta(), dtype=_o_dtype) + else: + return result + # extension dtype elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): data = np.asarray(data) @@ -290,27 +310,6 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): - - if (is_datetime64_any_dtype(data) or - (dtype is not None and is_datetime64_any_dtype(dtype)) or - 'tz' in kwargs): - from pandas import DatetimeIndex - result = DatetimeIndex(data, copy=copy, name=name, - dtype=dtype, **kwargs) - if dtype is not None and is_dtype_equal(_o_dtype, dtype): - return Index(result.to_pydatetime(), dtype=_o_dtype) - else: - return result - - elif (is_timedelta64_dtype(data) or - (dtype is not None and is_timedelta64_dtype(dtype))): - from pandas import TimedeltaIndex - result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) - if dtype is not None and _o_dtype == dtype: - return Index(result.to_pytimedelta(), dtype=_o_dtype) - else: - return result - if dtype is not None: try: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8f05a9a887830..3f8c07fe7cd21 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -3,8 +3,6 @@ Base and utility classes for tseries type pandas objects. 
""" import warnings -import operator -from datetime import datetime, timedelta from pandas import compat from pandas.compat.numpy import function as nv @@ -13,7 +11,6 @@ import numpy as np from pandas._libs import lib, iNaT, NaT -from pandas._libs.tslibs.period import Period from pandas._libs.tslibs.timestamps import round_ns from pandas.core.dtypes.common import ( @@ -24,32 +21,23 @@ is_list_like, is_scalar, is_bool_dtype, - is_offsetlike, is_categorical_dtype, is_datetime_or_timedelta_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, - is_string_dtype, - is_datetime64_dtype, - is_datetime64tz_dtype, - is_datetime64_any_dtype, - is_period_dtype, - is_timedelta64_dtype) + is_string_dtype) from pandas.core.dtypes.generic import ( - ABCIndex, ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCIndexClass) + ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna from pandas.core import common as com, algorithms, ops -from pandas.errors import NullFrequencyError import pandas.io.formats.printing as printing from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat -import pandas.tseries.frequencies as frequencies -from pandas.tseries.offsets import Tick, DateOffset import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -206,30 +194,6 @@ def floor(self, freq): def ceil(self, freq): return self._round(freq, np.ceil) - @classmethod - def _validate_frequency(cls, index, freq, **kwargs): - """ - Validate that a frequency is compatible with the values of a given - DatetimeIndex or TimedeltaIndex - - Parameters - ---------- - index : DatetimeIndex or TimedeltaIndex - The index on which to determine if the given frequency is valid - freq : DateOffset - The frequency to validate - """ - inferred = index.inferred_freq - if index.empty or inferred == freq.freqstr: - return None - - on_freq = cls._generate_range( - index[0], None, len(index), None, freq, **kwargs) - if not np.array_equal(index.asi8, on_freq.asi8): - msg = ('Inferred frequency {infer} from passed values does not ' - 'conform to passed frequency {passed}') - raise ValueError(msg.format(infer=inferred, passed=freq.freqstr)) - class DatetimeIndexOpsMixin(DatetimeLikeArrayMixin): """ common ops mixin to support a unified interface datetimelike Index """ @@ -584,56 +548,9 @@ def _add_datetimelike_methods(cls): """ def __add__(self, other): - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - - # scalar others - elif other is NaT: - result = self._add_nat() - elif isinstance(other, (Tick, timedelta, np.timedelta64)): - result = self._add_delta(other) - elif isinstance(other, DateOffset): - # specifically _not_ a Tick - result = self._add_offset(other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._add_datelike(other) - elif is_integer(other): - # This check must come after the check for np.timedelta64 - # as is_integer returns True for these - result = self.shift(other) - - # array-like others - elif is_timedelta64_dtype(other): - # TimedeltaIndex, ndarray[timedelta64] - result = self._add_delta(other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.add) - elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): - # DatetimeIndex, 
ndarray[datetime64] - return self._add_datelike(other) - elif is_integer_dtype(other): - result = self._addsub_int_array(other, operator.add) - elif is_float_dtype(other) or is_period_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError("cannot add {dtype}-dtype to {cls}" - .format(dtype=other.dtype, - cls=type(self).__name__)) - elif is_categorical_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover - return NotImplemented - - if result is NotImplemented: - return NotImplemented - elif not isinstance(result, Index): - # Index.__new__ will choose appropriate subclass for dtype - result = Index(result) - res_name = ops.get_op_result_name(self, other) - result.name = res_name - return result + # dispatch to ExtensionArray implementation + result = super(cls, self).__add__(other) + return wrap_arithmetic_op(self, other, result) cls.__add__ = __add__ @@ -643,95 +560,17 @@ def __radd__(self, other): cls.__radd__ = __radd__ def __sub__(self, other): - from pandas import Index - - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - - # scalar others - elif other is NaT: - result = self._sub_nat() - elif isinstance(other, (Tick, timedelta, np.timedelta64)): - result = self._add_delta(-other) - elif isinstance(other, DateOffset): - # specifically _not_ a Tick - result = self._add_offset(-other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._sub_datelike(other) - elif is_integer(other): - # This check must come after the check for np.timedelta64 - # as is_integer returns True for these - result = self.shift(-other) - elif isinstance(other, Period): - result = self._sub_period(other) - - # array-like others - elif is_timedelta64_dtype(other): - # TimedeltaIndex, ndarray[timedelta64] - result = self._add_delta(-other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.sub) - elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): - # DatetimeIndex, ndarray[datetime64] - result = self._sub_datelike(other) - elif is_period_dtype(other): - # PeriodIndex - result = self._sub_period_array(other) - elif is_integer_dtype(other): - result = self._addsub_int_array(other, operator.sub) - elif isinstance(other, Index): - raise TypeError("cannot subtract {cls} and {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - elif is_float_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError("cannot subtract {dtype}-dtype from {cls}" - .format(dtype=other.dtype, - cls=type(self).__name__)) - elif is_categorical_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover - return NotImplemented - - if result is NotImplemented: - return NotImplemented - elif not isinstance(result, Index): - # Index.__new__ will choose appropriate subclass for dtype - result = Index(result) - res_name = ops.get_op_result_name(self, other) - result.name = res_name - return result + # dispatch to ExtensionArray implementation + result = super(cls, self).__sub__(other) + return wrap_arithmetic_op(self, other, result) cls.__sub__ = __sub__ def __rsub__(self, other): - if is_datetime64_dtype(other) and is_timedelta64_dtype(self): - # ndarray[datetime64] cannot be subtracted from self, so - # we need to wrap in DatetimeIndex and flip the operation - from pandas import DatetimeIndex - return DatetimeIndex(other) - self - elif 
(is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and - not is_datetime64_any_dtype(other)): - # GH#19959 datetime - datetime is well-defined as timedelta, - # but any other type - datetime is not well-defined. - raise TypeError("cannot subtract {cls} from {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - return -(self - other) - cls.__rsub__ = __rsub__ + result = super(cls, self).__rsub__(other) + return wrap_arithmetic_op(self, other, result) - def __iadd__(self, other): - # alias for __add__ - return self.__add__(other) - cls.__iadd__ = __iadd__ - - def __isub__(self, other): - # alias for __sub__ - return self.__sub__(other) - cls.__isub__ = __isub__ + cls.__rsub__ = __rsub__ def isin(self, values): """ @@ -754,44 +593,6 @@ def isin(self, values): return algorithms.isin(self.asi8, values.asi8) - def shift(self, n, freq=None): - """ - Specialized shift which produces a DatetimeIndex - - Parameters - ---------- - n : int - Periods to shift by - freq : DateOffset or timedelta-like, optional - - Returns - ------- - shifted : DatetimeIndex - """ - if freq is not None and freq != self.freq: - if isinstance(freq, compat.string_types): - freq = frequencies.to_offset(freq) - offset = n * freq - result = self + offset - - if hasattr(self, 'tz'): - result._tz = self.tz - - return result - - if n == 0: - # immutable so OK - return self - - if self.freq is None: - raise NullFrequencyError("Cannot shift with no freq") - - start = self[0] + n * self.freq - end = self[-1] + n * self.freq - attribs = self._get_attributes_dict() - return self._generate_range(start=start, end=end, periods=None, - **attribs) - def repeat(self, repeats, *args, **kwargs): """ Analogous to ndarray.repeat @@ -896,3 +697,16 @@ def _ensure_datetimelike_to_i8(other): # period array cannot be coerces to int other = Index(other).asi8 return other + + +def wrap_arithmetic_op(self, other, result): + if result is NotImplemented: + return NotImplemented + + if not isinstance(result, Index): + # Index.__new__ will choose appropriate subclass for dtype + result = Index(result) + + res_name = ops.get_op_result_name(self, other) + result.name = res_name + return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 933e7406b5af3..3ee91a106f36b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -40,7 +40,7 @@ from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import ( - generate_range, Tick, CDay, prefix_mapping) + generate_range, CDay, prefix_mapping) from pandas.core.tools.timedeltas import to_timedelta from pandas.util._decorators import ( @@ -49,7 +49,7 @@ import pandas.tseries.offsets as offsets import pandas.core.tools.datetimes as tools -from pandas._libs import (lib, index as libindex, tslibs, tslib as libts, +from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timestamp) from pandas._libs.tslibs import (timezones, conversion, fields, parsing, ccalendar) @@ -98,9 +98,6 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -_midnight = time(0, 0) - - def _new_DatetimeIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ """ @@ -323,7 +320,7 @@ def __new__(cls, data=None, data = tools.to_datetime(data, dayfirst=dayfirst, yearfirst=yearfirst) - if isinstance(data, DatetimeIndex): + if isinstance(data, DatetimeArrayMixin): if tz 
is None: tz = data.tz elif data.tz is None: @@ -375,135 +372,19 @@ def __new__(cls, data=None, return subarr._deepcopy_if_needed(ref_to_data, copy) @classmethod - def _generate_range(cls, start, end, periods, name, freq, tz=None, - normalize=False, ambiguous='raise', closed=None): - if com.count_not_none(start, end, periods, freq) != 3: - raise ValueError('Of the four parameters: start, end, periods, ' - 'and freq, exactly three must be specified') - - _normalized = True - - if start is not None: - start = Timestamp(start) - - if end is not None: - end = Timestamp(end) - - if start is None and end is None: - if closed is not None: - raise ValueError("Closed has to be None if not both of start" - "and end are defined") - - left_closed, right_closed = dtl.validate_endpoints(closed) - - try: - inferred_tz = timezones.infer_tzinfo(start, end) - except Exception: - raise TypeError('Start and end cannot both be tz-aware with ' - 'different timezones') - - inferred_tz = timezones.maybe_get_tz(inferred_tz) - tz = timezones.maybe_get_tz(tz) - - if tz is not None and inferred_tz is not None: - if not timezones.tz_compare(inferred_tz, tz): - raise AssertionError("Inferred time zone not equal to passed " - "time zone") - - elif inferred_tz is not None: - tz = inferred_tz - - if start is not None: - if normalize: - start = tslibs.normalize_date(start) - _normalized = True - else: - _normalized = _normalized and start.time() == _midnight - - if end is not None: - if normalize: - end = tslibs.normalize_date(end) - _normalized = True - else: - _normalized = _normalized and end.time() == _midnight - - if hasattr(freq, 'delta') and freq != offsets.Day(): - if inferred_tz is None and tz is not None: - # naive dates - if start is not None and start.tz is None: - start = start.tz_localize(tz, ambiguous=False) - - if end is not None and end.tz is None: - end = end.tz_localize(tz, ambiguous=False) - - if start and end: - if start.tz is None and end.tz is not None: - start = start.tz_localize(end.tz, ambiguous=False) + @Appender(DatetimeArrayMixin._generate_range.__doc__) + def _generate_range(cls, start, end, periods, name=None, freq=None, + tz=None, normalize=False, ambiguous='raise', + closed=None): + out = super(DatetimeIndex, cls)._generate_range( + start, end, periods, freq, + tz=tz, normalize=normalize, ambiguous=ambiguous, closed=closed) + out.name = name + return out - if end.tz is None and start.tz is not None: - end = end.tz_localize(start.tz, ambiguous=False) - - if _use_cached_range(freq, _normalized, start, end): - index = cls._cached_range(start, end, periods=periods, - freq=freq, name=name) - else: - index = _generate_regular_range(cls, start, end, periods, freq) - - else: - - if tz is not None: - # naive dates - if start is not None and start.tz is not None: - start = start.replace(tzinfo=None) - - if end is not None and end.tz is not None: - end = end.replace(tzinfo=None) - - if start and end: - if start.tz is None and end.tz is not None: - end = end.replace(tzinfo=None) - - if end.tz is None and start.tz is not None: - start = start.replace(tzinfo=None) - - if freq is not None: - if _use_cached_range(freq, _normalized, start, end): - index = cls._cached_range(start, end, periods=periods, - freq=freq, name=name) - else: - index = _generate_regular_range(cls, start, end, - periods, freq) - - if tz is not None and getattr(index, 'tz', None) is None: - arr = conversion.tz_localize_to_utc(ensure_int64(index), - tz, - ambiguous=ambiguous) - - index = cls(arr) - - # index is localized datetime64 array 
-> have to convert - # start/end as well to compare - if start is not None: - start = start.tz_localize(tz).asm8 - if end is not None: - end = end.tz_localize(tz).asm8 - else: - # Create a linearly spaced date_range in local time - start = start.tz_localize(tz) - end = end.tz_localize(tz) - index = tools.to_datetime(np.linspace(start.value, - end.value, periods), - utc=True) - index = index.tz_convert(tz) - - if not left_closed and len(index) and index[0] == start: - index = index[1:] - if not right_closed and len(index) and index[-1] == end: - index = index[:-1] - - index = cls._simple_new(index.values, name=name, freq=freq, tz=tz) - - return index + @classmethod + def _use_cached_range(cls, freq, _normalized, start, end): + return _use_cached_range(freq, _normalized, start, end) def _convert_for_op(self, value): """ Convert value to be insertable to ndarray """ @@ -1685,48 +1566,6 @@ def to_julian_date(self): DatetimeIndex._add_datetimelike_methods() -def _generate_regular_range(cls, start, end, periods, freq): - if isinstance(freq, Tick): - stride = freq.nanos - if periods is None: - b = Timestamp(start).value - # cannot just use e = Timestamp(end) + 1 because arange breaks when - # stride is too large, see GH10887 - e = (b + (Timestamp(end).value - b) // stride * stride + - stride // 2 + 1) - # end.tz == start.tz by this point due to _generate implementation - tz = start.tz - elif start is not None: - b = Timestamp(start).value - e = b + np.int64(periods) * stride - tz = start.tz - elif end is not None: - e = Timestamp(end).value + stride - b = e - np.int64(periods) * stride - tz = end.tz - else: - raise ValueError("at least 'start' or 'end' should be specified " - "if a 'period' is given.") - - data = np.arange(b, e, stride, dtype=np.int64) - data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) - else: - if isinstance(start, Timestamp): - start = start.to_pydatetime() - - if isinstance(end, Timestamp): - end = end.to_pydatetime() - - xdr = generate_range(start=start, end=end, - periods=periods, offset=freq) - - dates = list(xdr) - # utc = len(dates) > 0 and dates[0].tzinfo is not None - data = tools.to_datetime(dates) - - return data - - def date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=None, **kwargs): """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 006758f276f87..9f14d4cfd5863 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -14,7 +14,6 @@ pandas_dtype, ensure_int64) from pandas.core.dtypes.missing import isna -from pandas.core.dtypes.generic import ABCSeries from pandas.core.arrays.timedeltas import ( TimedeltaArrayMixin, _is_convertible_to_td, _to_m8) @@ -25,18 +24,17 @@ import pandas.compat as compat from pandas.tseries.frequencies import to_offset -from pandas.core.algorithms import checked_add_with_arr from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com import pandas.core.dtypes.concat as _concat from pandas.util._decorators import Appender, Substitution, deprecate_kwarg from pandas.core.indexes.datetimelike import ( - TimelikeOps, DatetimeIndexOpsMixin) + TimelikeOps, DatetimeIndexOpsMixin, wrap_arithmetic_op) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) from pandas._libs import (lib, index as libindex, - join as libjoin, Timedelta, NaT, iNaT) + join as libjoin, Timedelta, NaT) def _wrap_field_accessor(name): 
@@ -197,11 +195,10 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, if unit is not None: data = to_timedelta(data, unit=unit, box=False) - if not isinstance(data, (np.ndarray, Index, ABCSeries)): - if is_scalar(data): - raise ValueError('TimedeltaIndex() must be called with a ' - 'collection of some kind, %s was passed' - % repr(data)) + if is_scalar(data): + raise ValueError('TimedeltaIndex() must be called with a ' + 'collection of some kind, {data} was passed' + .format(data=repr(data))) # convert if not already if getattr(data, 'dtype', None) != _TD_DTYPE: @@ -223,7 +220,8 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, return subarr @classmethod - def _generate_range(cls, start, end, periods, name, freq, closed=None): + def _generate_range(cls, start, end, periods, + name=None, freq=None, closed=None): # TimedeltaArray gets `name` via **kwargs, so we need to explicitly # override it if name is passed as a positional argument return super(TimedeltaIndex, cls)._generate_range(start, end, @@ -262,37 +260,7 @@ def _maybe_update_attributes(self, attrs): def _evaluate_with_timedelta_like(self, other, op): result = TimedeltaArrayMixin._evaluate_with_timedelta_like(self, other, op) - if result is NotImplemented: - return NotImplemented - return Index(result, name=self.name, copy=False) - - def _add_datelike(self, other): - # adding a timedeltaindex to a datetimelike - from pandas import Timestamp, DatetimeIndex - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - # defer to implementation in DatetimeIndex - other = DatetimeIndex(other) - return other + self - else: - assert other is not NaT - other = Timestamp(other) - i8 = self.asi8 - result = checked_add_with_arr(i8, other.value, - arr_mask=self._isnan) - result = self._maybe_mask_results(result, fill_value=iNaT) - return DatetimeIndex(result) - - def _addsub_offset_array(self, other, op): - # Add or subtract Array-like of DateOffset objects - try: - # TimedeltaIndex can only operate with a subset of DateOffset - # subclasses. 
Incompatible classes will raise AttributeError, - # which we re-raise as TypeError - return DatetimeIndexOpsMixin._addsub_offset_array(self, other, op) - except AttributeError: - raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" - .format(cls=type(self).__name__)) + return wrap_arithmetic_op(self, other, result) def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): from pandas.io.formats.format import Timedelta64Formatter diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 69e802fbaa3f0..24f34884dc077 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -16,6 +16,11 @@ def test_from_dti(self, tz_naive_fixture): arr = DatetimeArrayMixin(dti) assert list(dti) == list(arr) + # Check that Index.__new__ knows what to do with DatetimeArray + dti2 = pd.Index(arr) + assert isinstance(dti2, pd.DatetimeIndex) + assert list(dti2) == list(arr) + def test_astype_object(self, tz_naive_fixture): tz = tz_naive_fixture dti = pd.date_range('2016-01-01', periods=3, tz=tz) @@ -32,6 +37,11 @@ def test_from_tdi(self): arr = TimedeltaArrayMixin(tdi) assert list(arr) == list(tdi) + # Check that Index.__new__ knows what to do with TimedeltaArray + tdi2 = pd.Index(arr) + assert isinstance(tdi2, pd.TimedeltaIndex) + assert list(tdi2) == list(arr) + def test_astype_object(self): tdi = pd.TimedeltaIndex(['1 Day', '3 Hours']) arr = TimedeltaArrayMixin(tdi) @@ -48,6 +58,11 @@ def test_from_pi(self): arr = PeriodArrayMixin(pi) assert list(arr) == list(pi) + # Check that Index.__new__ knows what to do with TimedeltaArray + pi2 = pd.Index(arr) + assert isinstance(pi2, pd.PeriodIndex) + assert list(pi2) == list(arr) + def test_astype_object(self): pi = pd.period_range('2016', freq='Q', periods=3) arr = PeriodArrayMixin(pi)
With the DatetimeArray range-generating functions moved, we are finally able to move `shift`, and in turn `__add__`, `__sub__`, etc. Two non-trivial changes were made during the move; see inline comments. Upcoming commits will add docstrings and port tests. ATM many tests are a pain because the Array constructors don't know how to handle lists of strings; trying to find a way to implement that in `__new__` without having it become a total mess.
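For orientation, a minimal sketch of what the move enables; the `DatetimeArrayMixin` import path is the internal (private) location at the time of this PR, so treat it as illustrative. The construction mirrors the new tests in `test_datetimelike.py`:

```python
import pandas as pd
from pandas.core.arrays import DatetimeArrayMixin

dti = pd.date_range('2016-01-01', periods=3, freq='D')
arr = DatetimeArrayMixin(dti)

# shift/__add__/__sub__ now live on the mixin, so array-level
# arithmetic no longer round-trips through DatetimeIndex.
shifted = arr + pd.Timedelta('1 day')
deltas = arr - arr[0]

# Index.__new__ also knows how to re-wrap the array in an Index.
assert isinstance(pd.Index(arr), pd.DatetimeIndex)
```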
https://api.github.com/repos/pandas-dev/pandas/pulls/22016
2018-07-21T21:00:40Z
2018-07-26T12:49:40Z
2018-07-26T12:49:40Z
2018-07-26T21:13:14Z
ENH: Number formatting support for excel styles
diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb index 152ca90049bf1..6f66c1a9bf7f9 100644 --- a/doc/source/style.ipynb +++ b/doc/source/style.ipynb @@ -985,7 +985,10 @@ "- `vertical-align`\n", "- `white-space: nowrap`\n", "\n", - "Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported." + "Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported.\n", + "\n", + "The following pseudo CSS properties are also available to set excel specific style properties:\n", + "- `number-format`\n" ] }, { diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 973b75f0e1451..137fd5aafe5bd 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -372,6 +372,7 @@ Other API Changes - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) - :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) - :class:`Index` subtraction will attempt to operate element-wise instead of raising ``TypeError`` (:issue:`19369`) +- :class:`pandas.io.formats.style.Styler` supports a ``number-format`` property when using :meth:`~pandas.io.formats.style.Styler.to_excel` (:issue:`22015`) .. _whatsnew_0240.deprecations: diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index ec95ce7a970ad..0bc268bc18b95 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -98,8 +98,8 @@ def build_xlstyle(self, props): 'border': self.build_border(props), 'fill': self.build_fill(props), 'font': self.build_font(props), + 'number_format': self.build_number_format(props), } - # TODO: support number format # TODO: handle cell width and height: needs support in pandas.io.excel def remove_none(d): @@ -314,6 +314,9 @@ def color_to_excel(self, val): warnings.warn('Unhandled color format: {val!r}'.format(val=val), CSSWarning) + def build_number_format(self, props): + return {'format_code': props.get('number-format')} + class ExcelFormatter(object): """ diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 2d691bf2c5d8e..9fc16c43f5c1d 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -172,6 +172,9 @@ {'alignment': {'wrap_text': False}}), ('white-space: normal', {'alignment': {'wrap_text': True}}), + # NUMBER FORMAT + ('number-format: 0%', + {'number_format': {'format_code': '0%'}}), ]) def test_css_to_excel(css, expected): convert = CSSToExcelConverter() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index d1eab16e7c22c..e51780891534f 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -2241,6 +2241,7 @@ def style(df): ['', 'font-style: italic', ''], ['', '', 'text-align: right'], ['background-color: red', '', ''], + ['number-format: 0%', '', ''], ['', '', ''], ['', '', ''], ['', '', '']], @@ -2266,7 +2267,7 @@ def custom_converter(css): # Prepare spreadsheets - df = DataFrame(np.random.randn(10, 3)) + df = DataFrame(np.random.randn(11, 3)) with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path: writer = ExcelWriter(path, engine=engine) df.to_excel(writer, sheet_name='frame') @@ -2294,7 +2295,7 @@ def custom_converter(css): n_cells += 1 # ensure iteration actually happened: - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) # (2) check styling with 
default converter @@ -2344,13 +2345,16 @@ def custom_converter(css): assert cell1.fill.patternType != cell2.fill.patternType assert cell2.fill.fgColor.rgb == alpha + 'FF0000' assert cell2.fill.patternType == 'solid' + elif ref == 'B9': + assert cell1.number_format == 'General' + assert cell2.number_format == '0%' else: assert_equal_style(cell1, cell2) assert cell1.value == cell2.value n_cells += 1 - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) # (3) check styling with custom converter n_cells = 0 @@ -2359,7 +2363,7 @@ def custom_converter(css): assert len(col1) == len(col2) for cell1, cell2 in zip(col1, col2): ref = '%s%d' % (cell2.column, cell2.row) - if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8'): + if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'): assert not cell1.font.bold assert cell2.font.bold else: @@ -2368,7 +2372,7 @@ def custom_converter(css): assert cell1.value == cell2.value n_cells += 1 - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) @td.skip_if_no('openpyxl')
- [x] closes #22027
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Add number formatting support to Excel styles via a pseudo-CSS property:

```css
number-format: 0%;
```

Added new tests, updated the docs, and manually verified the output with both Excel output engines that support styling.
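A minimal usage sketch; the file name and format code are illustrative, and openpyxl is assumed to be installed:

```python
import pandas as pd

df = pd.DataFrame({'rate': [0.1234, 0.9876]})

# The Styler carries the pseudo-CSS property; CSSToExcelConverter maps
# it onto each cell's Excel number format when writing.
styled = df.style.applymap(lambda _: 'number-format: 0.00%')
styled.to_excel('formatted.xlsx', engine='openpyxl')
```

Opening the file, the `rate` column renders as percentages (e.g. `12.34%`) while the stored values stay numeric.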
https://api.github.com/repos/pandas-dev/pandas/pulls/22015
2018-07-21T20:36:24Z
2018-07-24T13:11:19Z
2018-07-24T13:11:18Z
2018-07-24T16:04:32Z
[REF] separate blocks.py out of internals.__init__
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index fde3aaa14ac5d..a4cd301806569 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,12 +1,7 @@ # -*- coding: utf-8 -*- -import warnings import copy -from warnings import catch_warnings -import inspect import itertools -import re import operator -from datetime import datetime, timedelta, date from collections import defaultdict from functools import partial @@ -17,3203 +12,51 @@ from pandas.core.base import PandasObject from pandas.core.dtypes.dtypes import ( - ExtensionDtype, DatetimeTZDtype, - PandasExtensionDtype, - CategoricalDtype) + ExtensionDtype, + PandasExtensionDtype) from pandas.core.dtypes.common import ( - _TD_DTYPE, _NS_DTYPE, - ensure_int64, ensure_platform_int, - is_integer, - is_dtype_equal, + _NS_DTYPE, + ensure_int64, is_timedelta64_dtype, - is_datetime64_dtype, is_datetimetz, is_sparse, - is_categorical, is_categorical_dtype, - is_integer_dtype, - is_datetime64tz_dtype, - is_bool_dtype, - is_object_dtype, + is_datetime64_dtype, is_datetimetz, + is_categorical_dtype, is_datetimelike_v_numeric, is_float_dtype, is_numeric_dtype, is_numeric_v_string_like, is_extension_type, is_extension_array_dtype, - is_list_like, - is_re, - is_re_compilable, is_scalar, - _get_dtype, - pandas_dtype) + _get_dtype) from pandas.core.dtypes.cast import ( - maybe_downcast_to_dtype, - maybe_upcast, maybe_promote, - infer_dtype_from, infer_dtype_from_scalar, - soft_convert_objects, - maybe_convert_objects, - astype_nansafe, - find_common_type, - maybe_infer_dtype_type) -from pandas.core.dtypes.missing import ( - isna, notna, array_equivalent, - _isna_compat, - is_null_datelike_scalar) + find_common_type) +from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray -from pandas.core.dtypes.generic import ( - ABCSeries, - ABCDatetimeIndex, - ABCExtensionArray, - ABCIndexClass) -import pandas.core.common as com import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, ensure_index -from pandas.core.indexing import maybe_convert_indices, check_setitem_lengths -from pandas.core.arrays import Categorical -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexing import maybe_convert_indices from pandas.io.formats.printing import pprint_thing -import pandas.core.missing as missing -from pandas.core.sparse.array import _maybe_to_sparse, SparseArray -from pandas._libs import lib, tslib, tslibs -from pandas._libs.tslibs import conversion, Timedelta +from pandas.core.sparse.array import _maybe_to_sparse +from pandas._libs import lib, tslibs from pandas._libs.internals import BlockPlacement from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg -from pandas import compat from pandas.compat import range, map, zip, u - -class Block(PandasObject): - """ - Canonical n-dimensional unit of homogeneous dtype contained in a pandas - data structure - - Index-ignorant; let the container take care of that - """ - __slots__ = ['_mgr_locs', 'values', 'ndim'] - is_numeric = False - is_float = False - is_integer = False - is_complex = False - is_datetime = False - is_datetimetz = False - is_timedelta = False - is_bool = False - is_object = False - is_categorical = False - is_sparse = False - is_extension = False - _box_to_block_values = True 
- _can_hold_na = False - _can_consolidate = True - _verify_integrity = True - _validate_ndim = True - _ftype = 'dense' - _concatenator = staticmethod(np.concatenate) - - def __init__(self, values, placement, ndim=None): - self.ndim = self._check_ndim(values, ndim) - self.mgr_locs = placement - self.values = values - - if (self._validate_ndim and self.ndim and - len(self.mgr_locs) != len(self.values)): - raise ValueError( - 'Wrong number of items passed {val}, placement implies ' - '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) - - def _check_ndim(self, values, ndim): - """ndim inference and validation. - - Infers ndim from 'values' if not provided to __init__. - Validates that values.ndim and ndim are consistent if and only if - the class variable '_validate_ndim' is True. - - Parameters - ---------- - values : array-like - ndim : int or None - - Returns - ------- - ndim : int - - Raises - ------ - ValueError : the number of dimensions do not match - """ - if ndim is None: - ndim = values.ndim - - if self._validate_ndim and values.ndim != ndim: - msg = ("Wrong number of dimensions. values.ndim != ndim " - "[{} != {}]") - raise ValueError(msg.format(values.ndim, ndim)) - - return ndim - - @property - def _holder(self): - """The array-like that can hold the underlying values. - - None for 'Block', overridden by subclasses that don't - use an ndarray. - """ - return None - - @property - def _consolidate_key(self): - return (self._can_consolidate, self.dtype.name) - - @property - def _is_single_block(self): - return self.ndim == 1 - - @property - def is_view(self): - """ return a boolean if I am possibly a view """ - return self.values.base is not None - - @property - def is_datelike(self): - """ return True if I am a non-datelike """ - return self.is_datetime or self.is_timedelta - - def is_categorical_astype(self, dtype): - """ - validate that we have a astypeable to categorical, - returns a boolean if we are a categorical - """ - if dtype is Categorical or dtype is CategoricalDtype: - # this is a pd.Categorical, but is not - # a valid type for astypeing - raise TypeError("invalid type {0} for astype".format(dtype)) - - elif is_categorical_dtype(dtype): - return True - - return False - - def external_values(self, dtype=None): - """ return an outside world format, currently just the ndarray """ - return self.values - - def internal_values(self, dtype=None): - """ return an internal format, currently just the ndarray - this should be the pure internal API format - """ - return self.values - - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self.internal_values() - - def get_values(self, dtype=None): - """ - return an internal format, currently just the ndarray - this is often overridden to handle to_dense like operations - """ - if is_object_dtype(dtype): - return self.values.astype(object) - return self.values - - def to_dense(self): - return self.values.view() - - @property - def _na_value(self): - return np.nan - - @property - def fill_value(self): - return np.nan - - @property - def mgr_locs(self): - return self._mgr_locs - - @mgr_locs.setter - def mgr_locs(self, new_mgr_locs): - if not isinstance(new_mgr_locs, BlockPlacement): - new_mgr_locs = BlockPlacement(new_mgr_locs) - - self._mgr_locs = new_mgr_locs - - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return self.dtype - - def make_block(self, values, placement=None, ndim=None): - """ - 
Create a new block, with type inference propagate any values that are - not specified - """ - if placement is None: - placement = self.mgr_locs - if ndim is None: - ndim = self.ndim - - return make_block(values, placement=placement, ndim=ndim) - - def make_block_scalar(self, values): - """ - Create a ScalarBlock - """ - return ScalarBlock(values) - - def make_block_same_class(self, values, placement=None, ndim=None, - dtype=None): - """ Wrap given values in a block of same type as self. """ - if dtype is not None: - # issue 19431 fastparquet is passing this - warnings.warn("dtype argument is deprecated, will be removed " - "in a future release.", DeprecationWarning) - if placement is None: - placement = self.mgr_locs - return make_block(values, placement=placement, ndim=ndim, - klass=self.__class__, dtype=dtype) - - def __unicode__(self): - - # don't want to print out all of the items here - name = pprint_thing(self.__class__.__name__) - if self._is_single_block: - - result = '{name}: {len} dtype: {dtype}'.format( - name=name, len=len(self), dtype=self.dtype) - - else: - - shape = ' x '.join(pprint_thing(s) for s in self.shape) - result = '{name}: {index}, {shape}, dtype: {dtype}'.format( - name=name, index=pprint_thing(self.mgr_locs.indexer), - shape=shape, dtype=self.dtype) - - return result - - def __len__(self): - return len(self.values) - - def __getstate__(self): - return self.mgr_locs.indexer, self.values - - def __setstate__(self, state): - self.mgr_locs = BlockPlacement(state[0]) - self.values = state[1] - self.ndim = self.values.ndim - - def _slice(self, slicer): - """ return a slice of my values """ - return self.values[slicer] - - def reshape_nd(self, labels, shape, ref_items, mgr=None): - """ - Parameters - ---------- - labels : list of new axis labels - shape : new shape - ref_items : new ref_items - - return a new block that is transformed to a nd block - """ - return _block2d_to_blocknd(values=self.get_values().T, - placement=self.mgr_locs, shape=shape, - labels=labels, ref_items=ref_items) - - def getitem_block(self, slicer, new_mgr_locs=None): - """ - Perform __getitem__-like, return result as block. - - As of now, only supports slices that preserve dimensionality. - """ - if new_mgr_locs is None: - if isinstance(slicer, tuple): - axis0_slicer = slicer[0] - else: - axis0_slicer = slicer - new_mgr_locs = self.mgr_locs[axis0_slicer] - - new_values = self._slice(slicer) - - if self._validate_ndim and new_values.ndim != self.ndim: - raise ValueError("Only same dim slicing is allowed") - - return self.make_block_same_class(new_values, new_mgr_locs) - - @property - def shape(self): - return self.values.shape - - @property - def dtype(self): - return self.values.dtype - - @property - def ftype(self): - return "{dtype}:{ftype}".format(dtype=self.dtype, ftype=self._ftype) - - def merge(self, other): - return _merge_blocks([self, other]) - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - return self.make_block_same_class( - values, placement=placement or slice(0, len(values), 1)) - - def iget(self, i): - return self.values[i] - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - self.values[locs] = values - - def delete(self, loc): - """ - Delete given loc(-s) from block in-place. 
- """ - self.values = np.delete(self.values, loc, 0) - self.mgr_locs = self.mgr_locs.delete(loc) - - def apply(self, func, mgr=None, **kwargs): - """ apply the function to my values; return a block if we are not - one - """ - with np.errstate(all='ignore'): - result = func(self.values, **kwargs) - if not isinstance(result, Block): - result = self.make_block(values=_block_shape(result, - ndim=self.ndim)) - - return result - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - """ fillna on the block with the value. If we fail, then convert to - ObjectBlock and try again - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - if not self._can_hold_na: - if inplace: - return self - else: - return self.copy() - - mask = isna(self.values) - if limit is not None: - if not is_integer(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - if self.ndim > 2: - raise NotImplementedError("number of dimensions for 'fillna' " - "is currently limited to 2") - mask[mask.cumsum(self.ndim - 1) > limit] = False - - # fillna, but if we cannot coerce, then try again as an ObjectBlock - try: - values, _, _, _ = self._try_coerce_args(self.values, value) - blocks = self.putmask(mask, value, inplace=inplace) - blocks = [b.make_block(values=self._try_coerce_result(b.values)) - for b in blocks] - return self._maybe_downcast(blocks, downcast) - except (TypeError, ValueError): - - # we can't process the value, but nothing to do - if not mask.any(): - return self if inplace else self.copy() - - # operate column-by-column - def f(m, v, i): - block = self.coerce_to_target_dtype(value) - - # slice out our block - if i is not None: - block = block.getitem_block(slice(i, i + 1)) - return block.fillna(value, - limit=limit, - inplace=inplace, - downcast=None) - - return self.split_and_operate(mask, f, inplace) - - def split_and_operate(self, mask, f, inplace): - """ - split the block per-column, and apply the callable f - per-column, return a new block for each. Handle - masking which will not change a block unless needed. - - Parameters - ---------- - mask : 2-d boolean mask - f : callable accepting (1d-mask, 1d values, indexer) - inplace : boolean - - Returns - ------- - list of blocks - """ - - if mask is None: - mask = np.ones(self.shape, dtype=bool) - new_values = self.values - - def make_a_block(nv, ref_loc): - if isinstance(nv, Block): - block = nv - elif isinstance(nv, list): - block = nv[0] - else: - # Put back the dimension that was taken from it and make - # a block out of the result. 
- try: - nv = _block_shape(nv, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass - block = self.make_block(values=nv, - placement=ref_loc) - return block - - # ndim == 1 - if self.ndim == 1: - if mask.any(): - nv = f(mask, new_values, None) - else: - nv = new_values if inplace else new_values.copy() - block = make_a_block(nv, self.mgr_locs) - return [block] - - # ndim > 1 - new_blocks = [] - for i, ref_loc in enumerate(self.mgr_locs): - m = mask[i] - v = new_values[i] - - # need a new block - if m.any(): - nv = f(m, v, i) - else: - nv = v if inplace else v.copy() - - block = make_a_block(nv, [ref_loc]) - new_blocks.append(block) - - return new_blocks - - def _maybe_downcast(self, blocks, downcast=None): - - # no need to downcast our float - # unless indicated - if downcast is None and self.is_float: - return blocks - elif downcast is None and (self.is_timedelta or self.is_datetime): - return blocks - - if not isinstance(blocks, list): - blocks = [blocks] - return _extend_blocks([b.downcast(downcast) for b in blocks]) - - def downcast(self, dtypes=None, mgr=None): - """ try to downcast each item to the dict of dtypes if present """ - - # turn it off completely - if dtypes is False: - return self - - values = self.values - - # single block handling - if self._is_single_block: - - # try to cast all non-floats here - if dtypes is None: - dtypes = 'infer' - - nv = maybe_downcast_to_dtype(values, dtypes) - return self.make_block(nv) - - # ndim > 1 - if dtypes is None: - return self - - if not (dtypes == 'infer' or isinstance(dtypes, dict)): - raise ValueError("downcast must have a dictionary or 'infer' as " - "its argument") - - # operate column-by-column - # this is expensive as it splits the blocks items-by-item - def f(m, v, i): - - if dtypes == 'infer': - dtype = 'infer' - else: - raise AssertionError("dtypes as dict is not supported yet") - - if dtype is not None: - v = maybe_downcast_to_dtype(v, dtype) - return v - - return self.split_and_operate(None, f, False) - - def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): - return self._astype(dtype, copy=copy, errors=errors, values=values, - **kwargs) - - def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None, **kwargs): - """Coerce to the new type - - Parameters - ---------- - dtype : str, dtype convertible - copy : boolean, default False - copy if indicated - errors : str, {'raise', 'ignore'}, default 'ignore' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. On error return original object - - Returns - ------- - Block - """ - errors_legal_values = ('raise', 'ignore') - - if errors not in errors_legal_values: - invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. " - "Supplied value is '{}'".format( - list(errors_legal_values), errors)) - raise ValueError(invalid_arg) - - if (inspect.isclass(dtype) and - issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))): - msg = ("Expected an instance of {}, but got the class instead. " - "Try instantiating 'dtype'.".format(dtype.__name__)) - raise TypeError(msg) - - # may need to convert to categorical - if self.is_categorical_astype(dtype): - - # deprecated 17636 - if ('categories' in kwargs or 'ordered' in kwargs): - if isinstance(dtype, CategoricalDtype): - raise TypeError( - "Cannot specify a CategoricalDtype and also " - "`categories` or `ordered`. 
Use " - "`dtype=CategoricalDtype(categories, ordered)`" - " instead.") - warnings.warn("specifying 'categories' or 'ordered' in " - ".astype() is deprecated; pass a " - "CategoricalDtype instead", - FutureWarning, stacklevel=7) - - categories = kwargs.get('categories', None) - ordered = kwargs.get('ordered', None) - if com._any_not_none(categories, ordered): - dtype = CategoricalDtype(categories, ordered) - - if is_categorical_dtype(self.values): - # GH 10696/18593: update an existing categorical efficiently - return self.make_block(self.values.astype(dtype, copy=copy)) - - return self.make_block(Categorical(self.values, dtype=dtype)) - - # convert dtypes if needed - dtype = pandas_dtype(dtype) - - # astype processing - if is_dtype_equal(self.dtype, dtype): - if copy: - return self.copy() - return self - - if klass is None: - if dtype == np.object_: - klass = ObjectBlock - try: - # force the copy here - if values is None: - - if issubclass(dtype.type, - (compat.text_type, compat.string_types)): - - # use native type formatting for datetime/tz/timedelta - if self.is_datelike: - values = self.to_native_types() - - # astype formatting - else: - values = self.get_values() - - else: - values = self.get_values(dtype=dtype) - - # _astype_nansafe works fine with 1-d only - values = astype_nansafe(values.ravel(), dtype, copy=True) - - # TODO(extension) - # should we make this attribute? - try: - values = values.reshape(self.shape) - except AttributeError: - pass - - newb = make_block(values, placement=self.mgr_locs, - klass=klass) - except: - if errors == 'raise': - raise - newb = self.copy() if copy else self - - if newb.is_numeric and self.is_numeric: - if newb.shape != self.shape: - raise TypeError( - "cannot set astype for copy = [{copy}] for dtype " - "({dtype} [{itemsize}]) with smaller itemsize than " - "current ({newb_dtype} [{newb_size}])".format( - copy=copy, dtype=self.dtype.name, - itemsize=self.itemsize, newb_dtype=newb.dtype.name, - newb_size=newb.itemsize)) - return newb - - def convert(self, copy=True, **kwargs): - """ attempt to coerce any object types to better types return a copy - of the block (if copy = True) by definition we are not an ObjectBlock - here! 
- """ - - return self.copy() if copy else self - - def _can_hold_element(self, element): - """ require the same dtype as ourselves """ - dtype = self.values.dtype.type - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, dtype) - return isinstance(element, dtype) - - def _try_cast_result(self, result, dtype=None): - """ try to cast the result to our original type, we may have - roundtripped thru object in the mean-time - """ - if dtype is None: - dtype = self.dtype - - if self.is_integer or self.is_bool or self.is_datetime: - pass - elif self.is_float and result.dtype == self.dtype: - - # protect against a bool/object showing up here - if isinstance(dtype, compat.string_types) and dtype == 'infer': - return result - if not isinstance(dtype, type): - dtype = dtype.type - if issubclass(dtype, (np.bool_, np.object_)): - if issubclass(dtype, np.bool_): - if isna(result).all(): - return result.astype(np.bool_) - else: - result = result.astype(np.object_) - result[result == 1] = True - result[result == 0] = False - return result - else: - return result.astype(np.object_) - - return result - - # may need to change the dtype here - return maybe_downcast_to_dtype(result, dtype) - - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ - - if np.any(notna(other)) and not self._can_hold_element(other): - # coercion issues - # let higher levels handle - raise TypeError("cannot convert {} to an {}".format( - type(other).__name__, - type(self).__name__.lower().replace('Block', ''))) - - return values, False, other, False - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - return result - - def _try_coerce_and_cast_result(self, result, dtype=None): - result = self._try_coerce_result(result) - result = self._try_cast_result(result, dtype=dtype) - return result - - def to_native_types(self, slicer=None, na_rep='nan', quoting=None, - **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.get_values() - - if slicer is not None: - values = values[:, slicer] - mask = isna(values) - - if not self.is_object and not quoting: - values = values.astype(str) - else: - values = np.array(values, dtype='object') - - values[mask] = na_rep - return values - - # block actions #### - def copy(self, deep=True, mgr=None): - """ copy constructor """ - values = self.values - if deep: - values = values.copy() - return self.make_block_same_class(values) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - """ replace the to_replace value with value, possible to create new - blocks here this is just a call to putmask. regex is not used here. - It is used in ObjectBlocks. It is here for API - compatibility. 
- """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - original_to_replace = to_replace - - # try to replace, if we raise an error, convert to ObjectBlock and - # retry - try: - values, _, to_replace, _ = self._try_coerce_args(self.values, - to_replace) - mask = missing.mask_missing(values, to_replace) - if filter is not None: - filtered_out = ~self.mgr_locs.isin(filter) - mask[filtered_out.nonzero()[0]] = False - - blocks = self.putmask(mask, value, inplace=inplace) - if convert: - blocks = [b.convert(by_item=True, numeric=False, - copy=not inplace) for b in blocks] - return blocks - except (TypeError, ValueError): - - # try again with a compatible block - block = self.astype(object) - return block.replace( - to_replace=original_to_replace, value=value, inplace=inplace, - filter=filter, regex=regex, convert=convert) - - def _replace_single(self, *args, **kwargs): - """ no-op on a non-ObjectBlock """ - return self if kwargs['inplace'] else self.copy() - - def setitem(self, indexer, value, mgr=None): - """Set the value inplace, returning a a maybe different typed block. - - Parameters - ---------- - indexer : tuple, list-like, array-like, slice - The subset of self.values to set - value : object - The value being set - mgr : BlockPlacement, optional - - Returns - ------- - Block - - Notes - ----- - `indexer` is a direct slice/positional indexer. `value` must - be a compatible shape. - """ - # coerce None values, if appropriate - if value is None: - if self.is_numeric: - value = np.nan - - # coerce if block dtype can store value - values = self.values - try: - values, _, value, _ = self._try_coerce_args(values, value) - # can keep its own dtype - if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, - value.dtype): - dtype = self.dtype - else: - dtype = 'infer' - - except (TypeError, ValueError): - # current dtype cannot store value, coerce to common dtype - find_dtype = False - - if hasattr(value, 'dtype'): - dtype = value.dtype - find_dtype = True - - elif is_scalar(value): - if isna(value): - # NaN promotion is handled in latter path - dtype = False - else: - dtype, _ = infer_dtype_from_scalar(value, - pandas_dtype=True) - find_dtype = True - else: - dtype = 'infer' - - if find_dtype: - dtype = find_common_type([values.dtype, dtype]) - if not is_dtype_equal(self.dtype, dtype): - b = self.astype(dtype) - return b.setitem(indexer, value, mgr=mgr) - - # value must be storeable at this moment - arr_value = np.array(value) - - # cast the values to a type that can hold nan (if necessary) - if not self._can_hold_element(value): - dtype, _ = maybe_promote(arr_value.dtype) - values = values.astype(dtype) - - transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) - values = transf(values) - - # length checking - check_setitem_lengths(indexer, value, values) - - def _is_scalar_indexer(indexer): - # return True if we are all scalar indexers - - if arr_value.ndim == 1: - if not isinstance(indexer, tuple): - indexer = tuple([indexer]) - return any(isinstance(idx, np.ndarray) and len(idx) == 0 - for idx in indexer) - return False - - def _is_empty_indexer(indexer): - # return a boolean if we have an empty indexer - - if is_list_like(indexer) and not len(indexer): - return True - if arr_value.ndim == 1: - if not isinstance(indexer, tuple): - indexer = tuple([indexer]) - return any(isinstance(idx, np.ndarray) and len(idx) == 0 - for idx in indexer) - return False - - # empty indexers - # 8669 (empty) - if _is_empty_indexer(indexer): - pass - - # setting a single element for each dim 
and with a rhs that could - # be say a list - # GH 6043 - elif _is_scalar_indexer(indexer): - values[indexer] = value - - # if we are an exact match (ex-broadcasting), - # then use the resultant dtype - elif (len(arr_value.shape) and - arr_value.shape[0] == values.shape[0] and - np.prod(arr_value.shape) == np.prod(values.shape)): - values[indexer] = value - try: - values = values.astype(arr_value.dtype) - except ValueError: - pass - - # set - else: - values[indexer] = value - - # coerce and try to infer the dtypes of the result - values = self._try_coerce_and_cast_result(values, dtype) - block = self.make_block(transf(values)) - return block - - def putmask(self, mask, new, align=True, inplace=False, axis=0, - transpose=False, mgr=None): - """ putmask the data to the block; it is possible that we may create a - new dtype of block - - return the resulting block(s) - - Parameters - ---------- - mask : the condition to respect - new : a ndarray/object - align : boolean, perform alignment on other/cond, default is True - inplace : perform inplace modification, default is False - axis : int - transpose : boolean - Set to True if self is stored with axes reversed - - Returns - ------- - a list of new blocks, the result of the putmask - """ - - new_values = self.values if inplace else self.values.copy() - - new = getattr(new, 'values', new) - mask = getattr(mask, 'values', mask) - - # if we are passed a scalar None, convert it here - if not is_list_like(new) and isna(new) and not self.is_object: - new = self.fill_value - - if self._can_hold_element(new): - _, _, new, _ = self._try_coerce_args(new_values, new) - - if transpose: - new_values = new_values.T - - # If the default repeat behavior in np.putmask would go in the - # wrong direction, then explicitly repeat and reshape new instead - if getattr(new, 'ndim', 0) >= 1: - if self.ndim - 1 == new.ndim and axis == 1: - new = np.repeat( - new, new_values.shape[-1]).reshape(self.shape) - new = new.astype(new_values.dtype) - - # we require exact matches between the len of the - # values we are setting (or is compat). np.putmask - # doesn't check this and will simply truncate / pad - # the output, but we want sane error messages - # - # TODO: this prob needs some better checking - # for 2D cases - if ((is_list_like(new) and - np.any(mask[mask]) and - getattr(new, 'ndim', 1) == 1)): - - if not (mask.shape[-1] == len(new) or - mask[mask].shape[-1] == len(new) or - len(new) == 1): - raise ValueError("cannot assign mismatch " - "length to masked array") - - np.putmask(new_values, mask, new) - - # maybe upcast me - elif mask.any(): - if transpose: - mask = mask.T - if isinstance(new, np.ndarray): - new = new.T - axis = new_values.ndim - axis - 1 - - # Pseudo-broadcast - if getattr(new, 'ndim', 0) >= 1: - if self.ndim - 1 == new.ndim: - new_shape = list(new.shape) - new_shape.insert(axis, 1) - new = new.reshape(tuple(new_shape)) - - # operate column-by-column - def f(m, v, i): - - if i is None: - # ndim==1 case. 
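The `mask.any()` branch above ("maybe upcast me") is the machinery behind the familiar int-to-float promotion when a boolean setitem introduces missing values:

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])  # IntBlock cannot hold NaN

s[s > 1] = np.nan         # boolean mask assignment -> Block.putmask

# int64 could not hold the new values, so they were promoted
# column by column through _putmask_smart
print(s.dtype)            # float64
print(s.tolist())         # [1.0, nan, nan]
```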
- n = new - else: - - if isinstance(new, np.ndarray): - n = np.squeeze(new[i % new.shape[0]]) - else: - n = np.array(new) - - # type of the new block - dtype, _ = maybe_promote(n.dtype) - - # we need to explicitly astype here to make a copy - n = n.astype(dtype) - - nv = _putmask_smart(v, m, n) - return nv - - new_blocks = self.split_and_operate(mask, f, inplace) - return new_blocks - - if inplace: - return [self] - - if transpose: - new_values = new_values.T - - return [self.make_block(new_values)] - - def coerce_to_target_dtype(self, other): - """ - coerce the current block to a dtype compat for other - we will return a block, possibly object, and not raise - - we can also safely try to coerce to the same dtype - and will receive the same block - """ - - # if we cannot then coerce to object - dtype, _ = infer_dtype_from(other, pandas_dtype=True) - - if is_dtype_equal(self.dtype, dtype): - return self - - if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): - # we don't upcast to bool - return self.astype(object) - - elif ((self.is_float or self.is_complex) and - (is_integer_dtype(dtype) or is_float_dtype(dtype))): - # don't coerce float/complex to int - return self - - elif (self.is_datetime or - is_datetime64_dtype(dtype) or - is_datetime64tz_dtype(dtype)): - - # not a datetime - if not ((is_datetime64_dtype(dtype) or - is_datetime64tz_dtype(dtype)) and self.is_datetime): - return self.astype(object) - - # don't upcast timezone with different timezone or no timezone - mytz = getattr(self.dtype, 'tz', None) - othertz = getattr(dtype, 'tz', None) - - if str(mytz) != str(othertz): - return self.astype(object) - - raise AssertionError("possible recursion in " - "coerce_to_target_dtype: {} {}".format( - self, other)) - - elif (self.is_timedelta or is_timedelta64_dtype(dtype)): - - # not a timedelta - if not (is_timedelta64_dtype(dtype) and self.is_timedelta): - return self.astype(object) - - raise AssertionError("possible recursion in " - "coerce_to_target_dtype: {} {}".format( - self, other)) - - try: - return self.astype(dtype) - except (ValueError, TypeError): - pass - - return self.astype(object) - - def interpolate(self, method='pad', axis=0, index=None, values=None, - inplace=False, limit=None, limit_direction='forward', - limit_area=None, fill_value=None, coerce=False, - downcast=None, mgr=None, **kwargs): - - inplace = validate_bool_kwarg(inplace, 'inplace') - - def check_int_bool(self, inplace): - # Only FloatBlocks will contain NaNs. 
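`coerce_to_target_dtype` above refuses to upcast between naive and tz-aware datetimes (or across different timezones) and falls back to object instead. In this era of the code that is visible when assigning a tz-aware scalar into a naive datetime column:

```python
import pandas as pd

s = pd.Series(pd.date_range('2019-01-01', periods=2))  # naive datetime64[ns]
s[0] = pd.Timestamp('2019-01-01', tz='UTC')

# The naive DatetimeBlock cannot coerce a tz-aware scalar, so setitem
# re-dispatches through coerce_to_target_dtype -> object
print(s.dtype)  # object
```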
- # timedelta subclasses IntBlock - if (self.is_bool or self.is_integer) and not self.is_timedelta: - if inplace: - return self - else: - return self.copy() - - # a fill na type method - try: - m = missing.clean_fill_method(method) - except: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate_with_fill(method=m, axis=axis, - inplace=inplace, limit=limit, - fill_value=fill_value, - coerce=coerce, - downcast=downcast, mgr=mgr) - # try an interp method - try: - m = missing.clean_interp_method(method, **kwargs) - except: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate(method=m, index=index, values=values, - axis=axis, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, inplace=inplace, - downcast=downcast, mgr=mgr, **kwargs) - - raise ValueError("invalid method '{0}' to interpolate.".format(method)) - - def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, - limit=None, fill_value=None, coerce=False, - downcast=None, mgr=None): - """ fillna but using the interpolate machinery """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - - # if we are coercing, then don't force the conversion - # if the block can't hold the type - if coerce: - if not self._can_hold_na: - if inplace: - return [self] - else: - return [self.copy()] - - values = self.values if inplace else self.values.copy() - values, _, fill_value, _ = self._try_coerce_args(values, fill_value) - values = missing.interpolate_2d(values, method=method, axis=axis, - limit=limit, fill_value=fill_value, - dtype=self.dtype) - values = self._try_coerce_result(values) - - blocks = [self.make_block_same_class(values, ndim=self.ndim)] - return self._maybe_downcast(blocks, downcast) - - def _interpolate(self, method=None, index=None, values=None, - fill_value=None, axis=0, limit=None, - limit_direction='forward', limit_area=None, - inplace=False, downcast=None, mgr=None, **kwargs): - """ interpolate using scipy wrappers """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - data = self.values if inplace else self.values.copy() - - # only deal with floats - if not self.is_float: - if not self.is_integer: - return self - data = data.astype(np.float64) - - if fill_value is None: - fill_value = self.fill_value - - if method in ('krogh', 'piecewise_polynomial', 'pchip'): - if not index.is_monotonic: - raise ValueError("{0} interpolation requires that the " - "index be monotonic.".format(method)) - # process 1-d slices in the axis direction - - def func(x): - - # process a 1-d slice, returning it - # should the axis argument be handled below in apply_along_axis? - # i.e. 
not an arg to missing.interpolate_1d - return missing.interpolate_1d(index, x, method=method, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, - bounds_error=False, **kwargs) - - # interp each column independently - interp_values = np.apply_along_axis(func, axis, data) - - blocks = [self.make_block_same_class(interp_values)] - return self._maybe_downcast(blocks, downcast) - - def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block.bb - - """ - - # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock - # so need to preserve types - # sparse is treated like an ndarray, but needs .get_values() shaping - - values = self.values - if self.is_sparse: - values = self.get_values() - - if fill_tuple is None: - fill_value = self.fill_value - new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=False) - else: - fill_value = fill_tuple[0] - new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=True, fill_value=fill_value) - - if new_mgr_locs is None: - if axis == 0: - slc = libinternals.indexer_as_slice(indexer) - if slc is not None: - new_mgr_locs = self.mgr_locs[slc] - else: - new_mgr_locs = self.mgr_locs[indexer] - else: - new_mgr_locs = self.mgr_locs - - if not is_dtype_equal(new_values.dtype, self.dtype): - return self.make_block(new_values, new_mgr_locs) - else: - return self.make_block_same_class(new_values, new_mgr_locs) - - def diff(self, n, axis=1, mgr=None): - """ return block for the diff of the values """ - new_values = algos.diff(self.values, n, axis=axis) - return [self.make_block(values=new_values)] - - def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods, possibly upcast """ - - # convert integer to float if necessary. need to do a lot more than - # that, handle boolean etc also - new_values, fill_value = maybe_upcast(self.values) - - # make sure array sent to np.roll is c_contiguous - f_ordered = new_values.flags.f_contiguous - if f_ordered: - new_values = new_values.T - axis = new_values.ndim - axis - 1 - - if np.prod(new_values.shape): - new_values = np.roll(new_values, ensure_platform_int(periods), - axis=axis) - - axis_indexer = [slice(None)] * self.ndim - if periods > 0: - axis_indexer[axis] = slice(None, periods) - else: - axis_indexer[axis] = slice(periods, None) - new_values[tuple(axis_indexer)] = fill_value - - # restore original order - if f_ordered: - new_values = new_values.T - - return [self.make_block(new_values)] - - def eval(self, func, other, errors='raise', try_cast=False, mgr=None): - """ - evaluate the block; return result block from the result - - Parameters - ---------- - func : how to combine self, other - other : a ndarray/object - errors : str, {'raise', 'ignore'}, default 'raise' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. 
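`Block.shift` above calls `maybe_upcast` before rolling so the vacated positions can hold the fill value, which is where the classic int-to-float promotion on `shift` comes from:

```python
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.shift(1))
# 0    NaN
# 1    1.0
# 2    2.0
# dtype: float64  <- upcast so NaN fits into the vacated slot
```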
On error return original object - - try_cast : try casting the results to the input type - - Returns - ------- - a new block, the result of the func - """ - orig_other = other - values = self.values - - other = getattr(other, 'values', other) - - # make sure that we can broadcast - is_transposed = False - if hasattr(other, 'ndim') and hasattr(values, 'ndim'): - if values.ndim != other.ndim: - is_transposed = True - else: - if values.shape == other.shape[::-1]: - is_transposed = True - elif values.shape[0] == other.shape[-1]: - is_transposed = True - else: - # this is a broadcast error heree - raise ValueError( - "cannot broadcast shape [{t_shape}] with " - "block values [{oth_shape}]".format( - t_shape=values.T.shape, oth_shape=other.shape)) - - transf = (lambda x: x.T) if is_transposed else (lambda x: x) - - # coerce/transpose the args if needed - try: - values, values_mask, other, other_mask = self._try_coerce_args( - transf(values), other) - except TypeError: - block = self.coerce_to_target_dtype(orig_other) - return block.eval(func, orig_other, - errors=errors, - try_cast=try_cast, mgr=mgr) - - # get the result, may need to transpose the other - def get_result(other): - - # avoid numpy warning of comparisons again None - if other is None: - result = not func.__name__ == 'eq' - - # avoid numpy warning of elementwise comparisons to object - elif is_numeric_v_string_like(values, other): - result = False - - # avoid numpy warning of elementwise comparisons - elif func.__name__ == 'eq': - if is_list_like(other) and not isinstance(other, np.ndarray): - other = np.asarray(other) - - # if we can broadcast, then ok - if values.shape[-1] != other.shape[-1]: - return False - result = func(values, other) - else: - result = func(values, other) - - # mask if needed - if isinstance(values_mask, np.ndarray) and values_mask.any(): - result = result.astype('float64', copy=False) - result[values_mask] = np.nan - if other_mask is True: - result = result.astype('float64', copy=False) - result[:] = np.nan - elif isinstance(other_mask, np.ndarray) and other_mask.any(): - result = result.astype('float64', copy=False) - result[other_mask.ravel()] = np.nan - - return result - - # error handler if we have an issue operating with the function - def handle_error(): - - if errors == 'raise': - # The 'detail' variable is defined in outer scope. 
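`get_result` above special-cases comparisons against `None` and numeric-vs-string operands so they come back as plain booleans instead of tripping NumPy's elementwise-comparison warnings. A sketch of the observable behaviour (the exact dispatch is internal):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})

# other is None: 'eq' short-circuits to all-False, 'ne' to all-True
print((df == None).values.ravel())  # [False False False]  # noqa: E711
print((df != None).values.ravel())  # [ True  True  True]  # noqa: E711

# numeric block vs string: the is_numeric_v_string_like branch
print((df == 'x').values.ravel())   # [False False False]
```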
- raise TypeError( - 'Could not operate {other!r} with block values ' - '{detail!s}'.format(other=other, detail=detail)) # noqa - else: - # return the values - result = np.empty(values.shape, dtype='O') - result.fill(np.nan) - return result - - # get the result - try: - with np.errstate(all='ignore'): - result = get_result(other) - - # if we have an invalid shape/broadcast error - # GH4576, so raise instead of allowing to pass through - except ValueError as detail: - raise - except Exception as detail: - result = handle_error() - - # technically a broadcast error in numpy can 'work' by returning a - # boolean False - if not isinstance(result, np.ndarray): - if not isinstance(result, np.ndarray): - - # differentiate between an invalid ndarray-ndarray comparison - # and an invalid type comparison - if isinstance(values, np.ndarray) and is_list_like(other): - raise ValueError( - 'Invalid broadcasting comparison [{other!r}] with ' - 'block values'.format(other=other)) - - raise TypeError('Could not compare [{other!r}] ' - 'with block values'.format(other=other)) - - # transpose if needed - result = transf(result) - - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - - result = _block_shape(result, ndim=self.ndim) - return [self.make_block(result)] - - def where(self, other, cond, align=True, errors='raise', - try_cast=False, axis=0, transpose=False, mgr=None): - """ - evaluate the block; return result block(s) from the result - - Parameters - ---------- - other : a ndarray/object - cond : the condition to respect - align : boolean, perform alignment on other/cond - errors : str, {'raise', 'ignore'}, default 'raise' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. On error return original object - - axis : int - transpose : boolean - Set to True if self is stored with axes reversed - - Returns - ------- - a new block(s), the result of the func - """ - import pandas.core.computation.expressions as expressions - assert errors in ['raise', 'ignore'] - - values = self.values - orig_other = other - if transpose: - values = values.T - - other = getattr(other, '_values', getattr(other, 'values', other)) - cond = getattr(cond, 'values', cond) - - # If the default broadcasting would go in the wrong direction, then - # explicitly reshape other instead - if getattr(other, 'ndim', 0) >= 1: - if values.ndim - 1 == other.ndim and axis == 1: - other = other.reshape(tuple(other.shape + (1, ))) - elif transpose and values.ndim == self.ndim - 1: - cond = cond.T - - if not hasattr(cond, 'shape'): - raise ValueError("where must have a condition that is ndarray " - "like") - - # our where function - def func(cond, values, other): - if cond.ravel().all(): - return values - - values, values_mask, other, other_mask = self._try_coerce_args( - values, other) - - try: - return self._try_coerce_result(expressions.where( - cond, values, other)) - except Exception as detail: - if errors == 'raise': - raise TypeError( - 'Could not operate [{other!r}] with block values ' - '[{detail!s}]'.format(other=other, detail=detail)) - else: - # return the values - result = np.empty(values.shape, dtype='float64') - result.fill(np.nan) - return result - - # see if we can operate on the entire block, or need item-by-item - # or if we are a single block (ndim == 1) - try: - result = func(cond, values, other) - except TypeError: - - # we cannot coerce, return a compat dtype - # we are explicitly ignoring errors - block = self.coerce_to_target_dtype(other) - blocks = 
block.where(orig_other, cond, align=align, - errors=errors, - try_cast=try_cast, axis=axis, - transpose=transpose) - return self._maybe_downcast(blocks, 'infer') - - if self._can_hold_na or self.ndim == 1: - - if transpose: - result = result.T - - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - - return self.make_block(result) - - # might need to separate out blocks - axis = cond.ndim - 1 - cond = cond.swapaxes(axis, 0) - mask = np.array([cond[i].all() for i in range(cond.shape[0])], - dtype=bool) - - result_blocks = [] - for m in [mask, ~mask]: - if m.any(): - r = self._try_cast_result(result.take(m.nonzero()[0], - axis=axis)) - result_blocks.append( - self.make_block(r.T, placement=self.mgr_locs[m])) - - return result_blocks - - def equals(self, other): - if self.dtype != other.dtype or self.shape != other.shape: - return False - return array_equivalent(self.values, other.values) - - def _unstack(self, unstacker_func, new_columns): - """Return a list of unstacked blocks of self - - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. - """ - unstacker = unstacker_func(self.values.T) - new_items = unstacker.get_new_columns() - new_placement = new_columns.get_indexer(new_items) - new_values, mask = unstacker.get_new_values() - - mask = mask.any(0) - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [make_block(new_values, placement=new_placement)] - return blocks, mask - - def quantile(self, qs, interpolation='linear', axis=0, mgr=None): - """ - compute the quantiles of the - - Parameters - ---------- - qs: a scalar or list of the quantiles to be computed - interpolation: type of interpolation, default 'linear' - axis: axis to compute, default 0 - - Returns - ------- - tuple of (axis, block) - - """ - kw = {'interpolation': interpolation} - values = self.get_values() - values, _, _, _ = self._try_coerce_args(values, values) - - def _nanpercentile1D(values, mask, q, **kw): - values = values[~mask] - - if len(values) == 0: - if is_scalar(q): - return self._na_value - else: - return np.array([self._na_value] * len(q), - dtype=values.dtype) - - return np.percentile(values, q, **kw) - - def _nanpercentile(values, q, axis, **kw): - - mask = isna(self.values) - if not is_scalar(mask) and mask.any(): - if self.ndim == 1: - return _nanpercentile1D(values, mask, q, **kw) - else: - # for nonconsolidatable blocks mask is 1D, but values 2D - if mask.ndim < values.ndim: - mask = mask.reshape(values.shape) - if axis == 0: - values = values.T - mask = mask.T - result = [_nanpercentile1D(val, m, q, **kw) for (val, m) - in zip(list(values), list(mask))] - result = np.array(result, dtype=values.dtype, copy=False).T - return result - else: - return np.percentile(values, q, axis=axis, **kw) - - from pandas import Float64Index - is_empty = values.shape[axis] == 0 - if is_list_like(qs): - ax = Float64Index(qs) - - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - # create the array of na_values - # 2d len(values) * len(qs) - result = np.repeat(np.array([self._na_value] * len(qs)), - len(values)).reshape(len(values), - len(qs)) - else: - - try: - result = _nanpercentile(values, np.array(qs) * 100, - axis=axis, **kw) - except ValueError: - - # older numpies don't handle an 
array for q - result = [_nanpercentile(values, q * 100, - axis=axis, **kw) for q in qs] - - result = np.array(result, copy=False) - if self.ndim > 1: - result = result.T - - else: - - if self.ndim == 1: - ax = Float64Index([qs]) - else: - ax = mgr.axes[0] - - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - result = np.array([self._na_value] * len(self)) - else: - result = _nanpercentile(values, qs * 100, axis=axis, **kw) - - ndim = getattr(result, 'ndim', None) or 0 - result = self._try_coerce_result(result) - if is_scalar(result): - return ax, self.make_block_scalar(result) - return ax, make_block(result, - placement=np.arange(len(result)), - ndim=ndim) - - -class ScalarBlock(Block): - """ - a scalar compat Block - """ - __slots__ = ['_mgr_locs', 'values', 'ndim'] - - def __init__(self, values): - self.ndim = 0 - self.mgr_locs = [0] - self.values = values - - @property - def dtype(self): - return type(self.values) - - @property - def shape(self): - return tuple([0]) - - def __len__(self): - return 0 - - -class NonConsolidatableMixIn(object): - """ hold methods for the nonconsolidatable blocks """ - _can_consolidate = False - _verify_integrity = False - _validate_ndim = False - - def __init__(self, values, placement, ndim=None): - """Initialize a non-consolidatable block. - - 'ndim' may be inferred from 'placement'. - - This will call continue to call __init__ for the other base - classes mixed in with this Mixin. - """ - # Placement must be converted to BlockPlacement so that we can check - # its length - if not isinstance(placement, BlockPlacement): - placement = BlockPlacement(placement) - - # Maybe infer ndim from placement - if ndim is None: - if len(placement) != 1: - ndim = 1 - else: - ndim = 2 - super(NonConsolidatableMixIn, self).__init__(values, placement, - ndim=ndim) - - @property - def shape(self): - if self.ndim == 1: - return (len(self.values)), - return (len(self.mgr_locs), len(self.values)) - - def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ - values = self.values.to_dense() - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - - def iget(self, col): - - if self.ndim == 2 and isinstance(col, tuple): - col, loc = col - if not com.is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - else: - if col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values - - def should_store(self, value): - return isinstance(value, self._holder) - - def set(self, locs, values, check=False): - assert locs.tolist() == [0] - self.values = values - - def putmask(self, mask, new, align=True, inplace=False, axis=0, - transpose=False, mgr=None): - """ - putmask the data to the block; we must be a single block and not - generate other blocks - - return the resulting block - - Parameters - ---------- - mask : the condition to respect - new : a ndarray/object - align : boolean, perform alignment on other/cond, default is True - inplace : perform inplace modification, default is False - - Returns - ------- - a new block, the result of the putmask - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - # use block's copy logic. 
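`NonConsolidatableMixIn` above pins `_can_consolidate = False` and infers `ndim` from the placement length because such blocks always hold exactly one column. That is why each categorical column below lives in its own block while the two float columns consolidate (`._data` is private API):

```python
import pandas as pd

df = pd.DataFrame({'x': pd.Categorical(['a', 'b']),
                   'y': pd.Categorical(['c', 'd']),
                   'z': [1.0, 2.0],
                   'w': [3.0, 4.0]})

for blk in df._data.blocks:
    print(type(blk).__name__, blk.shape)
# e.g. two CategoricalBlock (1, 2) entries and one FloatBlock (2, 2)
```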
- # .values may be an Index which does shallow copy by default - new_values = self.values if inplace else self.copy().values - new_values, _, new, _ = self._try_coerce_args(new_values, new) - - if isinstance(new, np.ndarray) and len(new) == len(mask): - new = new[mask] - - mask = _safe_reshape(mask, new_values.shape) - - new_values[mask] = new - new_values = self._try_coerce_result(new_values) - return [self.make_block(values=new_values)] - - def _slice(self, slicer): - """ return a slice of my values (but densify first) """ - return self.get_values()[slicer] - - def _try_cast_result(self, result, dtype=None): - return result - - def _unstack(self, unstacker_func, new_columns): - """Return a list of unstacked blocks of self - - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. - """ - # NonConsolidatable blocks can have a single item only, so we return - # one block per item - unstacker = unstacker_func(self.values.T) - new_items = unstacker.get_new_columns() - new_placement = new_columns.get_indexer(new_items) - new_values, mask = unstacker.get_new_values() - - mask = mask.any(0) - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [self.make_block_same_class(vals, [place]) - for vals, place in zip(new_values, new_placement)] - return blocks, mask - - -class ExtensionBlock(NonConsolidatableMixIn, Block): - """Block for holding extension types. - - Notes - ----- - This holds all 3rd-party extension array types. It's also the immediate - parent class for our internal extension types' blocks, CategoricalBlock. - - ExtensionArrays are limited to 1-D. - """ - is_extension = True - - def __init__(self, values, placement, ndim=None): - values = self._maybe_coerce_values(values) - super(ExtensionBlock, self).__init__(values, placement, ndim) - - def _maybe_coerce_values(self, values): - """Unbox to an extension array. - - This will unbox an ExtensionArray stored in an Index or Series. - ExtensionArrays pass through. No dtype coercion is done. - - Parameters - ---------- - values : Index, Series, ExtensionArray - - Returns - ------- - ExtensionArray - """ - if isinstance(values, (ABCIndexClass, ABCSeries)): - values = values._values - return values - - @property - def _holder(self): - # For extension blocks, the holder is values-dependent. - return type(self.values) - - @property - def fill_value(self): - # Used in reindex_indexer - return self.values.dtype.na_value - - @property - def _can_hold_na(self): - # The default ExtensionArray._can_hold_na is True - return self._holder._can_hold_na - - @property - def is_view(self): - """Extension arrays are never treated as views.""" - return False - - def setitem(self, indexer, value, mgr=None): - """Set the value inplace, returning a same-typed block. - - This differs from Block.setitem by not allowing setitem to change - the dtype of the Block. - - Parameters - ---------- - indexer : tuple, list-like, array-like, slice - The subset of self.values to set - value : object - The value being set - mgr : BlockPlacement, optional - - Returns - ------- - Block - - Notes - ----- - `indexer` is a direct slice/positional indexer. `value` must - be a compatible shape. 
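For `ExtensionBlock` above, `_holder` is derived from the stored array, `fill_value` comes from the dtype's `na_value`, and `is_view` is always False. Illustrated with the 0.24-era nullable integer array (private attributes, shown only for inspection):

```python
import pandas as pd

s = pd.Series([1, 2, None], dtype='Int64')  # ExtensionBlock-backed
blk = s._data.blocks[0]

print(type(blk).__name__)    # ExtensionBlock
print(blk._holder.__name__)  # IntegerArray -- type(self.values)
print(blk.is_view)           # False: never reported as a view
```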
- """ - if isinstance(indexer, tuple): - # we are always 1-D - indexer = indexer[0] - - check_setitem_lengths(indexer, value, self.values) - self.values[indexer] = value - return self - - def get_values(self, dtype=None): - # ExtensionArrays must be iterable, so this works. - values = np.asarray(self.values) - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - - def to_dense(self): - return np.asarray(self.values) - - def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block. - """ - if fill_tuple is None: - fill_value = None - else: - fill_value = fill_tuple[0] - - # axis doesn't matter; we are really a single-dim object - # but are passed the axis depending on the calling routing - # if its REALLY axis 0, then this will be a reindex and not a take - new_values = self.values.take(indexer, fill_value=fill_value, - allow_fill=True) - - # if we are a 1-dim object, then always place at 0 - if self.ndim == 1: - new_mgr_locs = [0] - else: - if new_mgr_locs is None: - new_mgr_locs = self.mgr_locs - - return self.make_block_same_class(new_values, new_mgr_locs) - - def _can_hold_element(self, element): - # XXX: We may need to think about pushing this onto the array. - # We're doing the same as CategoricalBlock here. - return True - - def _slice(self, slicer): - """ return a slice of my values """ - - # slice the category - # return same dims as we currently have - - if isinstance(slicer, tuple) and len(slicer) == 2: - if not com.is_null_slice(slicer[0]): - raise AssertionError("invalid slicing for a 1-ndim " - "categorical") - slicer = slicer[1] - - return self.values[slicer] - - def formatting_values(self): - return self.values._formatting_values() - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. 
- """ - values = self._holder._concat_same_type( - [blk.values for blk in to_concat]) - placement = placement or slice(0, len(values), 1) - return self.make_block_same_class(values, ndim=self.ndim, - placement=placement) - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - values = self.values if inplace else self.values.copy() - values = values.fillna(value=value, limit=limit) - return [self.make_block_same_class(values=values, - placement=self.mgr_locs, - ndim=self.ndim)] - - def interpolate(self, method='pad', axis=0, inplace=False, limit=None, - fill_value=None, **kwargs): - - values = self.values if inplace else self.values.copy() - return self.make_block_same_class( - values=values.fillna(value=fill_value, method=method, - limit=limit), - placement=self.mgr_locs) - - -class NumericBlock(Block): - __slots__ = () - is_numeric = True - _can_hold_na = True - - -class FloatOrComplexBlock(NumericBlock): - __slots__ = () - - def equals(self, other): - if self.dtype != other.dtype or self.shape != other.shape: - return False - left, right = self.values, other.values - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() - - -class FloatBlock(FloatOrComplexBlock): - __slots__ = () - is_float = True - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return (issubclass(tipo.type, (np.floating, np.integer)) and - not issubclass(tipo.type, (np.datetime64, np.timedelta64))) - return ( - isinstance( - element, (float, int, np.floating, np.int_, compat.long)) - and not isinstance(element, (bool, np.bool_, datetime, timedelta, - np.datetime64, np.timedelta64))) - - def to_native_types(self, slicer=None, na_rep='', float_format=None, - decimal='.', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[:, slicer] - - # see gh-13418: no special formatting is desired at the - # output (important for appropriate 'quoting' behaviour), - # so do not pass it through the FloatArrayFormatter - if float_format is None and decimal == '.': - mask = isna(values) - - if not quoting: - values = values.astype(str) - else: - values = np.array(values, dtype='object') - - values[mask] = na_rep - return values - - from pandas.io.formats.format import FloatArrayFormatter - formatter = FloatArrayFormatter(values, na_rep=na_rep, - float_format=float_format, - decimal=decimal, quoting=quoting, - fixed_width=False) - return formatter.get_result_as_array() - - def should_store(self, value): - # when inserting a column should not coerce integers to floats - # unnecessarily - return (issubclass(value.dtype.type, np.floating) and - value.dtype == self.dtype) - - -class ComplexBlock(FloatOrComplexBlock): - __slots__ = () - is_complex = True - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, - (np.floating, np.integer, np.complexfloating)) - return ( - isinstance( - element, - (float, int, complex, np.float_, np.int_, compat.long)) - and not isinstance(element, (bool, np.bool_))) - - def should_store(self, value): - return issubclass(value.dtype.type, np.complexfloating) - - -class IntBlock(NumericBlock): - __slots__ = () - is_integer = True - _can_hold_na = False - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return (issubclass(tipo.type, np.integer) and - not issubclass(tipo.type, 
(np.datetime64, - np.timedelta64)) and - self.dtype.itemsize >= tipo.itemsize) - return is_integer(element) - - def should_store(self, value): - return is_integer_dtype(value) and value.dtype == self.dtype - - -class DatetimeLikeBlockMixin(object): - """Mixin class for DatetimeBlock and DatetimeTZBlock.""" - - @property - def _holder(self): - return DatetimeIndex - - @property - def _na_value(self): - return tslibs.NaT - - @property - def fill_value(self): - return tslibs.iNaT - - def get_values(self, dtype=None): - """ - return object dtype as boxed values, such as Timestamps/Timedelta - """ - if is_object_dtype(dtype): - return lib.map_infer(self.values.ravel(), - self._box_func).reshape(self.values.shape) - return self.values - - -class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): - __slots__ = () - is_timedelta = True - _can_hold_na = True - is_numeric = False - - def __init__(self, values, placement, ndim=None): - if values.dtype != _TD_DTYPE: - values = conversion.ensure_timedelta64ns(values) - - super(TimeDeltaBlock, self).__init__(values, - placement=placement, ndim=ndim) - - @property - def _holder(self): - return TimedeltaIndex - - @property - def _box_func(self): - return lambda x: Timedelta(x, unit='ns') - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.timedelta64) - return is_integer(element) or isinstance( - element, (timedelta, np.timedelta64)) - - def fillna(self, value, **kwargs): - - # allow filling with integers to be - # interpreted as seconds - if is_integer(value) and not isinstance(value, np.timedelta64): - value = Timedelta(value, unit='s') - return super(TimeDeltaBlock, self).fillna(value, **kwargs) - - def _try_coerce_args(self, values, other): - """ - Coerce values and other to int64, with null values converted to - iNaT. 
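Note the `fillna` override above: an integer fill value on a timedelta block is re-interpreted as seconds before delegating to the base class. In this era of the code:

```python
import numpy as np
import pandas as pd

s = pd.Series(pd.to_timedelta([1, np.nan], unit='d'))
print(s.fillna(30))
# 0   1 days 00:00:00
# 1   0 days 00:00:30  <- 30 was wrapped as Timedelta(30, unit='s')
# dtype: timedelta64[ns]
```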
values is always ndarray-like, other may not be - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - - values_mask = isna(values) - values = values.view('i8') - other_mask = False - - if isinstance(other, bool): - raise TypeError - elif is_null_datelike_scalar(other): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, Timedelta): - other_mask = isna(other) - other = other.value - elif isinstance(other, timedelta): - other = Timedelta(other).value - elif isinstance(other, np.timedelta64): - other_mask = isna(other) - other = Timedelta(other).value - elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): - other_mask = isna(other) - other = other.astype('i8', copy=False).view('i8') - else: - # coercion issues - # let higher levels handle - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ - if isinstance(result, np.ndarray): - mask = isna(result) - if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('m8[ns]') - result[mask] = tslibs.iNaT - elif isinstance(result, (np.integer, np.float)): - result = self._box_func(result) - return result - - def should_store(self, value): - return issubclass(value.dtype.type, np.timedelta64) - - def to_native_types(self, slicer=None, na_rep=None, quoting=None, - **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[:, slicer] - mask = isna(values) - - rvalues = np.empty(values.shape, dtype=object) - if na_rep is None: - na_rep = 'NaT' - rvalues[mask] = na_rep - imask = (~mask).ravel() - - # FIXME: - # should use the formats.format.Timedelta64Formatter here - # to figure what format to pass to the Timedelta - # e.g. 
to not show the decimals say - rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') - for val in values.ravel()[imask]], - dtype=object) - return rvalues - - -class BoolBlock(NumericBlock): - __slots__ = () - is_bool = True - _can_hold_na = False - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.bool_) - return isinstance(element, (bool, np.bool_)) - - def should_store(self, value): - return issubclass(value.dtype.type, np.bool_) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - inplace = validate_bool_kwarg(inplace, 'inplace') - to_replace_values = np.atleast_1d(to_replace) - if not np.can_cast(to_replace_values, bool): - return self - return super(BoolBlock, self).replace(to_replace, value, - inplace=inplace, filter=filter, - regex=regex, convert=convert, - mgr=mgr) - - -class ObjectBlock(Block): - __slots__ = () - is_object = True - _can_hold_na = True - - def __init__(self, values, placement=None, ndim=2): - if issubclass(values.dtype.type, compat.string_types): - values = np.array(values, dtype=object) - - super(ObjectBlock, self).__init__(values, ndim=ndim, - placement=placement) - - @property - def is_bool(self): - """ we can be a bool if we have only bool values but are of type - object - """ - return lib.is_bool_array(self.values.ravel()) - - # TODO: Refactor when convert_objects is removed since there will be 1 path - def convert(self, *args, **kwargs): - """ attempt to coerce any object types to better types return a copy of - the block (if copy = True) by definition we ARE an ObjectBlock!!!!! - - can return multiple blocks! - """ - - if args: - raise NotImplementedError - by_item = True if 'by_item' not in kwargs else kwargs['by_item'] - - new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] - new_style = False - for kw in new_inputs: - new_style |= kw in kwargs - - if new_style: - fn = soft_convert_objects - fn_inputs = new_inputs - else: - fn = maybe_convert_objects - fn_inputs = ['convert_dates', 'convert_numeric', - 'convert_timedeltas'] - fn_inputs += ['copy'] - - fn_kwargs = {} - for key in fn_inputs: - if key in kwargs: - fn_kwargs[key] = kwargs[key] - - # operate column-by-column - def f(m, v, i): - shape = v.shape - values = fn(v.ravel(), **fn_kwargs) - try: - values = values.reshape(shape) - values = _block_shape(values, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass - - return values - - if by_item and not self._is_single_block: - blocks = self.split_and_operate(None, f, False) - else: - values = f(None, self.values.ravel(), None) - blocks = [make_block(values, ndim=self.ndim, - placement=self.mgr_locs)] - - return blocks - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - - # GH6026 - if check: - try: - if (self.values[locs] == values).all(): - return - except: - pass - try: - self.values[locs] = values - except (ValueError): - - # broadcasting error - # see GH6171 - new_shape = list(values.shape) - new_shape[0] = len(self.items) - self.values = np.empty(tuple(new_shape), dtype=self.dtype) - self.values.fill(np.nan) - self.values[locs] = values - - def _maybe_downcast(self, blocks, downcast=None): - - if downcast is not None: - return blocks - - # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) - for b in blocks]) - - def 
_can_hold_element(self, element): - return True - - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ - - if isinstance(other, ABCDatetimeIndex): - # to store DatetimeTZBlock as object - other = other.astype(object).values - - return values, False, other, False - - def should_store(self, value): - return not (issubclass(value.dtype.type, - (np.integer, np.floating, np.complexfloating, - np.datetime64, np.bool_)) or - # TODO(ExtensionArray): remove is_extension_type - # when all extension arrays have been ported. - is_extension_type(value) or - is_extension_array_dtype(value)) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - to_rep_is_list = is_list_like(to_replace) - value_is_list = is_list_like(value) - both_lists = to_rep_is_list and value_is_list - either_list = to_rep_is_list or value_is_list - - result_blocks = [] - blocks = [self] - - if not either_list and is_re(to_replace): - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, regex=True, - convert=convert, mgr=mgr) - elif not (either_list or regex): - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - elif both_lists: - for to_rep, v in zip(to_replace, value): - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, v, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - elif to_rep_is_list and regex: - for to_rep in to_replace: - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, value, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, convert=convert, - regex=regex, mgr=mgr) - - def _replace_single(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - - inplace = validate_bool_kwarg(inplace, 'inplace') - - # to_replace is regex compilable - to_rep_re = regex and is_re_compilable(to_replace) - - # regex is regex compilable - regex_re = is_re_compilable(regex) - - # only one will survive - if to_rep_re and regex_re: - raise AssertionError('only one of to_replace and regex can be ' - 'regex compilable') - - # if regex was passed as something that can be a regex (rather than a - # boolean) - if regex_re: - to_replace = regex - - regex = regex_re or to_rep_re - - # try to get the pattern attribute (compiled re) or it's a string - try: - pattern = to_replace.pattern - except AttributeError: - pattern = to_replace - - # if the pattern is not empty and to_replace is either a string or a - # regex - if regex and pattern: - rx = re.compile(to_replace) - else: - # if the thing to replace is not a string or compiled regex call - # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - mgr=mgr) - - new_values = self.values if inplace else self.values.copy() - - # deal with replacing values with objects (strings) that match but - # whose replacement is not a string (numeric, nan, object) - if isna(value) or not isinstance(value, compat.string_types): - - def re_replacer(s): - try: - return 
value if rx.search(s) is not None else s - except TypeError: - return s - else: - # value is guaranteed to be a string here, s can be either a string - # or null if it's null it gets returned - def re_replacer(s): - try: - return rx.sub(value, s) - except TypeError: - return s - - f = np.vectorize(re_replacer, otypes=[self.dtype]) - - if filter is None: - filt = slice(None) - else: - filt = self.mgr_locs.isin(filter).nonzero()[0] - - new_values[filt] = f(new_values[filt]) - - # convert - block = self.make_block(new_values) - if convert: - block = block.convert(by_item=True, numeric=False) - - return block - - -class CategoricalBlock(ExtensionBlock): - __slots__ = () - is_categorical = True - _verify_integrity = True - _can_hold_na = True - _concatenator = staticmethod(_concat._concat_categorical) - - def __init__(self, values, placement, ndim=None): - from pandas.core.arrays.categorical import _maybe_to_categorical - - # coerce to categorical if we can - super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - placement=placement, - ndim=ndim) - - @property - def _holder(self): - return Categorical - - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return np.object_ - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - - # GH12564: CategoricalBlock is 1-dim only - # while returned results could be any dim - if ((not is_categorical_dtype(result)) and - isinstance(result, np.ndarray)): - result = _block_shape(result, ndim=self.ndim) - - return result - - def shift(self, periods, axis=0, mgr=None): - return self.make_block_same_class(values=self.values.shift(periods), - placement=self.mgr_locs) - - def to_dense(self): - # Categorical.get_values returns a DatetimeIndex for datetime - # categories, so we can't simply use `np.asarray(self.values)` like - # other types. - return self.values.get_values() - - def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - # Categorical is always one dimension - values = values[slicer] - mask = isna(values) - values = np.array(values, dtype='object') - values[mask] = na_rep - - # we are expected to return a 2-d ndarray - return values.reshape(1, len(values)) - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - - Note that this CategoricalBlock._concat_same_type *may* not - return a CategoricalBlock. When the categories in `to_concat` - differ, this will return an object ndarray. - - If / when we decide we don't like that behavior: - - 1. Change Categorical._concat_same_type to use union_categoricals - 2. Delete this method. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be object dtype - return make_block( - values, placement=placement or slice(0, len(values), 1), - ndim=self.ndim) - - -class DatetimeBlock(DatetimeLikeBlockMixin, Block): - __slots__ = () - is_datetime = True - _can_hold_na = True - - def __init__(self, values, placement, ndim=None): - values = self._maybe_coerce_values(values) - super(DatetimeBlock, self).__init__(values, - placement=placement, ndim=ndim) - - def _maybe_coerce_values(self, values): - """Input validation for values passed to __init__. Ensure that - we have datetime64ns, coercing if necessary. 
- - Parameters - ---------- - values : array-like - Must be convertible to datetime64 - - Returns - ------- - values : ndarray[datetime64ns] - - Overridden by DatetimeTZBlock. - """ - if values.dtype != _NS_DTYPE: - values = conversion.ensure_datetime64ns(values) - return values - - def _astype(self, dtype, mgr=None, **kwargs): - """ - these automatically copy, so copy=True has no effect - raise on an except if raise == True - """ - - # if we are passed a datetime64[ns, tz] - if is_datetime64tz_dtype(dtype): - dtype = DatetimeTZDtype(dtype) - - values = self.values - if getattr(values, 'tz', None) is None: - values = DatetimeIndex(values).tz_localize('UTC') - values = values.tz_convert(dtype.tz) - return self.make_block(values) - - # delegate - return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - # TODO: this still uses asarray, instead of dtype.type - element = np.array(element) - return element.dtype == _NS_DTYPE or element.dtype == np.int64 - return (is_integer(element) or isinstance(element, datetime) or - isna(element)) - - def _try_coerce_args(self, values, other): - """ - Coerce values and other to dtype 'i8'. NaN and NaT convert to - the smallest i8, and will correctly round-trip to NaT if converted - back in _try_coerce_result. values is always ndarray-like, other - may not be - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - - values_mask = isna(values) - values = values.view('i8') - other_mask = False - - if isinstance(other, bool): - raise TypeError - elif is_null_datelike_scalar(other): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, (datetime, np.datetime64, date)): - other = self._box_func(other) - if getattr(other, 'tz') is not None: - raise TypeError("cannot coerce a Timestamp with a tz on a " - "naive Block") - other_mask = isna(other) - other = other.asm8.view('i8') - elif hasattr(other, 'dtype') and is_datetime64_dtype(other): - other_mask = isna(other) - other = other.astype('i8', copy=False).view('i8') - else: - # coercion issues - # let higher levels handle - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - try: - result = result.astype('M8[ns]') - except ValueError: - pass - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = self._box_func(result) - return result - - @property - def _box_func(self): - return tslibs.Timestamp - - def to_native_types(self, slicer=None, na_rep=None, date_format=None, - quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[..., slicer] - - from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(values, date_format) - - result = tslib.format_array_from_datetime( - values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(values.shape) - return np.atleast_2d(result) - - def should_store(self, value): - return (issubclass(value.dtype.type, np.datetime64) and - not is_datetimetz(value)) - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item 
value - - Returns - ------- - None - """ - if values.dtype != _NS_DTYPE: - # Workaround for numpy 1.6 bug - values = conversion.ensure_datetime64ns(values) - - self.values[locs] = values - - -class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): - """ implement a datetime64 block with a tz attribute """ - __slots__ = () - _concatenator = staticmethod(_concat._concat_datetime) - is_datetimetz = True - - def __init__(self, values, placement, ndim=2, dtype=None): - # XXX: This will end up calling _maybe_coerce_values twice - # when dtype is not None. It's relatively cheap (just an isinstance) - # but it'd nice to avoid. - # - # If we can remove dtype from __init__, and push that conversion - # push onto the callers, then we can remove this entire __init__ - # and just use DatetimeBlock's. - if dtype is not None: - values = self._maybe_coerce_values(values, dtype=dtype) - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim) - - def _maybe_coerce_values(self, values, dtype=None): - """Input validation for values passed to __init__. Ensure that - we have datetime64TZ, coercing if necessary. - - Parametetrs - ----------- - values : array-like - Must be convertible to datetime64 - dtype : string or DatetimeTZDtype, optional - Does a shallow copy to this tz - - Returns - ------- - values : ndarray[datetime64ns] - """ - if not isinstance(values, self._holder): - values = self._holder(values) - - if dtype is not None: - if isinstance(dtype, compat.string_types): - dtype = DatetimeTZDtype.construct_from_string(dtype) - values = values._shallow_copy(tz=dtype.tz) - - if values.tz is None: - raise ValueError("cannot create a DatetimeTZBlock without a tz") - - return values - - @property - def is_view(self): - """ return a boolean if I am possibly a view """ - # check the ndarray values of the DatetimeIndex values - return self.values.values.base is not None - - def copy(self, deep=True, mgr=None): - """ copy constructor """ - values = self.values - if deep: - values = values.copy(deep=True) - return self.make_block_same_class(values) - - def external_values(self): - """ we internally represent the data as a DatetimeIndex, but for - external compat with ndarray, export as a ndarray of Timestamps - """ - return self.values.astype('datetime64[ns]').values - - def get_values(self, dtype=None): - # return object dtype as Timestamps with the zones - if is_object_dtype(dtype): - return lib.map_infer( - self.values.ravel(), self._box_func).reshape(self.values.shape) - return self.values - - def _slice(self, slicer): - """ return a slice of my values """ - if isinstance(slicer, tuple): - col, loc = slicer - if not com.is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - return self.values[slicer] - - def _try_coerce_args(self, values, other): - """ - localize and return i8 for the values - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - values_mask = _block_shape(isna(values), ndim=self.ndim) - # asi8 is a view, needs copy - values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False - - if isinstance(other, ABCSeries): - other = self._holder(other) - other_mask = isna(other) - - if isinstance(other, bool): - raise TypeError - elif (is_null_datelike_scalar(other) or - (is_scalar(other) and isna(other))): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, 
self._holder): - if other.tz != self.values.tz: - raise ValueError("incompatible or non tz-aware value") - other_mask = _block_shape(isna(other), ndim=self.ndim) - other = _block_shape(other.asi8, ndim=self.ndim) - elif isinstance(other, (np.datetime64, datetime, date)): - other = tslibs.Timestamp(other) - tz = getattr(other, 'tz', None) - - # test we can have an equal time zone - if tz is None or str(tz) != str(self.values.tz): - raise ValueError("incompatible or non tz-aware value") - other_mask = isna(other) - other = other.value - else: - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('M8[ns]') - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = tslibs.Timestamp(result, tz=self.values.tz) - if isinstance(result, np.ndarray): - # allow passing of > 1dim if its trivial - if result.ndim > 1: - result = result.reshape(np.prod(result.shape)) - result = self.values._shallow_copy(result) - - return result - - @property - def _box_func(self): - return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - - def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods """ - - # think about moving this to the DatetimeIndex. This is a non-freq - # (number of periods) shift ### - - N = len(self) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - - new_values = self.values.asi8.take(indexer) - - if periods > 0: - new_values[:periods] = tslibs.iNaT - else: - new_values[periods:] = tslibs.iNaT - - new_values = self.values._shallow_copy(new_values) - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - - def diff(self, n, axis=0, mgr=None): - """1st discrete difference - - Parameters - ---------- - n : int, number of periods to diff - axis : int, axis to diff upon. default 0 - mgr : default None - - Return - ------ - A list with a new TimeDeltaBlock. - - Note - ---- - The arguments here are mimicking shift so they are called correctly - by apply. - """ - if axis == 0: - # Cannot currently calculate diff across multiple blocks since this - # function is invoked via apply - raise NotImplementedError - new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 - - # Reshape the new_values like how algos.diff does for timedelta data - new_values = new_values.reshape(1, len(new_values)) - new_values = new_values.astype('timedelta64[ns]') - return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be non-tz dtype - return make_block( - values, placement=placement or slice(0, len(values), 1)) - - -class SparseBlock(NonConsolidatableMixIn, Block): - """ implement as a list of sparse arrays of the same dtype """ - __slots__ = () - is_sparse = True - is_numeric = True - _box_to_block_values = False - _can_hold_na = True - _ftype = 'sparse' - _concatenator = staticmethod(_concat._concat_sparse) - - def __init__(self, values, placement, ndim=None): - # Ensure that we have the underlying SparseArray here... 
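The tz-aware coercion above works by viewing the values as i8 (UTC epoch nanoseconds) and re-boxing results with the stored zone. A minimal sketch of that round-trip through the public API (illustrative only, not part of the patch):

import pandas as pd

vals = pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')
as_i8 = vals.asi8  # UTC epoch nanoseconds; what the block arithmetic operates on
restored = pd.to_datetime(as_i8, utc=True).tz_convert('US/Pacific')
assert list(restored) == list(vals)  # the analogue of _try_coerce_result re-boxing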
- if isinstance(values, ABCSeries): - values = values.values - assert isinstance(values, SparseArray) - super(SparseBlock, self).__init__(values, placement, ndim=ndim) - - @property - def _holder(self): - return SparseArray - - @property - def shape(self): - return (len(self.mgr_locs), self.sp_index.length) - - @property - def fill_value(self): - # return np.nan - return self.values.fill_value - - @fill_value.setter - def fill_value(self, v): - self.values.fill_value = v - - def to_dense(self): - return self.values.to_dense().view() - - @property - def sp_values(self): - return self.values.sp_values - - @sp_values.setter - def sp_values(self, v): - # reset the sparse values - self.values = SparseArray(v, sparse_index=self.sp_index, - kind=self.kind, dtype=v.dtype, - fill_value=self.values.fill_value, - copy=False) - - @property - def sp_index(self): - return self.values.sp_index - - @property - def kind(self): - return self.values.kind - - def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None, **kwargs): - if values is None: - values = self.values - values = values.astype(dtype, copy=copy) - return self.make_block_same_class(values=values, - placement=self.mgr_locs) - - def __len__(self): - try: - return self.sp_index.length - except: - return 0 - - def copy(self, deep=True, mgr=None): - return self.make_block_same_class(values=self.values, - sparse_index=self.sp_index, - kind=self.kind, copy=deep, - placement=self.mgr_locs) - - def make_block_same_class(self, values, placement, sparse_index=None, - kind=None, dtype=None, fill_value=None, - copy=False, ndim=None): - """ return a new block """ - if dtype is None: - dtype = values.dtype - if fill_value is None and not isinstance(values, SparseArray): - fill_value = self.values.fill_value - - # if not isinstance(values, SparseArray) and values.ndim != self.ndim: - # raise ValueError("ndim mismatch") - - if values.ndim == 2: - nitems = values.shape[0] - - if nitems == 0: - # kludgy, but SparseBlocks cannot handle slices, where the - # output is 0-item, so let's convert it to a dense block: it - # won't take space since there's 0 items, plus it will preserve - # the dtype. 
- return self.make_block(np.empty(values.shape, dtype=dtype), - placement) - elif nitems > 1: - raise ValueError("Only 1-item 2d sparse blocks are supported") - else: - values = values.reshape(values.shape[1]) - - new_values = SparseArray(values, sparse_index=sparse_index, - kind=kind or self.kind, dtype=dtype, - fill_value=fill_value, copy=copy) - return self.make_block(new_values, - placement=placement) - - def interpolate(self, method='pad', axis=0, inplace=False, limit=None, - fill_value=None, **kwargs): - - values = missing.interpolate_2d(self.values.to_dense(), method, axis, - limit, fill_value) - return self.make_block_same_class(values=values, - placement=self.mgr_locs) - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - # we may need to upcast our fill to match our dtype - if limit is not None: - raise NotImplementedError("specifying a limit for 'fillna' has " - "not been implemented yet") - values = self.values if inplace else self.values.copy() - values = values.fillna(value, downcast=downcast) - return [self.make_block_same_class(values=values, - placement=self.mgr_locs)] - - def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods """ - N = len(self.values.T) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - new_values = self.values.to_dense().take(indexer) - # convert integer to float if necessary. need to do a lot more than - # that, handle boolean etc also - new_values, fill_value = maybe_upcast(new_values) - if periods > 0: - new_values[:periods] = fill_value - else: - new_values[periods:] = fill_value - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - - def sparse_reindex(self, new_index): - """ sparse reindex and return a new block - current reindex only works for float64 dtype! """ - values = self.values - values = values.sp_index.to_int_index().reindex( - values.sp_values.astype('float64'), values.fill_value, new_index) - return self.make_block_same_class(values, sparse_index=new_index, - placement=self.mgr_locs) - - -def get_block_type(values, dtype=None): - """ - Find the appropriate Block subclass to use for the given values and dtype. 
- - Parameters - ---------- - values : ndarray-like - dtype : numpy or pandas dtype - - Returns - ------- - cls : class, subclass of Block - """ - dtype = dtype or values.dtype - vtype = dtype.type - - if is_sparse(values): - cls = SparseBlock - elif issubclass(vtype, np.floating): - cls = FloatBlock - elif issubclass(vtype, np.timedelta64): - assert issubclass(vtype, np.integer) - cls = TimeDeltaBlock - elif issubclass(vtype, np.complexfloating): - cls = ComplexBlock - elif is_categorical(values): - cls = CategoricalBlock - elif is_extension_array_dtype(values): - cls = ExtensionBlock - elif issubclass(vtype, np.datetime64): - assert not is_datetimetz(values) - cls = DatetimeBlock - elif is_datetimetz(values): - cls = DatetimeTZBlock - elif issubclass(vtype, np.integer): - cls = IntBlock - elif dtype == np.bool_: - cls = BoolBlock - else: - cls = ObjectBlock - return cls - - -def make_block(values, placement, klass=None, ndim=None, dtype=None, - fastpath=None): - if fastpath is not None: - # GH#19265 pyarrow is passing this - warnings.warn("fastpath argument is deprecated, will be removed " - "in a future release.", DeprecationWarning) - if klass is None: - dtype = dtype or values.dtype - klass = get_block_type(values, dtype) - - elif klass is DatetimeTZBlock and not is_datetimetz(values): - return klass(values, ndim=ndim, - placement=placement, dtype=dtype) - - return klass(values, ndim=ndim, placement=placement) +from .blocks import ( + Block, + _extend_blocks, _merge_blocks, _safe_reshape, + make_block, get_block_type) +from .blocks import ( # noqa:F401 + _block2d_to_blocknd, _factor_indexer, _block_shape, # io.pytables + FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, + TimeDeltaBlock, DatetimeBlock, DatetimeTZBlock, + CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock) # TODO: flexible with index=None and/or items=None @@ -5082,70 +1925,6 @@ def _consolidate(blocks): return new_blocks -def _merge_blocks(blocks, dtype=None, _can_consolidate=True): - - if len(blocks) == 1: - return blocks[0] - - if _can_consolidate: - - if dtype is None: - if len({b.dtype for b in blocks}) != 1: - raise AssertionError("_merge_blocks are invalid!") - dtype = blocks[0].dtype - - # FIXME: optimization potential in case all mgrs contain slices and - # combination of those slices is a slice, too. 
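For reference, a sketch of how get_block_type dispatches on dtype (internal API, so these import paths and names may change):

import numpy as np
from pandas.core.internals import get_block_type, FloatBlock, IntBlock, ObjectBlock

assert get_block_type(np.array([1.5])) is FloatBlock
assert get_block_type(np.array([1, 2, 3])) is IntBlock
assert get_block_type(np.array(['a'], dtype=object)) is ObjectBlock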
- new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) - new_values = _vstack([b.values for b in blocks], dtype) - - argsort = np.argsort(new_mgr_locs) - new_values = new_values[argsort] - new_mgr_locs = new_mgr_locs[argsort] - - return make_block(new_values, placement=new_mgr_locs) - - # no merge - return blocks - - -def _extend_blocks(result, blocks=None): - """ return a new extended blocks, givin the result """ - if blocks is None: - blocks = [] - if isinstance(result, list): - for r in result: - if isinstance(r, list): - blocks.extend(r) - else: - blocks.append(r) - elif isinstance(result, BlockManager): - blocks.extend(result.blocks) - else: - blocks.append(result) - return blocks - - -def _block_shape(values, ndim=1, shape=None): - """ guarantee the shape of the values to be at least 1 d """ - if values.ndim < ndim: - if shape is None: - shape = values.shape - values = values.reshape(tuple((1, ) + shape)) - return values - - -def _vstack(to_stack, dtype): - - # work around NumPy 1.6 bug - if dtype == _NS_DTYPE or dtype == _TD_DTYPE: - new_values = np.vstack([x.view('i8') for x in to_stack]) - return new_values.view(dtype) - - else: - return np.vstack(to_stack) - - def _maybe_compare(a, b, op): is_a_array = isinstance(a, np.ndarray) @@ -5181,41 +1960,6 @@ def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) -def _block2d_to_blocknd(values, placement, shape, labels, ref_items): - """ pivot to the labels shape """ - panel_shape = (len(placement),) + shape - - # TODO: lexsort depth needs to be 2!! - - # Create observation selection vector using major and minor - # labels, for converting to panel format. - selector = _factor_indexer(shape[1:], labels) - mask = np.zeros(np.prod(shape), dtype=bool) - mask.put(selector, True) - - if mask.all(): - pvalues = np.empty(panel_shape, dtype=values.dtype) - else: - dtype, fill_value = maybe_promote(values.dtype) - pvalues = np.empty(panel_shape, dtype=dtype) - pvalues.fill(fill_value) - - for i in range(len(placement)): - pvalues[i].flat[mask] = values[:, i] - - return make_block(pvalues, placement=placement) - - -def _factor_indexer(shape, labels): - """ - given a tuple of shape and a list of Categorical labels, return the - expanded label indexer - """ - mult = np.array(shape)[::-1].cumprod()[::-1] - return ensure_platform_int( - np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) - - def _get_blkno_placements(blknos, blk_count, group=True): """ @@ -5268,28 +2012,6 @@ def rrenamer(x): _transform_index(right, rrenamer)) -def _safe_reshape(arr, new_shape): - """ - If possible, reshape `arr` to have shape `new_shape`, - with a couple of exceptions (see gh-13012): - - 1) If `arr` is a ExtensionArray or Index, `arr` will be - returned as is. - 2) If `arr` is a Series, the `_values` attribute will - be reshaped and returned. - - Parameters - ---------- - arr : array-like, object to be reshaped - new_shape : int or tuple of ints, the new shape - """ - if isinstance(arr, ABCSeries): - arr = arr._values - if not isinstance(arr, ABCExtensionArray): - arr = arr.reshape(new_shape) - return arr - - def _transform_index(index, func, level=None): """ Apply function to all values found in index. @@ -5310,92 +2032,6 @@ def _transform_index(index, func, level=None): return Index(items, name=index.name, tupleize_cols=False) -def _putmask_smart(v, m, n): - """ - Return a new ndarray, try to preserve dtype if possible. 
- - Parameters - ---------- - v : `values`, updated in-place (array like) - m : `mask`, applies to both sides (array like) - n : `new values` either scalar or an array like aligned with `values` - - Returns - ------- - values : ndarray with updated values - this *may* be a copy of the original - - See Also - -------- - ndarray.putmask - """ - - # we cannot use np.asarray() here as we cannot have conversions - # that numpy does when numeric are mixed with strings - - # n should be the length of the mask or a scalar here - if not is_list_like(n): - n = np.repeat(n, len(m)) - elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar - n = np.repeat(np.array(n, ndmin=1), len(m)) - - # see if we are only masking values that if putted - # will work in the current dtype - try: - nn = n[m] - - # make sure that we have a nullable type - # if we have nulls - if not _isna_compat(v, nn[0]): - raise ValueError - - # we ignore ComplexWarning here - with catch_warnings(record=True): - nn_at = nn.astype(v.dtype) - - # avoid invalid dtype comparisons - # between numbers & strings - - # only compare integers/floats - # don't compare integers to datetimelikes - if (not is_numeric_v_string_like(nn, nn_at) and - (is_float_dtype(nn.dtype) or - is_integer_dtype(nn.dtype) and - is_float_dtype(nn_at.dtype) or - is_integer_dtype(nn_at.dtype))): - - comp = (nn == nn_at) - if is_list_like(comp) and comp.all(): - nv = v.copy() - nv[m] = nn_at - return nv - except (ValueError, IndexError, TypeError): - pass - - n = np.asarray(n) - - def _putmask_preserve(nv, n): - try: - nv[m] = n[m] - except (IndexError, ValueError): - nv[m] = n - return nv - - # preserves dtype if possible - if v.dtype.kind == n.dtype.kind: - return _putmask_preserve(v, n) - - # change the dtype if needed - dtype, _ = maybe_promote(n.dtype) - - if is_extension_type(v.dtype) and is_object_dtype(dtype): - v = v.get_values(dtype) - else: - v = v.astype(dtype) - - return _putmask_preserve(v, n) - - def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): """ Concatenate block managers into one. 
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py new file mode 100644 index 0000000000000..ffa2267dd6877 --- /dev/null +++ b/pandas/core/internals/blocks.py @@ -0,0 +1,3417 @@ +# -*- coding: utf-8 -*- +import warnings +import inspect +import re +from datetime import datetime, timedelta, date + +import numpy as np + +from pandas._libs import lib, tslib, tslibs, internals as libinternals +from pandas._libs.tslibs import conversion, Timedelta + +from pandas import compat +from pandas.compat import range, zip + +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, DatetimeTZDtype, + PandasExtensionDtype, + CategoricalDtype) +from pandas.core.dtypes.common import ( + _TD_DTYPE, _NS_DTYPE, + ensure_platform_int, + is_integer, + is_dtype_equal, + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, is_sparse, + is_categorical, is_categorical_dtype, + is_integer_dtype, + is_datetime64tz_dtype, + is_bool_dtype, + is_object_dtype, + is_float_dtype, + is_numeric_v_string_like, is_extension_type, + is_extension_array_dtype, + is_list_like, + is_re, + is_re_compilable, + pandas_dtype) +from pandas.core.dtypes.cast import ( + maybe_downcast_to_dtype, + maybe_upcast, + maybe_promote, + infer_dtype_from, + infer_dtype_from_scalar, + soft_convert_objects, + maybe_convert_objects, + astype_nansafe, + find_common_type, + maybe_infer_dtype_type) +from pandas.core.dtypes.missing import ( + isna, notna, array_equivalent, + _isna_compat, + is_null_datelike_scalar) +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCDatetimeIndex, + ABCExtensionArray, + ABCIndexClass) + +import pandas.core.common as com +import pandas.core.algorithms as algos +import pandas.core.missing as missing +from pandas.core.base import PandasObject + +from pandas.core.arrays import Categorical +from pandas.core.sparse.array import SparseArray + +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexing import check_setitem_lengths + +from pandas.io.formats.printing import pprint_thing + + +class Block(PandasObject): + """ + Canonical n-dimensional unit of homogeneous dtype contained in a pandas + data structure + + Index-ignorant; let the container take care of that + """ + __slots__ = ['_mgr_locs', 'values', 'ndim'] + is_numeric = False + is_float = False + is_integer = False + is_complex = False + is_datetime = False + is_datetimetz = False + is_timedelta = False + is_bool = False + is_object = False + is_categorical = False + is_sparse = False + is_extension = False + _box_to_block_values = True + _can_hold_na = False + _can_consolidate = True + _verify_integrity = True + _validate_ndim = True + _ftype = 'dense' + _concatenator = staticmethod(np.concatenate) + + def __init__(self, values, placement, ndim=None): + self.ndim = self._check_ndim(values, ndim) + self.mgr_locs = placement + self.values = values + + if (self._validate_ndim and self.ndim and + len(self.mgr_locs) != len(self.values)): + raise ValueError( + 'Wrong number of items passed {val}, placement implies ' + '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) + + def _check_ndim(self, values, ndim): + """ndim inference and validation. + + Infers ndim from 'values' if not provided to __init__. + Validates that values.ndim and ndim are consistent if and only if + the class variable '_validate_ndim' is True. 
+ + Parameters + ---------- + values : array-like + ndim : int or None + + Returns + ------- + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + if ndim is None: + ndim = values.ndim + + if self._validate_ndim and values.ndim != ndim: + msg = ("Wrong number of dimensions. values.ndim != ndim " + "[{} != {}]") + raise ValueError(msg.format(values.ndim, ndim)) + + return ndim + + @property + def _holder(self): + """The array-like that can hold the underlying values. + + None for 'Block', overridden by subclasses that don't + use an ndarray. + """ + return None + + @property + def _consolidate_key(self): + return (self._can_consolidate, self.dtype.name) + + @property + def _is_single_block(self): + return self.ndim == 1 + + @property + def is_view(self): + """ return a boolean if I am possibly a view """ + return self.values.base is not None + + @property + def is_datelike(self): + """ return True if I am datelike (datetime or timedelta) """ + return self.is_datetime or self.is_timedelta + + def is_categorical_astype(self, dtype): + """ + validate that we have an astypeable dtype to categorical, + returns a boolean if we are a categorical + """ + if dtype is Categorical or dtype is CategoricalDtype: + # this is a pd.Categorical, but is not + # a valid type for astyping + raise TypeError("invalid type {0} for astype".format(dtype)) + + elif is_categorical_dtype(dtype): + return True + + return False + + def external_values(self, dtype=None): + """ return an outside world format, currently just the ndarray """ + return self.values + + def internal_values(self, dtype=None): + """ return an internal format, currently just the ndarray + this should be the pure internal API format + """ + return self.values + + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self.internal_values() + + def get_values(self, dtype=None): + """ + return an internal format, currently just the ndarray + this is often overridden to handle to_dense like operations + """ + if is_object_dtype(dtype): + return self.values.astype(object) + return self.values + + def to_dense(self): + return self.values.view() + + @property + def _na_value(self): + return np.nan + + @property + def fill_value(self): + return np.nan + + @property + def mgr_locs(self): + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, libinternals.BlockPlacement): + new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array + """ + return self.dtype + + def make_block(self, values, placement=None, ndim=None): + """ + Create a new block, with type inference; propagate any values that are + not specified + """ + if placement is None: + placement = self.mgr_locs + if ndim is None: + ndim = self.ndim + + return make_block(values, placement=placement, ndim=ndim) + + def make_block_scalar(self, values): + """ + Create a ScalarBlock + """ + return ScalarBlock(values) + + def make_block_same_class(self, values, placement=None, ndim=None, + dtype=None): + """ Wrap given values in a block of same type as self. 
""" + if dtype is not None: + # issue 19431 fastparquet is passing this + warnings.warn("dtype argument is deprecated, will be removed " + "in a future release.", DeprecationWarning) + if placement is None: + placement = self.mgr_locs + return make_block(values, placement=placement, ndim=ndim, + klass=self.__class__, dtype=dtype) + + def __unicode__(self): + + # don't want to print out all of the items here + name = pprint_thing(self.__class__.__name__) + if self._is_single_block: + + result = '{name}: {len} dtype: {dtype}'.format( + name=name, len=len(self), dtype=self.dtype) + + else: + + shape = ' x '.join(pprint_thing(s) for s in self.shape) + result = '{name}: {index}, {shape}, dtype: {dtype}'.format( + name=name, index=pprint_thing(self.mgr_locs.indexer), + shape=shape, dtype=self.dtype) + + return result + + def __len__(self): + return len(self.values) + + def __getstate__(self): + return self.mgr_locs.indexer, self.values + + def __setstate__(self, state): + self.mgr_locs = libinternals.BlockPlacement(state[0]) + self.values = state[1] + self.ndim = self.values.ndim + + def _slice(self, slicer): + """ return a slice of my values """ + return self.values[slicer] + + def reshape_nd(self, labels, shape, ref_items, mgr=None): + """ + Parameters + ---------- + labels : list of new axis labels + shape : new shape + ref_items : new ref_items + + return a new block that is transformed to a nd block + """ + return _block2d_to_blocknd(values=self.get_values().T, + placement=self.mgr_locs, shape=shape, + labels=labels, ref_items=ref_items) + + def getitem_block(self, slicer, new_mgr_locs=None): + """ + Perform __getitem__-like, return result as block. + + As of now, only supports slices that preserve dimensionality. + """ + if new_mgr_locs is None: + if isinstance(slicer, tuple): + axis0_slicer = slicer[0] + else: + axis0_slicer = slicer + new_mgr_locs = self.mgr_locs[axis0_slicer] + + new_values = self._slice(slicer) + + if self._validate_ndim and new_values.ndim != self.ndim: + raise ValueError("Only same dim slicing is allowed") + + return self.make_block_same_class(new_values, new_mgr_locs) + + @property + def shape(self): + return self.values.shape + + @property + def dtype(self): + return self.values.dtype + + @property + def ftype(self): + return "{dtype}:{ftype}".format(dtype=self.dtype, ftype=self._ftype) + + def merge(self, other): + return _merge_blocks([self, other]) + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + return self.make_block_same_class( + values, placement=placement or slice(0, len(values), 1)) + + def iget(self, i): + return self.values[i] + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value + + Returns + ------- + None + """ + self.values[locs] = values + + def delete(self, loc): + """ + Delete given loc(-s) from block in-place. 
+ """ + self.values = np.delete(self.values, loc, 0) + self.mgr_locs = self.mgr_locs.delete(loc) + + def apply(self, func, mgr=None, **kwargs): + """ apply the function to my values; return a block if we are not + one + """ + with np.errstate(all='ignore'): + result = func(self.values, **kwargs) + if not isinstance(result, Block): + result = self.make_block(values=_block_shape(result, + ndim=self.ndim)) + + return result + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + """ fillna on the block with the value. If we fail, then convert to + ObjectBlock and try again + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + + if not self._can_hold_na: + if inplace: + return self + else: + return self.copy() + + mask = isna(self.values) + if limit is not None: + if not is_integer(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + if self.ndim > 2: + raise NotImplementedError("number of dimensions for 'fillna' " + "is currently limited to 2") + mask[mask.cumsum(self.ndim - 1) > limit] = False + + # fillna, but if we cannot coerce, then try again as an ObjectBlock + try: + values, _, _, _ = self._try_coerce_args(self.values, value) + blocks = self.putmask(mask, value, inplace=inplace) + blocks = [b.make_block(values=self._try_coerce_result(b.values)) + for b in blocks] + return self._maybe_downcast(blocks, downcast) + except (TypeError, ValueError): + + # we can't process the value, but nothing to do + if not mask.any(): + return self if inplace else self.copy() + + # operate column-by-column + def f(m, v, i): + block = self.coerce_to_target_dtype(value) + + # slice out our block + if i is not None: + block = block.getitem_block(slice(i, i + 1)) + return block.fillna(value, + limit=limit, + inplace=inplace, + downcast=None) + + return self.split_and_operate(mask, f, inplace) + + def split_and_operate(self, mask, f, inplace): + """ + split the block per-column, and apply the callable f + per-column, return a new block for each. Handle + masking which will not change a block unless needed. + + Parameters + ---------- + mask : 2-d boolean mask + f : callable accepting (1d-mask, 1d values, indexer) + inplace : boolean + + Returns + ------- + list of blocks + """ + + if mask is None: + mask = np.ones(self.shape, dtype=bool) + new_values = self.values + + def make_a_block(nv, ref_loc): + if isinstance(nv, Block): + block = nv + elif isinstance(nv, list): + block = nv[0] + else: + # Put back the dimension that was taken from it and make + # a block out of the result. 
+ try: + nv = _block_shape(nv, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass + block = self.make_block(values=nv, + placement=ref_loc) + return block + + # ndim == 1 + if self.ndim == 1: + if mask.any(): + nv = f(mask, new_values, None) + else: + nv = new_values if inplace else new_values.copy() + block = make_a_block(nv, self.mgr_locs) + return [block] + + # ndim > 1 + new_blocks = [] + for i, ref_loc in enumerate(self.mgr_locs): + m = mask[i] + v = new_values[i] + + # need a new block + if m.any(): + nv = f(m, v, i) + else: + nv = v if inplace else v.copy() + + block = make_a_block(nv, [ref_loc]) + new_blocks.append(block) + + return new_blocks + + def _maybe_downcast(self, blocks, downcast=None): + + # no need to downcast our float + # unless indicated + if downcast is None and self.is_float: + return blocks + elif downcast is None and (self.is_timedelta or self.is_datetime): + return blocks + + if not isinstance(blocks, list): + blocks = [blocks] + return _extend_blocks([b.downcast(downcast) for b in blocks]) + + def downcast(self, dtypes=None, mgr=None): + """ try to downcast each item to the dict of dtypes if present """ + + # turn it off completely + if dtypes is False: + return self + + values = self.values + + # single block handling + if self._is_single_block: + + # try to cast all non-floats here + if dtypes is None: + dtypes = 'infer' + + nv = maybe_downcast_to_dtype(values, dtypes) + return self.make_block(nv) + + # ndim > 1 + if dtypes is None: + return self + + if not (dtypes == 'infer' or isinstance(dtypes, dict)): + raise ValueError("downcast must have a dictionary or 'infer' as " + "its argument") + + # operate column-by-column + # this is expensive as it splits the blocks items-by-item + def f(m, v, i): + + if dtypes == 'infer': + dtype = 'infer' + else: + raise AssertionError("dtypes as dict is not supported yet") + + if dtype is not None: + v = maybe_downcast_to_dtype(v, dtype) + return v + + return self.split_and_operate(None, f, False) + + def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): + return self._astype(dtype, copy=copy, errors=errors, values=values, + **kwargs) + + def _astype(self, dtype, copy=False, errors='raise', values=None, + klass=None, mgr=None, **kwargs): + """Coerce to the new type + + Parameters + ---------- + dtype : str, dtype convertible + copy : boolean, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'ignore' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + Block + """ + errors_legal_values = ('raise', 'ignore') + + if errors not in errors_legal_values: + invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. " + "Supplied value is '{}'".format( + list(errors_legal_values), errors)) + raise ValueError(invalid_arg) + + if (inspect.isclass(dtype) and + issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))): + msg = ("Expected an instance of {}, but got the class instead. " + "Try instantiating 'dtype'.".format(dtype.__name__)) + raise TypeError(msg) + + # may need to convert to categorical + if self.is_categorical_astype(dtype): + + # deprecated 17636 + if ('categories' in kwargs or 'ordered' in kwargs): + if isinstance(dtype, CategoricalDtype): + raise TypeError( + "Cannot specify a CategoricalDtype and also " + "`categories` or `ordered`. 
Use " + "`dtype=CategoricalDtype(categories, ordered)`" + " instead.") + warnings.warn("specifying 'categories' or 'ordered' in " + ".astype() is deprecated; pass a " + "CategoricalDtype instead", + FutureWarning, stacklevel=7) + + categories = kwargs.get('categories', None) + ordered = kwargs.get('ordered', None) + if com._any_not_none(categories, ordered): + dtype = CategoricalDtype(categories, ordered) + + if is_categorical_dtype(self.values): + # GH 10696/18593: update an existing categorical efficiently + return self.make_block(self.values.astype(dtype, copy=copy)) + + return self.make_block(Categorical(self.values, dtype=dtype)) + + # convert dtypes if needed + dtype = pandas_dtype(dtype) + + # astype processing + if is_dtype_equal(self.dtype, dtype): + if copy: + return self.copy() + return self + + if klass is None: + if dtype == np.object_: + klass = ObjectBlock + try: + # force the copy here + if values is None: + + if issubclass(dtype.type, + (compat.text_type, compat.string_types)): + + # use native type formatting for datetime/tz/timedelta + if self.is_datelike: + values = self.to_native_types() + + # astype formatting + else: + values = self.get_values() + + else: + values = self.get_values(dtype=dtype) + + # _astype_nansafe works fine with 1-d only + values = astype_nansafe(values.ravel(), dtype, copy=True) + + # TODO(extension) + # should we make this attribute? + try: + values = values.reshape(self.shape) + except AttributeError: + pass + + newb = make_block(values, placement=self.mgr_locs, + klass=klass) + except: + if errors == 'raise': + raise + newb = self.copy() if copy else self + + if newb.is_numeric and self.is_numeric: + if newb.shape != self.shape: + raise TypeError( + "cannot set astype for copy = [{copy}] for dtype " + "({dtype} [{itemsize}]) with smaller itemsize than " + "current ({newb_dtype} [{newb_size}])".format( + copy=copy, dtype=self.dtype.name, + itemsize=self.itemsize, newb_dtype=newb.dtype.name, + newb_size=newb.itemsize)) + return newb + + def convert(self, copy=True, **kwargs): + """ attempt to coerce any object types to better types return a copy + of the block (if copy = True) by definition we are not an ObjectBlock + here! 
+ """ + + return self.copy() if copy else self + + def _can_hold_element(self, element): + """ require the same dtype as ourselves """ + dtype = self.values.dtype.type + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, dtype) + return isinstance(element, dtype) + + def _try_cast_result(self, result, dtype=None): + """ try to cast the result to our original type, we may have + roundtripped thru object in the mean-time + """ + if dtype is None: + dtype = self.dtype + + if self.is_integer or self.is_bool or self.is_datetime: + pass + elif self.is_float and result.dtype == self.dtype: + + # protect against a bool/object showing up here + if isinstance(dtype, compat.string_types) and dtype == 'infer': + return result + if not isinstance(dtype, type): + dtype = dtype.type + if issubclass(dtype, (np.bool_, np.object_)): + if issubclass(dtype, np.bool_): + if isna(result).all(): + return result.astype(np.bool_) + else: + result = result.astype(np.object_) + result[result == 1] = True + result[result == 0] = False + return result + else: + return result.astype(np.object_) + + return result + + # may need to change the dtype here + return maybe_downcast_to_dtype(result, dtype) + + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + + if np.any(notna(other)) and not self._can_hold_element(other): + # coercion issues + # let higher levels handle + raise TypeError("cannot convert {} to an {}".format( + type(other).__name__, + type(self).__name__.lower().replace('Block', ''))) + + return values, False, other, False + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + return result + + def _try_coerce_and_cast_result(self, result, dtype=None): + result = self._try_coerce_result(result) + result = self._try_cast_result(result, dtype=dtype) + return result + + def to_native_types(self, slicer=None, na_rep='nan', quoting=None, + **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.get_values() + + if slicer is not None: + values = values[:, slicer] + mask = isna(values) + + if not self.is_object and not quoting: + values = values.astype(str) + else: + values = np.array(values, dtype='object') + + values[mask] = na_rep + return values + + # block actions #### + def copy(self, deep=True, mgr=None): + """ copy constructor """ + values = self.values + if deep: + values = values.copy() + return self.make_block_same_class(values) + + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + """ replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. regex is not used here. + It is used in ObjectBlocks. It is here for API + compatibility. 
+ """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + original_to_replace = to_replace + + # try to replace, if we raise an error, convert to ObjectBlock and + # retry + try: + values, _, to_replace, _ = self._try_coerce_args(self.values, + to_replace) + mask = missing.mask_missing(values, to_replace) + if filter is not None: + filtered_out = ~self.mgr_locs.isin(filter) + mask[filtered_out.nonzero()[0]] = False + + blocks = self.putmask(mask, value, inplace=inplace) + if convert: + blocks = [b.convert(by_item=True, numeric=False, + copy=not inplace) for b in blocks] + return blocks + except (TypeError, ValueError): + + # try again with a compatible block + block = self.astype(object) + return block.replace( + to_replace=original_to_replace, value=value, inplace=inplace, + filter=filter, regex=regex, convert=convert) + + def _replace_single(self, *args, **kwargs): + """ no-op on a non-ObjectBlock """ + return self if kwargs['inplace'] else self.copy() + + def setitem(self, indexer, value, mgr=None): + """Set the value inplace, returning a a maybe different typed block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice + The subset of self.values to set + value : object + The value being set + mgr : BlockPlacement, optional + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. + """ + # coerce None values, if appropriate + if value is None: + if self.is_numeric: + value = np.nan + + # coerce if block dtype can store value + values = self.values + try: + values, _, value, _ = self._try_coerce_args(values, value) + # can keep its own dtype + if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, + value.dtype): + dtype = self.dtype + else: + dtype = 'infer' + + except (TypeError, ValueError): + # current dtype cannot store value, coerce to common dtype + find_dtype = False + + if hasattr(value, 'dtype'): + dtype = value.dtype + find_dtype = True + + elif lib.is_scalar(value): + if isna(value): + # NaN promotion is handled in latter path + dtype = False + else: + dtype, _ = infer_dtype_from_scalar(value, + pandas_dtype=True) + find_dtype = True + else: + dtype = 'infer' + + if find_dtype: + dtype = find_common_type([values.dtype, dtype]) + if not is_dtype_equal(self.dtype, dtype): + b = self.astype(dtype) + return b.setitem(indexer, value, mgr=mgr) + + # value must be storeable at this moment + arr_value = np.array(value) + + # cast the values to a type that can hold nan (if necessary) + if not self._can_hold_element(value): + dtype, _ = maybe_promote(arr_value.dtype) + values = values.astype(dtype) + + transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) + values = transf(values) + + # length checking + check_setitem_lengths(indexer, value, values) + + def _is_scalar_indexer(indexer): + # return True if we are all scalar indexers + + if arr_value.ndim == 1: + if not isinstance(indexer, tuple): + indexer = tuple([indexer]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 + for idx in indexer) + return False + + def _is_empty_indexer(indexer): + # return a boolean if we have an empty indexer + + if is_list_like(indexer) and not len(indexer): + return True + if arr_value.ndim == 1: + if not isinstance(indexer, tuple): + indexer = tuple([indexer]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 + for idx in indexer) + return False + + # empty indexers + # 8669 (empty) + if _is_empty_indexer(indexer): + pass + + # setting a single element for each 
dim and with a rhs that could + # be say a list + # GH 6043 + elif _is_scalar_indexer(indexer): + values[indexer] = value + + # if we are an exact match (ex-broadcasting), + # then use the resultant dtype + elif (len(arr_value.shape) and + arr_value.shape[0] == values.shape[0] and + np.prod(arr_value.shape) == np.prod(values.shape)): + values[indexer] = value + try: + values = values.astype(arr_value.dtype) + except ValueError: + pass + + # set + else: + values[indexer] = value + + # coerce and try to infer the dtypes of the result + values = self._try_coerce_and_cast_result(values, dtype) + block = self.make_block(transf(values)) + return block + + def putmask(self, mask, new, align=True, inplace=False, axis=0, + transpose=False, mgr=None): + """ putmask the data to the block; it is possible that we may create a + new dtype of block + + return the resulting block(s) + + Parameters + ---------- + mask : the condition to respect + new : a ndarray/object + align : boolean, perform alignment on other/cond, default is True + inplace : perform inplace modification, default is False + axis : int + transpose : boolean + Set to True if self is stored with axes reversed + + Returns + ------- + a list of new blocks, the result of the putmask + """ + + new_values = self.values if inplace else self.values.copy() + + new = getattr(new, 'values', new) + mask = getattr(mask, 'values', mask) + + # if we are passed a scalar None, convert it here + if not is_list_like(new) and isna(new) and not self.is_object: + new = self.fill_value + + if self._can_hold_element(new): + _, _, new, _ = self._try_coerce_args(new_values, new) + + if transpose: + new_values = new_values.T + + # If the default repeat behavior in np.putmask would go in the + # wrong direction, then explicitly repeat and reshape new instead + if getattr(new, 'ndim', 0) >= 1: + if self.ndim - 1 == new.ndim and axis == 1: + new = np.repeat( + new, new_values.shape[-1]).reshape(self.shape) + new = new.astype(new_values.dtype) + + # we require exact matches between the len of the + # values we are setting (or is compat). np.putmask + # doesn't check this and will simply truncate / pad + # the output, but we want sane error messages + # + # TODO: this prob needs some better checking + # for 2D cases + if ((is_list_like(new) and + np.any(mask[mask]) and + getattr(new, 'ndim', 1) == 1)): + + if not (mask.shape[-1] == len(new) or + mask[mask].shape[-1] == len(new) or + len(new) == 1): + raise ValueError("cannot assign mismatch " + "length to masked array") + + np.putmask(new_values, mask, new) + + # maybe upcast me + elif mask.any(): + if transpose: + mask = mask.T + if isinstance(new, np.ndarray): + new = new.T + axis = new_values.ndim - axis - 1 + + # Pseudo-broadcast + if getattr(new, 'ndim', 0) >= 1: + if self.ndim - 1 == new.ndim: + new_shape = list(new.shape) + new_shape.insert(axis, 1) + new = new.reshape(tuple(new_shape)) + + # operate column-by-column + def f(m, v, i): + + if i is None: + # ndim==1 case. 
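The masked path above is reachable from boolean setitem; a sketch of the resulting upcast (behavior as of this patch):

import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])  # backed by an IntBlock
s[s > 2] = np.nan         # putmask: int64 cannot hold NaN
assert s.dtype == np.float64  # _putmask_smart promoted the block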
+ n = new + else: + + if isinstance(new, np.ndarray): + n = np.squeeze(new[i % new.shape[0]]) + else: + n = np.array(new) + + # type of the new block + dtype, _ = maybe_promote(n.dtype) + + # we need to explicitly astype here to make a copy + n = n.astype(dtype) + + nv = _putmask_smart(v, m, n) + return nv + + new_blocks = self.split_and_operate(mask, f, inplace) + return new_blocks + + if inplace: + return [self] + + if transpose: + new_values = new_values.T + + return [self.make_block(new_values)] + + def coerce_to_target_dtype(self, other): + """ + coerce the current block to a dtype compat for other + we will return a block, possibly object, and not raise + + we can also safely try to coerce to the same dtype + and will receive the same block + """ + + # if we cannot then coerce to object + dtype, _ = infer_dtype_from(other, pandas_dtype=True) + + if is_dtype_equal(self.dtype, dtype): + return self + + if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): + # we don't upcast to bool + return self.astype(object) + + elif ((self.is_float or self.is_complex) and + (is_integer_dtype(dtype) or is_float_dtype(dtype))): + # don't coerce float/complex to int + return self + + elif (self.is_datetime or + is_datetime64_dtype(dtype) or + is_datetime64tz_dtype(dtype)): + + # not a datetime + if not ((is_datetime64_dtype(dtype) or + is_datetime64tz_dtype(dtype)) and self.is_datetime): + return self.astype(object) + + # don't upcast timezone with different timezone or no timezone + mytz = getattr(self.dtype, 'tz', None) + othertz = getattr(dtype, 'tz', None) + + if str(mytz) != str(othertz): + return self.astype(object) + + raise AssertionError("possible recursion in " + "coerce_to_target_dtype: {} {}".format( + self, other)) + + elif (self.is_timedelta or is_timedelta64_dtype(dtype)): + + # not a timedelta + if not (is_timedelta64_dtype(dtype) and self.is_timedelta): + return self.astype(object) + + raise AssertionError("possible recursion in " + "coerce_to_target_dtype: {} {}".format( + self, other)) + + try: + return self.astype(dtype) + except (ValueError, TypeError): + pass + + return self.astype(object) + + def interpolate(self, method='pad', axis=0, index=None, values=None, + inplace=False, limit=None, limit_direction='forward', + limit_area=None, fill_value=None, coerce=False, + downcast=None, mgr=None, **kwargs): + + inplace = validate_bool_kwarg(inplace, 'inplace') + + def check_int_bool(self, inplace): + # Only FloatBlocks will contain NaNs. 
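coerce_to_target_dtype (above) is what setitem falls back to when a value does not fit the block's dtype; illustratively (as of this patch):

import pandas as pd

s = pd.Series([1, 2, 3])
s[0] = 1.5  # an int64 block cannot hold 1.5, so a common dtype is found
assert s.dtype == 'float64'

t = pd.Series(pd.date_range('2019-01-01', periods=2))
t[0] = 'foo'  # datetime64 cannot hold a string, so it falls back to object
assert t.dtype == object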
+ # timedelta subclasses IntBlock + if (self.is_bool or self.is_integer) and not self.is_timedelta: + if inplace: + return self + else: + return self.copy() + + # a fill na type method + try: + m = missing.clean_fill_method(method) + except: + m = None + + if m is not None: + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate_with_fill(method=m, axis=axis, + inplace=inplace, limit=limit, + fill_value=fill_value, + coerce=coerce, + downcast=downcast, mgr=mgr) + # try an interp method + try: + m = missing.clean_interp_method(method, **kwargs) + except: + m = None + + if m is not None: + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate(method=m, index=index, values=values, + axis=axis, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, inplace=inplace, + downcast=downcast, mgr=mgr, **kwargs) + + raise ValueError("invalid method '{0}' to interpolate.".format(method)) + + def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, + limit=None, fill_value=None, coerce=False, + downcast=None, mgr=None): + """ fillna but using the interpolate machinery """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + + # if we are coercing, then don't force the conversion + # if the block can't hold the type + if coerce: + if not self._can_hold_na: + if inplace: + return [self] + else: + return [self.copy()] + + values = self.values if inplace else self.values.copy() + values, _, fill_value, _ = self._try_coerce_args(values, fill_value) + values = missing.interpolate_2d(values, method=method, axis=axis, + limit=limit, fill_value=fill_value, + dtype=self.dtype) + values = self._try_coerce_result(values) + + blocks = [self.make_block_same_class(values, ndim=self.ndim)] + return self._maybe_downcast(blocks, downcast) + + def _interpolate(self, method=None, index=None, values=None, + fill_value=None, axis=0, limit=None, + limit_direction='forward', limit_area=None, + inplace=False, downcast=None, mgr=None, **kwargs): + """ interpolate using scipy wrappers """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + data = self.values if inplace else self.values.copy() + + # only deal with floats + if not self.is_float: + if not self.is_integer: + return self + data = data.astype(np.float64) + + if fill_value is None: + fill_value = self.fill_value + + if method in ('krogh', 'piecewise_polynomial', 'pchip'): + if not index.is_monotonic: + raise ValueError("{0} interpolation requires that the " + "index be monotonic.".format(method)) + # process 1-d slices in the axis direction + + def func(x): + + # process a 1-d slice, returning it + # should the axis argument be handled below in apply_along_axis? + # i.e. 
+ return missing.interpolate_1d(index, x, method=method, limit=limit,
+ limit_direction=limit_direction,
+ limit_area=limit_area,
+ fill_value=fill_value,
+ bounds_error=False, **kwargs)
+
+ # interp each column independently
+ interp_values = np.apply_along_axis(func, axis, data)
+
+ blocks = [self.make_block_same_class(interp_values)]
+ return self._maybe_downcast(blocks, downcast)
+
+ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
+ """
+ Take values according to indexer and return them as a block.
+ """
+
+ # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
+ # so need to preserve types
+ # sparse is treated like an ndarray, but needs .get_values() shaping
+
+ values = self.values
+ if self.is_sparse:
+ values = self.get_values()
+
+ if fill_tuple is None:
+ fill_value = self.fill_value
+ new_values = algos.take_nd(values, indexer, axis=axis,
+ allow_fill=False)
+ else:
+ fill_value = fill_tuple[0]
+ new_values = algos.take_nd(values, indexer, axis=axis,
+ allow_fill=True, fill_value=fill_value)
+
+ if new_mgr_locs is None:
+ if axis == 0:
+ slc = libinternals.indexer_as_slice(indexer)
+ if slc is not None:
+ new_mgr_locs = self.mgr_locs[slc]
+ else:
+ new_mgr_locs = self.mgr_locs[indexer]
+ else:
+ new_mgr_locs = self.mgr_locs
+
+ if not is_dtype_equal(new_values.dtype, self.dtype):
+ return self.make_block(new_values, new_mgr_locs)
+ else:
+ return self.make_block_same_class(new_values, new_mgr_locs)
+
+ def diff(self, n, axis=1, mgr=None):
+ """ return block for the diff of the values """
+ new_values = algos.diff(self.values, n, axis=axis)
+ return [self.make_block(values=new_values)]
+
+ def shift(self, periods, axis=0, mgr=None):
+ """ shift the block by periods, possibly upcast """
+
+ # convert integer to float if necessary. need to do a lot more than
+ # that, handle boolean etc also
+ new_values, fill_value = maybe_upcast(self.values)
+
+ # make sure array sent to np.roll is c_contiguous
+ f_ordered = new_values.flags.f_contiguous
+ if f_ordered:
+ new_values = new_values.T
+ axis = new_values.ndim - axis - 1
+
+ if np.prod(new_values.shape):
+ new_values = np.roll(new_values, ensure_platform_int(periods),
+ axis=axis)
+
+ axis_indexer = [slice(None)] * self.ndim
+ if periods > 0:
+ axis_indexer[axis] = slice(None, periods)
+ else:
+ axis_indexer[axis] = slice(periods, None)
+ new_values[tuple(axis_indexer)] = fill_value
+
+ # restore original order
+ if f_ordered:
+ new_values = new_values.T
+
+ return [self.make_block(new_values)]
+
+ def eval(self, func, other, errors='raise', try_cast=False, mgr=None):
+ """
+ evaluate the block; return result block from the result
+
+ Parameters
+ ----------
+ func : how to combine self, other
+ other : a ndarray/object
+ errors : str, {'raise', 'ignore'}, default 'raise'
+ - ``raise`` : allow exceptions to be raised
+ - ``ignore`` : suppress exceptions. On error return original object
+
+ try_cast : try casting the results to the input type
+
+ Returns
+ -------
+ a new block, the result of the func
+ """
+ orig_other = other
+ values = self.values
+
+ other = getattr(other, 'values', other)
+
+ # make sure that we can broadcast
+ is_transposed = False
+ if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
+ if values.ndim != other.ndim:
+ is_transposed = True
+ else:
+ if values.shape == other.shape[::-1]:
+ is_transposed = True
+ elif values.shape[0] == other.shape[-1]:
+ is_transposed = True
+ else:
+ # this is a broadcast error here
+ raise ValueError(
+ "cannot broadcast shape [{t_shape}] with "
+ "block values [{oth_shape}]".format(
+ t_shape=values.T.shape, oth_shape=other.shape))
+
+ transf = (lambda x: x.T) if is_transposed else (lambda x: x)
+
+ # coerce/transpose the args if needed
+ try:
+ values, values_mask, other, other_mask = self._try_coerce_args(
+ transf(values), other)
+ except TypeError:
+ block = self.coerce_to_target_dtype(orig_other)
+ return block.eval(func, orig_other,
+ errors=errors,
+ try_cast=try_cast, mgr=mgr)
+
+ # get the result, may need to transpose the other
+ def get_result(other):
+
+ # avoid numpy warning of comparisons against None
+ if other is None:
+ result = not func.__name__ == 'eq'
+
+ # avoid numpy warning of elementwise comparisons to object
+ elif is_numeric_v_string_like(values, other):
+ result = False
+
+ # avoid numpy warning of elementwise comparisons
+ elif func.__name__ == 'eq':
+ if is_list_like(other) and not isinstance(other, np.ndarray):
+ other = np.asarray(other)
+
+ # if we can broadcast, then ok
+ if values.shape[-1] != other.shape[-1]:
+ return False
+ result = func(values, other)
+ else:
+ result = func(values, other)
+
+ # mask if needed
+ if isinstance(values_mask, np.ndarray) and values_mask.any():
+ result = result.astype('float64', copy=False)
+ result[values_mask] = np.nan
+ if other_mask is True:
+ result = result.astype('float64', copy=False)
+ result[:] = np.nan
+ elif isinstance(other_mask, np.ndarray) and other_mask.any():
+ result = result.astype('float64', copy=False)
+ result[other_mask.ravel()] = np.nan
+
+ return result
+
+ # error handler if we have an issue operating with the function
+ def handle_error():
+
+ if errors == 'raise':
+ # The 'detail' variable is defined in outer scope.
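+ # A rough sketch of the two error modes, assuming object-dtype
+ # values that do not support the operator:
+ # >>> values = np.array(['a', 'b'], dtype=object)
+ # >>> values + 1 # raises TypeError element-wise
+ # errors='raise' re-raises that TypeError with context added,
+ # while errors='ignore' returns an all-NaN object array below.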
+ raise TypeError(
+ 'Could not operate {other!r} with block values '
+ '{detail!s}'.format(other=other, detail=detail)) # noqa
+ else:
+ # return the values
+ result = np.empty(values.shape, dtype='O')
+ result.fill(np.nan)
+ return result
+
+ # get the result
+ try:
+ with np.errstate(all='ignore'):
+ result = get_result(other)
+
+ # if we have an invalid shape/broadcast error
+ # GH4576, so raise instead of allowing to pass through
+ except ValueError as detail:
+ raise
+ except Exception as detail:
+ result = handle_error()
+
+ # technically a broadcast error in numpy can 'work' by returning a
+ # boolean False
+ if not isinstance(result, np.ndarray):
+
+ # differentiate between an invalid ndarray-ndarray comparison
+ # and an invalid type comparison
+ if isinstance(values, np.ndarray) and is_list_like(other):
+ raise ValueError(
+ 'Invalid broadcasting comparison [{other!r}] with '
+ 'block values'.format(other=other))
+
+ raise TypeError('Could not compare [{other!r}] '
+ 'with block values'.format(other=other))
+
+ # transpose if needed
+ result = transf(result)
+
+ # try to cast if requested
+ if try_cast:
+ result = self._try_cast_result(result)
+
+ result = _block_shape(result, ndim=self.ndim)
+ return [self.make_block(result)]
+
+ def where(self, other, cond, align=True, errors='raise',
+ try_cast=False, axis=0, transpose=False, mgr=None):
+ """
+ evaluate the block; return result block(s) from the result
+
+ Parameters
+ ----------
+ other : a ndarray/object
+ cond : the condition to respect
+ align : boolean, perform alignment on other/cond
+ errors : str, {'raise', 'ignore'}, default 'raise'
+ - ``raise`` : allow exceptions to be raised
+ - ``ignore`` : suppress exceptions. On error return original object
+
+ axis : int
+ transpose : boolean
+ Set to True if self is stored with axes reversed
+
+ Returns
+ -------
+ a new block(s), the result of the func
+ """
+ import pandas.core.computation.expressions as expressions
+ assert errors in ['raise', 'ignore']
+
+ values = self.values
+ orig_other = other
+ if transpose:
+ values = values.T
+
+ other = getattr(other, '_values', getattr(other, 'values', other))
+ cond = getattr(cond, 'values', cond)
+
+ # If the default broadcasting would go in the wrong direction, then
+ # explicitly reshape other instead
+ if getattr(other, 'ndim', 0) >= 1:
+ if values.ndim - 1 == other.ndim and axis == 1:
+ other = other.reshape(tuple(other.shape + (1, )))
+ elif transpose and values.ndim == self.ndim - 1:
+ cond = cond.T
+
+ if not hasattr(cond, 'shape'):
+ raise ValueError("where must have a condition that is ndarray "
+ "like")
+
+ # our where function
+ def func(cond, values, other):
+ if cond.ravel().all():
+ return values
+
+ values, values_mask, other, other_mask = self._try_coerce_args(
+ values, other)
+
+ try:
+ return self._try_coerce_result(expressions.where(
+ cond, values, other))
+ except Exception as detail:
+ if errors == 'raise':
+ raise TypeError(
+ 'Could not operate [{other!r}] with block values '
+ '[{detail!s}]'.format(other=other, detail=detail))
+ else:
+ # return the values
+ result = np.empty(values.shape, dtype='float64')
+ result.fill(np.nan)
+ return result
+
+ # see if we can operate on the entire block, or need item-by-item
+ # or if we are a single block (ndim == 1)
+ try:
+ result = func(cond, values, other)
+ except TypeError:
+
+ # we cannot coerce, return a compat dtype
+ # we are explicitly ignoring errors
+ block = self.coerce_to_target_dtype(other)
+ blocks = block.where(orig_other, cond, align=align,
+ errors=errors,
+ try_cast=try_cast, axis=axis,
+ transpose=transpose)
+ return self._maybe_downcast(blocks, 'infer')
+
+ if self._can_hold_na or self.ndim == 1:
+
+ if transpose:
+ result = result.T
+
+ # try to cast if requested
+ if try_cast:
+ result = self._try_cast_result(result)
+
+ return self.make_block(result)
+
+ # might need to separate out blocks
+ axis = cond.ndim - 1
+ cond = cond.swapaxes(axis, 0)
+ mask = np.array([cond[i].all() for i in range(cond.shape[0])],
+ dtype=bool)
+
+ result_blocks = []
+ for m in [mask, ~mask]:
+ if m.any():
+ r = self._try_cast_result(result.take(m.nonzero()[0],
+ axis=axis))
+ result_blocks.append(
+ self.make_block(r.T, placement=self.mgr_locs[m]))
+
+ return result_blocks
+
+ def equals(self, other):
+ if self.dtype != other.dtype or self.shape != other.shape:
+ return False
+ return array_equivalent(self.values, other.values)
+
+ def _unstack(self, unstacker_func, new_columns):
+ """Return a list of unstacked blocks of self
+
+ Parameters
+ ----------
+ unstacker_func : callable
+ Partially applied unstacker.
+ new_columns : Index
+ All columns of the unstacked BlockManager.
+
+ Returns
+ -------
+ blocks : list of Block
+ New blocks of unstacked values.
+ mask : array_like of bool
+ The mask of columns of `blocks` we should keep.
+ """
+ unstacker = unstacker_func(self.values.T)
+ new_items = unstacker.get_new_columns()
+ new_placement = new_columns.get_indexer(new_items)
+ new_values, mask = unstacker.get_new_values()
+
+ mask = mask.any(0)
+ new_values = new_values.T[mask]
+ new_placement = new_placement[mask]
+
+ blocks = [make_block(new_values, placement=new_placement)]
+ return blocks, mask
+
+ def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
+ """
+ compute the quantiles of the block along the given axis
+
+ Parameters
+ ----------
+ qs: a scalar or list of the quantiles to be computed
+ interpolation: type of interpolation, default 'linear'
+ axis: axis to compute, default 0
+
+ Returns
+ -------
+ tuple of (axis, block)
+
+ """
+ kw = {'interpolation': interpolation}
+ values = self.get_values()
+ values, _, _, _ = self._try_coerce_args(values, values)
+
+ def _nanpercentile1D(values, mask, q, **kw):
+ values = values[~mask]
+
+ if len(values) == 0:
+ if lib.is_scalar(q):
+ return self._na_value
+ else:
+ return np.array([self._na_value] * len(q),
+ dtype=values.dtype)
+
+ return np.percentile(values, q, **kw)
+
+ def _nanpercentile(values, q, axis, **kw):
+
+ mask = isna(self.values)
+ if not lib.is_scalar(mask) and mask.any():
+ if self.ndim == 1:
+ return _nanpercentile1D(values, mask, q, **kw)
+ else:
+ # for nonconsolidatable blocks mask is 1D, but values 2D
+ if mask.ndim < values.ndim:
+ mask = mask.reshape(values.shape)
+ if axis == 0:
+ values = values.T
+ mask = mask.T
+ result = [_nanpercentile1D(val, m, q, **kw) for (val, m)
+ in zip(list(values), list(mask))]
+ result = np.array(result, dtype=values.dtype, copy=False).T
+ return result
+ else:
+ return np.percentile(values, q, axis=axis, **kw)
+
+ from pandas import Float64Index
+ is_empty = values.shape[axis] == 0
+ if is_list_like(qs):
+ ax = Float64Index(qs)
+
+ if is_empty:
+ if self.ndim == 1:
+ result = self._na_value
+ else:
+ # create the array of na_values
+ # 2d len(values) * len(qs)
+ result = np.repeat(np.array([self._na_value] * len(qs)),
+ len(values)).reshape(len(values),
+ len(qs))
+ else:
+
+ try:
+ result = _nanpercentile(values, np.array(qs) * 100,
+ axis=axis, **kw)
+ except ValueError:
+
+ # older numpies don't handle an array for q
+ result = [_nanpercentile(values, q * 100,
+ axis=axis, **kw) for q in qs]
+
+ result = np.array(result, copy=False)
+ if self.ndim > 1:
+ result = result.T
+
+ else:
+
+ if self.ndim == 1:
+ ax = Float64Index([qs])
+ else:
+ ax = mgr.axes[0]
+
+ if is_empty:
+ if self.ndim == 1:
+ result = self._na_value
+ else:
+ result = np.array([self._na_value] * len(self))
+ else:
+ result = _nanpercentile(values, qs * 100, axis=axis, **kw)
+
+ ndim = getattr(result, 'ndim', None) or 0
+ result = self._try_coerce_result(result)
+ if lib.is_scalar(result):
+ return ax, self.make_block_scalar(result)
+ return ax, make_block(result,
+ placement=np.arange(len(result)),
+ ndim=ndim)
+
+
+class ScalarBlock(Block):
+ """
+ a scalar compat Block
+ """
+ __slots__ = ['_mgr_locs', 'values', 'ndim']
+
+ def __init__(self, values):
+ self.ndim = 0
+ self.mgr_locs = [0]
+ self.values = values
+
+ @property
+ def dtype(self):
+ return type(self.values)
+
+ @property
+ def shape(self):
+ return tuple([0])
+
+ def __len__(self):
+ return 0
+
+
+class NonConsolidatableMixIn(object):
+ """ hold methods for the nonconsolidatable blocks """
+ _can_consolidate = False
+ _verify_integrity = False
+ _validate_ndim = False
+
+ def __init__(self, values, placement, ndim=None):
+ """Initialize a non-consolidatable block.
+
+ 'ndim' may be inferred from 'placement'.
+
+ This will continue to call __init__ for the other base
+ classes mixed in with this Mixin.
+ """
+ # Placement must be converted to BlockPlacement so that we can check
+ # its length
+ if not isinstance(placement, libinternals.BlockPlacement):
+ placement = libinternals.BlockPlacement(placement)
+
+ # Maybe infer ndim from placement
+ if ndim is None:
+ if len(placement) != 1:
+ ndim = 1
+ else:
+ ndim = 2
+ super(NonConsolidatableMixIn, self).__init__(values, placement,
+ ndim=ndim)
+
+ @property
+ def shape(self):
+ if self.ndim == 1:
+ return (len(self.values)),
+ return (len(self.mgr_locs), len(self.values))
+
+ def get_values(self, dtype=None):
+ """ need to to_dense myself (and always return a ndim sized object) """
+ values = self.values.to_dense()
+ if values.ndim == self.ndim - 1:
+ values = values.reshape((1,) + values.shape)
+ return values
+
+ def iget(self, col):
+
+ if self.ndim == 2 and isinstance(col, tuple):
+ col, loc = col
+ if not com.is_null_slice(col) and col != 0:
+ raise IndexError("{0} only contains one item".format(self))
+ return self.values[loc]
+ else:
+ if col != 0:
+ raise IndexError("{0} only contains one item".format(self))
+ return self.values
+
+ def should_store(self, value):
+ return isinstance(value, self._holder)
+
+ def set(self, locs, values, check=False):
+ assert locs.tolist() == [0]
+ self.values = values
+
+ def putmask(self, mask, new, align=True, inplace=False, axis=0,
+ transpose=False, mgr=None):
+ """
+ putmask the data to the block; we must be a single block and not
+ generate other blocks
+
+ return the resulting block
+
+ Parameters
+ ----------
+ mask : the condition to respect
+ new : a ndarray/object
+ align : boolean, perform alignment on other/cond, default is True
+ inplace : perform inplace modification, default is False
+
+ Returns
+ -------
+ a new block, the result of the putmask
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+
+ # use block's copy logic.
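+ # (for orientation: the plain-ndarray analogue of this method is
+ # numpy.putmask; a sketch, not the code path used here:
+ # >>> a = np.array([1, 2, 3, 4])
+ # >>> np.putmask(a, a > 2, [-1, -2, -3, -4])
+ # >>> a
+ # array([ 1,  2, -3, -4])
+ # new values are taken positionally wherever the mask is True)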
+ # .values may be an Index which does shallow copy by default + new_values = self.values if inplace else self.copy().values + new_values, _, new, _ = self._try_coerce_args(new_values, new) + + if isinstance(new, np.ndarray) and len(new) == len(mask): + new = new[mask] + + mask = _safe_reshape(mask, new_values.shape) + + new_values[mask] = new + new_values = self._try_coerce_result(new_values) + return [self.make_block(values=new_values)] + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def _try_cast_result(self, result, dtype=None): + return result + + def _unstack(self, unstacker_func, new_columns): + """Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker_func : callable + Partially applied unstacker. + new_columns : Index + All columns of the unstacked BlockManager. + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array_like of bool + The mask of columns of `blocks` we should keep. + """ + # NonConsolidatable blocks can have a single item only, so we return + # one block per item + unstacker = unstacker_func(self.values.T) + new_items = unstacker.get_new_columns() + new_placement = new_columns.get_indexer(new_items) + new_values, mask = unstacker.get_new_values() + + mask = mask.any(0) + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + blocks = [self.make_block_same_class(vals, [place]) + for vals, place in zip(new_values, new_placement)] + return blocks, mask + + +class ExtensionBlock(NonConsolidatableMixIn, Block): + """Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks, CategoricalBlock. + + ExtensionArrays are limited to 1-D. + """ + is_extension = True + + def __init__(self, values, placement, ndim=None): + values = self._maybe_coerce_values(values) + super(ExtensionBlock, self).__init__(values, placement, ndim) + + def _maybe_coerce_values(self, values): + """Unbox to an extension array. + + This will unbox an ExtensionArray stored in an Index or Series. + ExtensionArrays pass through. No dtype coercion is done. + + Parameters + ---------- + values : Index, Series, ExtensionArray + + Returns + ------- + ExtensionArray + """ + if isinstance(values, (ABCIndexClass, ABCSeries)): + values = values._values + return values + + @property + def _holder(self): + # For extension blocks, the holder is values-dependent. + return type(self.values) + + @property + def fill_value(self): + # Used in reindex_indexer + return self.values.dtype.na_value + + @property + def _can_hold_na(self): + # The default ExtensionArray._can_hold_na is True + return self._holder._can_hold_na + + @property + def is_view(self): + """Extension arrays are never treated as views.""" + return False + + def setitem(self, indexer, value, mgr=None): + """Set the value inplace, returning a same-typed block. + + This differs from Block.setitem by not allowing setitem to change + the dtype of the Block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice + The subset of self.values to set + value : object + The value being set + mgr : BlockPlacement, optional + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
+ """ + if isinstance(indexer, tuple): + # we are always 1-D + indexer = indexer[0] + + check_setitem_lengths(indexer, value, self.values) + self.values[indexer] = value + return self + + def get_values(self, dtype=None): + # ExtensionArrays must be iterable, so this works. + values = np.asarray(self.values) + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def to_dense(self): + return np.asarray(self.values) + + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block. + """ + if fill_tuple is None: + fill_value = None + else: + fill_value = fill_tuple[0] + + # axis doesn't matter; we are really a single-dim object + # but are passed the axis depending on the calling routing + # if its REALLY axis 0, then this will be a reindex and not a take + new_values = self.values.take(indexer, fill_value=fill_value, + allow_fill=True) + + # if we are a 1-dim object, then always place at 0 + if self.ndim == 1: + new_mgr_locs = [0] + else: + if new_mgr_locs is None: + new_mgr_locs = self.mgr_locs + + return self.make_block_same_class(new_values, new_mgr_locs) + + def _can_hold_element(self, element): + # XXX: We may need to think about pushing this onto the array. + # We're doing the same as CategoricalBlock here. + return True + + def _slice(self, slicer): + """ return a slice of my values """ + + # slice the category + # return same dims as we currently have + + if isinstance(slicer, tuple) and len(slicer) == 2: + if not com.is_null_slice(slicer[0]): + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + slicer = slicer[1] + + return self.values[slicer] + + def formatting_values(self): + return self.values._formatting_values() + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. 
+ """ + values = self._holder._concat_same_type( + [blk.values for blk in to_concat]) + placement = placement or slice(0, len(values), 1) + return self.make_block_same_class(values, ndim=self.ndim, + placement=placement) + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + values = self.values if inplace else self.values.copy() + values = values.fillna(value=value, limit=limit) + return [self.make_block_same_class(values=values, + placement=self.mgr_locs, + ndim=self.ndim)] + + def interpolate(self, method='pad', axis=0, inplace=False, limit=None, + fill_value=None, **kwargs): + + values = self.values if inplace else self.values.copy() + return self.make_block_same_class( + values=values.fillna(value=fill_value, method=method, + limit=limit), + placement=self.mgr_locs) + + +class NumericBlock(Block): + __slots__ = () + is_numeric = True + _can_hold_na = True + + +class FloatOrComplexBlock(NumericBlock): + __slots__ = () + + def equals(self, other): + if self.dtype != other.dtype or self.shape != other.shape: + return False + left, right = self.values, other.values + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + + +class FloatBlock(FloatOrComplexBlock): + __slots__ = () + is_float = True + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return (issubclass(tipo.type, (np.floating, np.integer)) and + not issubclass(tipo.type, (np.datetime64, np.timedelta64))) + return ( + isinstance( + element, (float, int, np.floating, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_, datetime, timedelta, + np.datetime64, np.timedelta64))) + + def to_native_types(self, slicer=None, na_rep='', float_format=None, + decimal='.', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + values = values[:, slicer] + + # see gh-13418: no special formatting is desired at the + # output (important for appropriate 'quoting' behaviour), + # so do not pass it through the FloatArrayFormatter + if float_format is None and decimal == '.': + mask = isna(values) + + if not quoting: + values = values.astype(str) + else: + values = np.array(values, dtype='object') + + values[mask] = na_rep + return values + + from pandas.io.formats.format import FloatArrayFormatter + formatter = FloatArrayFormatter(values, na_rep=na_rep, + float_format=float_format, + decimal=decimal, quoting=quoting, + fixed_width=False) + return formatter.get_result_as_array() + + def should_store(self, value): + # when inserting a column should not coerce integers to floats + # unnecessarily + return (issubclass(value.dtype.type, np.floating) and + value.dtype == self.dtype) + + +class ComplexBlock(FloatOrComplexBlock): + __slots__ = () + is_complex = True + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, + (np.floating, np.integer, np.complexfloating)) + return ( + isinstance( + element, + (float, int, complex, np.float_, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_))) + + def should_store(self, value): + return issubclass(value.dtype.type, np.complexfloating) + + +class IntBlock(NumericBlock): + __slots__ = () + is_integer = True + _can_hold_na = False + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return (issubclass(tipo.type, np.integer) and + not issubclass(tipo.type, 
(np.datetime64, + np.timedelta64)) and + self.dtype.itemsize >= tipo.itemsize) + return is_integer(element) + + def should_store(self, value): + return is_integer_dtype(value) and value.dtype == self.dtype + + +class DatetimeLikeBlockMixin(object): + """Mixin class for DatetimeBlock and DatetimeTZBlock.""" + + @property + def _holder(self): + return DatetimeIndex + + @property + def _na_value(self): + return tslibs.NaT + + @property + def fill_value(self): + return tslibs.iNaT + + def get_values(self, dtype=None): + """ + return object dtype as boxed values, such as Timestamps/Timedelta + """ + if is_object_dtype(dtype): + return lib.map_infer(self.values.ravel(), + self._box_func).reshape(self.values.shape) + return self.values + + +class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): + __slots__ = () + is_timedelta = True + _can_hold_na = True + is_numeric = False + + def __init__(self, values, placement, ndim=None): + if values.dtype != _TD_DTYPE: + values = conversion.ensure_timedelta64ns(values) + + super(TimeDeltaBlock, self).__init__(values, + placement=placement, ndim=ndim) + + @property + def _holder(self): + return TimedeltaIndex + + @property + def _box_func(self): + return lambda x: Timedelta(x, unit='ns') + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.timedelta64) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64)) + + def fillna(self, value, **kwargs): + + # allow filling with integers to be + # interpreted as seconds + if is_integer(value) and not isinstance(value, np.timedelta64): + value = Timedelta(value, unit='s') + return super(TimeDeltaBlock, self).fillna(value, **kwargs) + + def _try_coerce_args(self, values, other): + """ + Coerce values and other to int64, with null values converted to + iNaT. 
values is always ndarray-like, other may not be + + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar + + Returns + ------- + base-type values, values mask, base-type other, other mask + """ + + values_mask = isna(values) + values = values.view('i8') + other_mask = False + + if isinstance(other, bool): + raise TypeError + elif is_null_datelike_scalar(other): + other = tslibs.iNaT + other_mask = True + elif isinstance(other, Timedelta): + other_mask = isna(other) + other = other.value + elif isinstance(other, timedelta): + other = Timedelta(other).value + elif isinstance(other, np.timedelta64): + other_mask = isna(other) + other = Timedelta(other).value + elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): + other_mask = isna(other) + other = other.astype('i8', copy=False).view('i8') + else: + # coercion issues + # let higher levels handle + raise TypeError + + return values, values_mask, other, other_mask + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args / try_operate """ + if isinstance(result, np.ndarray): + mask = isna(result) + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('m8[ns]') + result[mask] = tslibs.iNaT + elif isinstance(result, (np.integer, np.float)): + result = self._box_func(result) + return result + + def should_store(self, value): + return issubclass(value.dtype.type, np.timedelta64) + + def to_native_types(self, slicer=None, na_rep=None, quoting=None, + **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + values = values[:, slicer] + mask = isna(values) + + rvalues = np.empty(values.shape, dtype=object) + if na_rep is None: + na_rep = 'NaT' + rvalues[mask] = na_rep + imask = (~mask).ravel() + + # FIXME: + # should use the formats.format.Timedelta64Formatter here + # to figure what format to pass to the Timedelta + # e.g. 
to not show the decimals say + rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') + for val in values.ravel()[imask]], + dtype=object) + return rvalues + + +class BoolBlock(NumericBlock): + __slots__ = () + is_bool = True + _can_hold_na = False + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.bool_) + return isinstance(element, (bool, np.bool_)) + + def should_store(self, value): + return issubclass(value.dtype.type, np.bool_) + + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + inplace = validate_bool_kwarg(inplace, 'inplace') + to_replace_values = np.atleast_1d(to_replace) + if not np.can_cast(to_replace_values, bool): + return self + return super(BoolBlock, self).replace(to_replace, value, + inplace=inplace, filter=filter, + regex=regex, convert=convert, + mgr=mgr) + + +class ObjectBlock(Block): + __slots__ = () + is_object = True + _can_hold_na = True + + def __init__(self, values, placement=None, ndim=2): + if issubclass(values.dtype.type, compat.string_types): + values = np.array(values, dtype=object) + + super(ObjectBlock, self).__init__(values, ndim=ndim, + placement=placement) + + @property + def is_bool(self): + """ we can be a bool if we have only bool values but are of type + object + """ + return lib.is_bool_array(self.values.ravel()) + + # TODO: Refactor when convert_objects is removed since there will be 1 path + def convert(self, *args, **kwargs): + """ attempt to coerce any object types to better types return a copy of + the block (if copy = True) by definition we ARE an ObjectBlock!!!!! + + can return multiple blocks! + """ + + if args: + raise NotImplementedError + by_item = True if 'by_item' not in kwargs else kwargs['by_item'] + + new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] + new_style = False + for kw in new_inputs: + new_style |= kw in kwargs + + if new_style: + fn = soft_convert_objects + fn_inputs = new_inputs + else: + fn = maybe_convert_objects + fn_inputs = ['convert_dates', 'convert_numeric', + 'convert_timedeltas'] + fn_inputs += ['copy'] + + fn_kwargs = {} + for key in fn_inputs: + if key in kwargs: + fn_kwargs[key] = kwargs[key] + + # operate column-by-column + def f(m, v, i): + shape = v.shape + values = fn(v.ravel(), **fn_kwargs) + try: + values = values.reshape(shape) + values = _block_shape(values, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass + + return values + + if by_item and not self._is_single_block: + blocks = self.split_and_operate(None, f, False) + else: + values = f(None, self.values.ravel(), None) + blocks = [make_block(values, ndim=self.ndim, + placement=self.mgr_locs)] + + return blocks + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value + + Returns + ------- + None + """ + + # GH6026 + if check: + try: + if (self.values[locs] == values).all(): + return + except: + pass + try: + self.values[locs] = values + except (ValueError): + + # broadcasting error + # see GH6171 + new_shape = list(values.shape) + new_shape[0] = len(self.items) + self.values = np.empty(tuple(new_shape), dtype=self.dtype) + self.values.fill(np.nan) + self.values[locs] = values + + def _maybe_downcast(self, blocks, downcast=None): + + if downcast is not None: + return blocks + + # split and convert the blocks + return _extend_blocks([b.convert(datetime=True, numeric=False) + for b in blocks]) + + def 
_can_hold_element(self, element): + return True + + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + + if isinstance(other, ABCDatetimeIndex): + # to store DatetimeTZBlock as object + other = other.astype(object).values + + return values, False, other, False + + def should_store(self, value): + return not (issubclass(value.dtype.type, + (np.integer, np.floating, np.complexfloating, + np.datetime64, np.bool_)) or + # TODO(ExtensionArray): remove is_extension_type + # when all extension arrays have been ported. + is_extension_type(value) or + is_extension_array_dtype(value)) + + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + to_rep_is_list = is_list_like(to_replace) + value_is_list = is_list_like(value) + both_lists = to_rep_is_list and value_is_list + either_list = to_rep_is_list or value_is_list + + result_blocks = [] + blocks = [self] + + if not either_list and is_re(to_replace): + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, regex=True, + convert=convert, mgr=mgr) + elif not (either_list or regex): + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + elif both_lists: + for to_rep, v in zip(to_replace, value): + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, v, inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks + + elif to_rep_is_list and regex: + for to_rep in to_replace: + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, value, inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks + + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, convert=convert, + regex=regex, mgr=mgr) + + def _replace_single(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + + inplace = validate_bool_kwarg(inplace, 'inplace') + + # to_replace is regex compilable + to_rep_re = regex and is_re_compilable(to_replace) + + # regex is regex compilable + regex_re = is_re_compilable(regex) + + # only one will survive + if to_rep_re and regex_re: + raise AssertionError('only one of to_replace and regex can be ' + 'regex compilable') + + # if regex was passed as something that can be a regex (rather than a + # boolean) + if regex_re: + to_replace = regex + + regex = regex_re or to_rep_re + + # try to get the pattern attribute (compiled re) or it's a string + try: + pattern = to_replace.pattern + except AttributeError: + pattern = to_replace + + # if the pattern is not empty and to_replace is either a string or a + # regex + if regex and pattern: + rx = re.compile(to_replace) + else: + # if the thing to replace is not a string or compiled regex call + # the superclass method -> to_replace is some kind of object + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex, + mgr=mgr) + + new_values = self.values if inplace else self.values.copy() + + # deal with replacing values with objects (strings) that match but + # whose replacement is not a string (numeric, nan, object) + if isna(value) or not isinstance(value, compat.string_types): + + def re_replacer(s): + try: + return 
value if rx.search(s) is not None else s + except TypeError: + return s + else: + # value is guaranteed to be a string here, s can be either a string + # or null if it's null it gets returned + def re_replacer(s): + try: + return rx.sub(value, s) + except TypeError: + return s + + f = np.vectorize(re_replacer, otypes=[self.dtype]) + + if filter is None: + filt = slice(None) + else: + filt = self.mgr_locs.isin(filter).nonzero()[0] + + new_values[filt] = f(new_values[filt]) + + # convert + block = self.make_block(new_values) + if convert: + block = block.convert(by_item=True, numeric=False) + + return block + + +class CategoricalBlock(ExtensionBlock): + __slots__ = () + is_categorical = True + _verify_integrity = True + _can_hold_na = True + _concatenator = staticmethod(_concat._concat_categorical) + + def __init__(self, values, placement, ndim=None): + from pandas.core.arrays.categorical import _maybe_to_categorical + + # coerce to categorical if we can + super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), + placement=placement, + ndim=ndim) + + @property + def _holder(self): + return Categorical + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array + """ + return np.object_ + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + + # GH12564: CategoricalBlock is 1-dim only + # while returned results could be any dim + if ((not is_categorical_dtype(result)) and + isinstance(result, np.ndarray)): + result = _block_shape(result, ndim=self.ndim) + + return result + + def shift(self, periods, axis=0, mgr=None): + return self.make_block_same_class(values=self.values.shift(periods), + placement=self.mgr_locs) + + def to_dense(self): + # Categorical.get_values returns a DatetimeIndex for datetime + # categories, so we can't simply use `np.asarray(self.values)` like + # other types. + return self.values.get_values() + + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + # Categorical is always one dimension + values = values[slicer] + mask = isna(values) + values = np.array(values, dtype='object') + values[mask] = na_rep + + # we are expected to return a 2-d ndarray + return values.reshape(1, len(values)) + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + + Note that this CategoricalBlock._concat_same_type *may* not + return a CategoricalBlock. When the categories in `to_concat` + differ, this will return an object ndarray. + + If / when we decide we don't like that behavior: + + 1. Change Categorical._concat_same_type to use union_categoricals + 2. Delete this method. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be object dtype + return make_block( + values, placement=placement or slice(0, len(values), 1), + ndim=self.ndim) + + +class DatetimeBlock(DatetimeLikeBlockMixin, Block): + __slots__ = () + is_datetime = True + _can_hold_na = True + + def __init__(self, values, placement, ndim=None): + values = self._maybe_coerce_values(values) + super(DatetimeBlock, self).__init__(values, + placement=placement, ndim=ndim) + + def _maybe_coerce_values(self, values): + """Input validation for values passed to __init__. Ensure that + we have datetime64ns, coercing if necessary. 
+ + Parameters + ---------- + values : array-like + Must be convertible to datetime64 + + Returns + ------- + values : ndarray[datetime64ns] + + Overridden by DatetimeTZBlock. + """ + if values.dtype != _NS_DTYPE: + values = conversion.ensure_datetime64ns(values) + return values + + def _astype(self, dtype, mgr=None, **kwargs): + """ + these automatically copy, so copy=True has no effect + raise on an except if raise == True + """ + + # if we are passed a datetime64[ns, tz] + if is_datetime64tz_dtype(dtype): + dtype = DatetimeTZDtype(dtype) + + values = self.values + if getattr(values, 'tz', None) is None: + values = DatetimeIndex(values).tz_localize('UTC') + values = values.tz_convert(dtype.tz) + return self.make_block(values) + + # delegate + return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + # TODO: this still uses asarray, instead of dtype.type + element = np.array(element) + return element.dtype == _NS_DTYPE or element.dtype == np.int64 + return (is_integer(element) or isinstance(element, datetime) or + isna(element)) + + def _try_coerce_args(self, values, other): + """ + Coerce values and other to dtype 'i8'. NaN and NaT convert to + the smallest i8, and will correctly round-trip to NaT if converted + back in _try_coerce_result. values is always ndarray-like, other + may not be + + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar + + Returns + ------- + base-type values, values mask, base-type other, other mask + """ + + values_mask = isna(values) + values = values.view('i8') + other_mask = False + + if isinstance(other, bool): + raise TypeError + elif is_null_datelike_scalar(other): + other = tslibs.iNaT + other_mask = True + elif isinstance(other, (datetime, np.datetime64, date)): + other = self._box_func(other) + if getattr(other, 'tz') is not None: + raise TypeError("cannot coerce a Timestamp with a tz on a " + "naive Block") + other_mask = isna(other) + other = other.asm8.view('i8') + elif hasattr(other, 'dtype') and is_datetime64_dtype(other): + other_mask = isna(other) + other = other.astype('i8', copy=False).view('i8') + else: + # coercion issues + # let higher levels handle + raise TypeError + + return values, values_mask, other, other_mask + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + try: + result = result.astype('M8[ns]') + except ValueError: + pass + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = self._box_func(result) + return result + + @property + def _box_func(self): + return tslibs.Timestamp + + def to_native_types(self, slicer=None, na_rep=None, date_format=None, + quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + values = values[..., slicer] + + from pandas.io.formats.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(values, date_format) + + result = tslib.format_array_from_datetime( + values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), + format=format, na_rep=na_rep).reshape(values.shape) + return np.atleast_2d(result) + + def should_store(self, value): + return (issubclass(value.dtype.type, np.datetime64) and + not is_datetimetz(value)) + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item 
value
+
+ Returns
+ -------
+ None
+ """
+ if values.dtype != _NS_DTYPE:
+ # Workaround for numpy 1.6 bug
+ values = conversion.ensure_datetime64ns(values)
+
+ self.values[locs] = values
+
+
+class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
+ """ implement a datetime64 block with a tz attribute """
+ __slots__ = ()
+ _concatenator = staticmethod(_concat._concat_datetime)
+ is_datetimetz = True
+
+ def __init__(self, values, placement, ndim=2, dtype=None):
+ # XXX: This will end up calling _maybe_coerce_values twice
+ # when dtype is not None. It's relatively cheap (just an isinstance)
+ # but it'd be nice to avoid.
+ #
+ # If we can remove dtype from __init__, and push that conversion
+ # onto the callers, then we can remove this entire __init__
+ # and just use DatetimeBlock's.
+ if dtype is not None:
+ values = self._maybe_coerce_values(values, dtype=dtype)
+ super(DatetimeTZBlock, self).__init__(values, placement=placement,
+ ndim=ndim)
+
+ def _maybe_coerce_values(self, values, dtype=None):
+ """Input validation for values passed to __init__. Ensure that
+ we have datetime64TZ, coercing if necessary.
+
+ Parameters
+ ----------
+ values : array-like
+ Must be convertible to datetime64
+ dtype : string or DatetimeTZDtype, optional
+ Does a shallow copy to this tz
+
+ Returns
+ -------
+ values : ndarray[datetime64ns]
+ """
+ if not isinstance(values, self._holder):
+ values = self._holder(values)
+
+ if dtype is not None:
+ if isinstance(dtype, compat.string_types):
+ dtype = DatetimeTZDtype.construct_from_string(dtype)
+ values = values._shallow_copy(tz=dtype.tz)
+
+ if values.tz is None:
+ raise ValueError("cannot create a DatetimeTZBlock without a tz")
+
+ return values
+
+ @property
+ def is_view(self):
+ """ return a boolean if I am possibly a view """
+ # check the ndarray values of the DatetimeIndex values
+ return self.values.values.base is not None
+
+ def copy(self, deep=True, mgr=None):
+ """ copy constructor """
+ values = self.values
+ if deep:
+ values = values.copy(deep=True)
+ return self.make_block_same_class(values)
+
+ def external_values(self):
+ """ we internally represent the data as a DatetimeIndex, but for
+ external compat with ndarray, export as a ndarray of Timestamps
+ """
+ return self.values.astype('datetime64[ns]').values
+
+ def get_values(self, dtype=None):
+ # return object dtype as Timestamps with the zones
+ if is_object_dtype(dtype):
+ return lib.map_infer(
+ self.values.ravel(), self._box_func).reshape(self.values.shape)
+ return self.values
+
+ def _slice(self, slicer):
+ """ return a slice of my values """
+ if isinstance(slicer, tuple):
+ col, loc = slicer
+ if not com.is_null_slice(col) and col != 0:
+ raise IndexError("{0} only contains one item".format(self))
+ return self.values[loc]
+ return self.values[slicer]
+
+ def _try_coerce_args(self, values, other):
+ """
+ localize and return i8 for the values
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
+
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+ values_mask = _block_shape(isna(values), ndim=self.ndim)
+ # asi8 is a view, needs copy
+ values = _block_shape(values.asi8, ndim=self.ndim)
+ other_mask = False
+
+ if isinstance(other, ABCSeries):
+ other = self._holder(other)
+ other_mask = isna(other)
+
+ if isinstance(other, bool):
+ raise TypeError
+ elif (is_null_datelike_scalar(other) or
+ (lib.is_scalar(other) and isna(other))):
+ other = tslibs.iNaT
+ other_mask = True
+ elif isinstance(other,
self._holder): + if other.tz != self.values.tz: + raise ValueError("incompatible or non tz-aware value") + other_mask = _block_shape(isna(other), ndim=self.ndim) + other = _block_shape(other.asi8, ndim=self.ndim) + elif isinstance(other, (np.datetime64, datetime, date)): + other = tslibs.Timestamp(other) + tz = getattr(other, 'tz', None) + + # test we can have an equal time zone + if tz is None or str(tz) != str(self.values.tz): + raise ValueError("incompatible or non tz-aware value") + other_mask = isna(other) + other = other.value + else: + raise TypeError + + return values, values_mask, other, other_mask + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = tslibs.Timestamp(result, tz=self.values.tz) + if isinstance(result, np.ndarray): + # allow passing of > 1dim if its trivial + if result.ndim > 1: + result = result.reshape(np.prod(result.shape)) + result = self.values._shallow_copy(result) + + return result + + @property + def _box_func(self): + return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) + + def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods """ + + # think about moving this to the DatetimeIndex. This is a non-freq + # (number of periods) shift ### + + N = len(self) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) + + new_values = self.values.asi8.take(indexer) + + if periods > 0: + new_values[:periods] = tslibs.iNaT + else: + new_values[periods:] = tslibs.iNaT + + new_values = self.values._shallow_copy(new_values) + return [self.make_block_same_class(new_values, + placement=self.mgr_locs)] + + def diff(self, n, axis=0, mgr=None): + """1st discrete difference + + Parameters + ---------- + n : int, number of periods to diff + axis : int, axis to diff upon. default 0 + mgr : default None + + Return + ------ + A list with a new TimeDeltaBlock. + + Note + ---- + The arguments here are mimicking shift so they are called correctly + by apply. + """ + if axis == 0: + # Cannot currently calculate diff across multiple blocks since this + # function is invoked via apply + raise NotImplementedError + new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 + + # Reshape the new_values like how algos.diff does for timedelta data + new_values = new_values.reshape(1, len(new_values)) + new_values = new_values.astype('timedelta64[ns]') + return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be non-tz dtype + return make_block( + values, placement=placement or slice(0, len(values), 1)) + + +class SparseBlock(NonConsolidatableMixIn, Block): + """ implement as a list of sparse arrays of the same dtype """ + __slots__ = () + is_sparse = True + is_numeric = True + _box_to_block_values = False + _can_hold_na = True + _ftype = 'sparse' + _concatenator = staticmethod(_concat._concat_sparse) + + def __init__(self, values, placement, ndim=None): + # Ensure that we have the underlying SparseArray here... 
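+ # (a small sketch of the unwrapping, assuming the SparseSeries
+ # API of this pandas version:
+ # >>> s = pd.SparseSeries([0.0, 0.0, 1.0])
+ # >>> isinstance(s.values, pd.SparseArray)
+ # True
+ # so Series-like input is reduced to its underlying SparseArray)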
+ if isinstance(values, ABCSeries): + values = values.values + assert isinstance(values, SparseArray) + super(SparseBlock, self).__init__(values, placement, ndim=ndim) + + @property + def _holder(self): + return SparseArray + + @property + def shape(self): + return (len(self.mgr_locs), self.sp_index.length) + + @property + def fill_value(self): + # return np.nan + return self.values.fill_value + + @fill_value.setter + def fill_value(self, v): + self.values.fill_value = v + + def to_dense(self): + return self.values.to_dense().view() + + @property + def sp_values(self): + return self.values.sp_values + + @sp_values.setter + def sp_values(self, v): + # reset the sparse values + self.values = SparseArray(v, sparse_index=self.sp_index, + kind=self.kind, dtype=v.dtype, + fill_value=self.values.fill_value, + copy=False) + + @property + def sp_index(self): + return self.values.sp_index + + @property + def kind(self): + return self.values.kind + + def _astype(self, dtype, copy=False, errors='raise', values=None, + klass=None, mgr=None, **kwargs): + if values is None: + values = self.values + values = values.astype(dtype, copy=copy) + return self.make_block_same_class(values=values, + placement=self.mgr_locs) + + def __len__(self): + try: + return self.sp_index.length + except: + return 0 + + def copy(self, deep=True, mgr=None): + return self.make_block_same_class(values=self.values, + sparse_index=self.sp_index, + kind=self.kind, copy=deep, + placement=self.mgr_locs) + + def make_block_same_class(self, values, placement, sparse_index=None, + kind=None, dtype=None, fill_value=None, + copy=False, ndim=None): + """ return a new block """ + if dtype is None: + dtype = values.dtype + if fill_value is None and not isinstance(values, SparseArray): + fill_value = self.values.fill_value + + # if not isinstance(values, SparseArray) and values.ndim != self.ndim: + # raise ValueError("ndim mismatch") + + if values.ndim == 2: + nitems = values.shape[0] + + if nitems == 0: + # kludgy, but SparseBlocks cannot handle slices, where the + # output is 0-item, so let's convert it to a dense block: it + # won't take space since there's 0 items, plus it will preserve + # the dtype. 
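+ # (for example, np.empty((0, 3), dtype='float64') allocates
+ # nothing, nbytes == 0, yet still carries the dtype along)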
+ return self.make_block(np.empty(values.shape, dtype=dtype), + placement) + elif nitems > 1: + raise ValueError("Only 1-item 2d sparse blocks are supported") + else: + values = values.reshape(values.shape[1]) + + new_values = SparseArray(values, sparse_index=sparse_index, + kind=kind or self.kind, dtype=dtype, + fill_value=fill_value, copy=copy) + return self.make_block(new_values, + placement=placement) + + def interpolate(self, method='pad', axis=0, inplace=False, limit=None, + fill_value=None, **kwargs): + + values = missing.interpolate_2d(self.values.to_dense(), method, axis, + limit, fill_value) + return self.make_block_same_class(values=values, + placement=self.mgr_locs) + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + # we may need to upcast our fill to match our dtype + if limit is not None: + raise NotImplementedError("specifying a limit for 'fillna' has " + "not been implemented yet") + values = self.values if inplace else self.values.copy() + values = values.fillna(value, downcast=downcast) + return [self.make_block_same_class(values=values, + placement=self.mgr_locs)] + + def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods """ + N = len(self.values.T) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) + new_values = self.values.to_dense().take(indexer) + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + new_values, fill_value = maybe_upcast(new_values) + if periods > 0: + new_values[:periods] = fill_value + else: + new_values[periods:] = fill_value + return [self.make_block_same_class(new_values, + placement=self.mgr_locs)] + + def sparse_reindex(self, new_index): + """ sparse reindex and return a new block + current reindex only works for float64 dtype! """ + values = self.values + values = values.sp_index.to_int_index().reindex( + values.sp_values.astype('float64'), values.fill_value, new_index) + return self.make_block_same_class(values, sparse_index=new_index, + placement=self.mgr_locs) + + +# ----------------------------------------------------------------- +# Constructor Helpers + +def get_block_type(values, dtype=None): + """ + Find the appropriate Block subclass to use for the given values and dtype. 
+
+ Parameters
+ ----------
+ values : ndarray-like
+ dtype : numpy or pandas dtype
+
+ Returns
+ -------
+ cls : class, subclass of Block
+ """
+ dtype = dtype or values.dtype
+ vtype = dtype.type
+
+ if is_sparse(values):
+ cls = SparseBlock
+ elif issubclass(vtype, np.floating):
+ cls = FloatBlock
+ elif issubclass(vtype, np.timedelta64):
+ assert issubclass(vtype, np.integer)
+ cls = TimeDeltaBlock
+ elif issubclass(vtype, np.complexfloating):
+ cls = ComplexBlock
+ elif is_categorical(values):
+ cls = CategoricalBlock
+ elif is_extension_array_dtype(values):
+ cls = ExtensionBlock
+ elif issubclass(vtype, np.datetime64):
+ assert not is_datetimetz(values)
+ cls = DatetimeBlock
+ elif is_datetimetz(values):
+ cls = DatetimeTZBlock
+ elif issubclass(vtype, np.integer):
+ cls = IntBlock
+ elif dtype == np.bool_:
+ cls = BoolBlock
+ else:
+ cls = ObjectBlock
+ return cls
+
+
+def make_block(values, placement, klass=None, ndim=None, dtype=None,
+ fastpath=None):
+ if fastpath is not None:
+ # GH#19265 pyarrow is passing this
+ warnings.warn("fastpath argument is deprecated, will be removed "
+ "in a future release.", DeprecationWarning)
+ if klass is None:
+ dtype = dtype or values.dtype
+ klass = get_block_type(values, dtype)
+
+ elif klass is DatetimeTZBlock and not is_datetimetz(values):
+ return klass(values, ndim=ndim,
+ placement=placement, dtype=dtype)
+
+ return klass(values, ndim=ndim, placement=placement)
+
+
+# -----------------------------------------------------------------
+
+def _extend_blocks(result, blocks=None):
+ """ return a new extended blocks, given the result """
+ from pandas.core.internals import BlockManager
+ if blocks is None:
+ blocks = []
+ if isinstance(result, list):
+ for r in result:
+ if isinstance(r, list):
+ blocks.extend(r)
+ else:
+ blocks.append(r)
+ elif isinstance(result, BlockManager):
+ blocks.extend(result.blocks)
+ else:
+ blocks.append(result)
+ return blocks
+
+
+def _block_shape(values, ndim=1, shape=None):
+ """ guarantee the shape of the values to be at least 1-d """
+ if values.ndim < ndim:
+ if shape is None:
+ shape = values.shape
+ values = values.reshape(tuple((1, ) + shape))
+ return values
+
+
+def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
+
+ if len(blocks) == 1:
+ return blocks[0]
+
+ if _can_consolidate:
+
+ if dtype is None:
+ if len({b.dtype for b in blocks}) != 1:
+ raise AssertionError("_merge_blocks are invalid!")
+ dtype = blocks[0].dtype
+
+ # FIXME: optimization potential in case all mgrs contain slices and
+ # combination of those slices is a slice, too.
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
+ new_values = _vstack([b.values for b in blocks], dtype)
+
+ argsort = np.argsort(new_mgr_locs)
+ new_values = new_values[argsort]
+ new_mgr_locs = new_mgr_locs[argsort]
+
+ return make_block(new_values, placement=new_mgr_locs)
+
+ # no merge
+ return blocks
+
+
+def _vstack(to_stack, dtype):
+
+ # work around NumPy 1.6 bug
+ if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
+ new_values = np.vstack([x.view('i8') for x in to_stack])
+ return new_values.view(dtype)
+
+ else:
+ return np.vstack(to_stack)
+
+
+def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
+ """ pivot to the labels shape """
+ panel_shape = (len(placement),) + shape
+
+ # TODO: lexsort depth needs to be 2!!
+
+ # Create observation selection vector using major and minor
+ # labels, for converting to panel format.
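+ # (the selector is plain row-major index arithmetic; e.g. with a
+ # minor axis of length 4:
+ # >>> major, minor = np.array([0, 0, 1]), np.array([1, 3, 2])
+ # >>> major * 4 + minor
+ # array([1, 3, 6])
+ # which is what _factor_indexer computes for those label levels)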
+    selector = _factor_indexer(shape[1:], labels)
+    mask = np.zeros(np.prod(shape), dtype=bool)
+    mask.put(selector, True)
+
+    if mask.all():
+        pvalues = np.empty(panel_shape, dtype=values.dtype)
+    else:
+        dtype, fill_value = maybe_promote(values.dtype)
+        pvalues = np.empty(panel_shape, dtype=dtype)
+        pvalues.fill(fill_value)
+
+    for i in range(len(placement)):
+        pvalues[i].flat[mask] = values[:, i]
+
+    return make_block(pvalues, placement=placement)
+
+
+def _safe_reshape(arr, new_shape):
+    """
+    If possible, reshape `arr` to have shape `new_shape`,
+    with a couple of exceptions (see gh-13012):
+
+    1) If `arr` is an ExtensionArray or Index, `arr` will be
+       returned as is.
+    2) If `arr` is a Series, the `_values` attribute will
+       be reshaped and returned.
+
+    Parameters
+    ----------
+    arr : array-like, object to be reshaped
+    new_shape : int or tuple of ints, the new shape
+    """
+    if isinstance(arr, ABCSeries):
+        arr = arr._values
+    if not isinstance(arr, ABCExtensionArray):
+        arr = arr.reshape(new_shape)
+    return arr
+
+
+def _factor_indexer(shape, labels):
+    """
+    given a tuple of shape and a list of Categorical labels, return the
+    expanded label indexer
+    """
+    mult = np.array(shape)[::-1].cumprod()[::-1]
+    return ensure_platform_int(
+        np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
+
+
+def _putmask_smart(v, m, n):
+    """
+    Return a new ndarray, try to preserve dtype if possible.
+
+    Parameters
+    ----------
+    v : `values`, updated in-place (array like)
+    m : `mask`, applies to both sides (array like)
+    n : `new values` either scalar or an array like aligned with `values`
+
+    Returns
+    -------
+    values : ndarray with updated values
+        this *may* be a copy of the original
+
+    See Also
+    --------
+    ndarray.putmask
+    """
+
+    # we cannot use np.asarray() here as we cannot have conversions
+    # that numpy does when numeric are mixed with strings
+
+    # n should be the length of the mask or a scalar here
+    if not is_list_like(n):
+        n = np.repeat(n, len(m))
+    elif isinstance(n, np.ndarray) and n.ndim == 0:  # numpy scalar
+        n = np.repeat(np.array(n, ndmin=1), len(m))
+
+    # see if we are only masking values that, once put,
+    # will work in the current dtype
+    try:
+        nn = n[m]
+
+        # make sure that we have a nullable type
+        # if we have nulls
+        if not _isna_compat(v, nn[0]):
+            raise ValueError
+
+        # we ignore ComplexWarning here
+        with warnings.catch_warnings(record=True):
+            nn_at = nn.astype(v.dtype)
+
+        # avoid invalid dtype comparisons
+        # between numbers & strings
+
+        # only compare integers/floats
+        # don't compare integers to datetimelikes
+        if (not is_numeric_v_string_like(nn, nn_at) and
+                (is_float_dtype(nn.dtype) or
+                 is_integer_dtype(nn.dtype) and
+                 is_float_dtype(nn_at.dtype) or
+                 is_integer_dtype(nn_at.dtype))):
+
+            comp = (nn == nn_at)
+            if is_list_like(comp) and comp.all():
+                nv = v.copy()
+                nv[m] = nn_at
+                return nv
+    except (ValueError, IndexError, TypeError):
+        pass
+
+    n = np.asarray(n)
+
+    def _putmask_preserve(nv, n):
+        try:
+            nv[m] = n[m]
+        except (IndexError, ValueError):
+            nv[m] = n
+        return nv
+
+    # preserves dtype if possible
+    if v.dtype.kind == n.dtype.kind:
+        return _putmask_preserve(v, n)
+
+    # change the dtype if needed
+    dtype, _ = maybe_promote(n.dtype)
+
+    if is_extension_type(v.dtype) and is_object_dtype(dtype):
+        v = v.get_values(dtype)
+    else:
+        v = v.astype(dtype)
+
+    return _putmask_preserve(v, n)
diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py
index 991da41168aa0..aa32bf6051617 100644
--- a/pandas/tests/extension/test_external_block.py
+++ b/pandas/tests/extension/test_external_block.py
@@ -5,7 +5,8 @@
 
 import pandas as pd
 
 from pandas.core.internals import (
-    BlockManager, SingleBlockManager, NonConsolidatableMixIn, Block)
+    BlockManager, SingleBlockManager)
+from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
 
 import pytest
Follow-up to #21903
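
For reviewers, the net effect of the import move, as exercised by the updated `test_external_block.py`, is sketched below (assuming a dev install of this branch):

```python
# Manager-level names stay importable from the package namespace ...
from pandas.core.internals import BlockManager, SingleBlockManager

# ... while Block-level classes now come from the blocks submodule.
from pandas.core.internals.blocks import Block, NonConsolidatableMixIn
```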
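
To sanity-check the relocated `get_block_type`/`make_block` dispatch, a quick illustrative snippet; note that `_data` is the internal `BlockManager` and not public API, and the printed class names assume current master:

```python
import pandas as pd

# make_block() resolves the Block subclass via get_block_type() when no
# explicit klass is passed; a consolidated DataFrame keeps one block per dtype.
df = pd.DataFrame({'f': [1.5], 'i': [1], 'b': [True], 'o': ['x']})
for blk in df._data.blocks:
    print(type(blk).__name__, blk.dtype)
# e.g. FloatBlock float64, IntBlock int64, BoolBlock bool,
#      ObjectBlock object (block order within the manager is not guaranteed)
```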
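
`_putmask_smart` moves over verbatim as well. The dtype rule it implements (keep the target dtype when the masked values fit, otherwise promote rather than truncate) can be sketched with plain NumPy; this is illustrative only, not code from this PR:

```python
import numpy as np

v = np.array([1, 2, 3])             # int64 target
m = np.array([False, True, False])  # mask
n = np.array([0.0, 9.5, 0.0])       # float64 replacement values

plain = v.copy()
np.putmask(plain, m, n)             # dtype kept: 9.5 silently truncates
print(plain)                        # [1 9 3]

promoted = v.astype(np.promote_types(v.dtype, n.dtype))
np.putmask(promoted, m, n)          # promote first: value preserved
print(promoted)                     # [1.  9.5 3. ]
```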
https://api.github.com/repos/pandas-dev/pandas/pulls/22014
2018-07-21T19:03:25Z
2018-07-23T11:24:01Z
2018-07-23T11:24:01Z
2018-07-23T14:10:31Z