title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CI: fix conda version
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 272e7f2e05d14..c92da8d4774e1 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -48,7 +48,12 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 -conda update -q conda + +# TODO(jreback), fix conoda version +echo +echo "[conda version]" +conda install conda=4.4.4 +# conda update -q conda if [ "$CONDA_BUILD_TEST" ]; then echo
https://api.github.com/repos/pandas-dev/pandas/pulls/19025
2018-01-01T13:43:53Z
2018-01-01T14:15:38Z
2018-01-01T14:15:38Z
2018-01-01T14:15:38Z
dispatch Series[datetime64] ops to DatetimeIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index bd3bee507baa3..a182f8bd9c9a4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -208,6 +208,9 @@ Other API Changes - In :func:`read_excel`, the ``comment`` argument is now exposed as a named parameter (:issue:`18735`) - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) - The options ``html.border`` and ``mode.use_inf_as_null`` were deprecated in prior versions, these will now show ``FutureWarning`` rather than a ``DeprecationWarning`` (:issue:`19003`) +- Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'``(:issue:`18808`) +- Operations between a :class:`Series` with dtype ``dtype='datetime64[ns]'`` and a :class:`PeriodIndex` will correctly raises ``TypeError`` (:issue:`18850`) +- Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (issue:`18817`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 0229f7c256464..554f0cb3803e9 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -341,10 +341,8 @@ def get_op(cls, left, right, name, na_op): normal numpy path. 
""" is_timedelta_lhs = is_timedelta64_dtype(left) - is_datetime_lhs = (is_datetime64_dtype(left) or - is_datetime64tz_dtype(left)) - if not (is_datetime_lhs or is_timedelta_lhs): + if not is_timedelta_lhs: return _Op(left, right, name, na_op) else: return _TimeOp(left, right, name, na_op) @@ -364,14 +362,8 @@ def __init__(self, left, right, name, na_op): rvalues = self._convert_to_array(right, name=name, other=lvalues) # left - self.is_offset_lhs = is_offsetlike(left) self.is_timedelta_lhs = is_timedelta64_dtype(lvalues) - self.is_datetime64_lhs = is_datetime64_dtype(lvalues) - self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues) - self.is_datetime_lhs = (self.is_datetime64_lhs or - self.is_datetime64tz_lhs) - self.is_integer_lhs = left.dtype.kind in ['i', 'u'] - self.is_floating_lhs = left.dtype.kind == 'f' + assert self.is_timedelta_lhs # right self.is_offset_rhs = is_offsetlike(right) @@ -387,34 +379,6 @@ def __init__(self, left, right, name, na_op): self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, rvalues) - def _validate_datetime(self, lvalues, rvalues, name): - # assumes self.is_datetime_lhs - - if (self.is_timedelta_rhs or self.is_offset_rhs): - # datetime and timedelta/DateOffset - if name not in ('__add__', '__radd__', '__sub__'): - raise TypeError("can only operate on a datetime with a rhs of " - "a timedelta/DateOffset for addition and " - "subtraction, but the operator [{name}] was " - "passed".format(name=name)) - - elif self.is_datetime_rhs: - # 2 datetimes - if name not in ('__sub__', '__rsub__'): - raise TypeError("can only operate on a datetimes for" - " subtraction, but the operator [{name}] was" - " passed".format(name=name)) - - # if tz's must be equal (same or None) - if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None): - raise ValueError("Incompatible tz's on datetime subtraction " - "ops") - - else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 
'or a timedelta') - def _validate_timedelta(self, name): # assumes self.is_timedelta_lhs @@ -440,44 +404,8 @@ def _validate_timedelta(self, name): 'of a series/ndarray of type datetime64[ns] ' 'or a timedelta') - def _validate_offset(self, name): - # assumes self.is_offset_lhs - - if self.is_timedelta_rhs: - # 2 timedeltas - if name not in ('__div__', '__rdiv__', '__truediv__', - '__rtruediv__', '__add__', '__radd__', '__sub__', - '__rsub__'): - raise TypeError("can only operate on a timedeltas for addition" - ", subtraction, and division, but the operator" - " [{name}] was passed".format(name=name)) - - elif self.is_datetime_rhs: - if name not in ('__add__', '__radd__'): - raise TypeError("can only operate on a timedelta/DateOffset " - "and a datetime for addition, but the operator" - " [{name}] was passed".format(name=name)) - - else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 'or a timedelta') - def _validate(self, lvalues, rvalues, name): - if self.is_datetime_lhs: - return self._validate_datetime(lvalues, rvalues, name) - elif self.is_timedelta_lhs: - return self._validate_timedelta(name) - elif self.is_offset_lhs: - return self._validate_offset(name) - - if ((self.is_integer_lhs or self.is_floating_lhs) and - self.is_timedelta_rhs): - self._check_timedelta_with_numeric(name) - else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 'or a timedelta') + return self._validate_timedelta(name) def _check_timedelta_with_numeric(self, name): if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'): @@ -498,7 +426,7 @@ def _convert_to_array(self, values, name=None, other=None): # if this is a Series that contains relevant dtype info, then use this # instead of the inferred type; this avoids coercing Series([NaT], # dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]') - elif (isinstance(values, pd.Series) and + elif 
(isinstance(values, (pd.Series, ABCDatetimeIndex)) and (is_timedelta64_dtype(values) or is_datetime64_dtype(values))): supplied_dtype = values.dtype @@ -513,13 +441,11 @@ def _convert_to_array(self, values, name=None, other=None): values = np.empty(values.shape, dtype='timedelta64[ns]') values[:] = iNaT - # a datelike elif isinstance(values, ABCDatetimeIndex): - # TODO: why are we casting to_series in the first place? - values = values.to_series(keep_tz=True) - # datetime with tz - elif (isinstance(ovalues, datetime.datetime) and - hasattr(ovalues, 'tzinfo')): + # a datelike + pass + elif isinstance(ovalues, datetime.datetime): + # datetime scalar values = pd.DatetimeIndex(values) # datetime array with tz elif is_datetimetz(values): @@ -571,17 +497,10 @@ def _convert_for_datetime(self, lvalues, rvalues): mask = isna(lvalues) | isna(rvalues) # datetimes require views - if self.is_datetime_lhs or self.is_datetime_rhs: + if self.is_datetime_rhs: # datetime subtraction means timedelta - if self.is_datetime_lhs and self.is_datetime_rhs: - if self.name in ('__sub__', '__rsub__'): - self.dtype = 'timedelta64[ns]' - else: - self.dtype = 'datetime64[ns]' - elif self.is_datetime64tz_lhs: - self.dtype = lvalues.dtype - elif self.is_datetime64tz_rhs: + if self.is_datetime64tz_rhs: self.dtype = rvalues.dtype else: self.dtype = 'datetime64[ns]' @@ -601,15 +520,11 @@ def _offset(lvalues, rvalues): self.na_op = lambda x, y: getattr(x, self.name)(y) return lvalues, rvalues - if self.is_offset_lhs: - lvalues, rvalues = _offset(lvalues, rvalues) - elif self.is_offset_rhs: + if self.is_offset_rhs: rvalues, lvalues = _offset(rvalues, lvalues) else: # with tz, convert to UTC - if self.is_datetime64tz_lhs: - lvalues = lvalues.tz_convert('UTC').tz_localize(None) if self.is_datetime64tz_rhs: rvalues = rvalues.tz_convert('UTC').tz_localize(None) @@ -622,8 +537,6 @@ def _offset(lvalues, rvalues): self.dtype = 'timedelta64[ns]' # convert Tick DateOffset to underlying delta - if 
self.is_offset_lhs: - lvalues = to_timedelta(lvalues, box=False) if self.is_offset_rhs: rvalues = to_timedelta(rvalues, box=False) @@ -634,7 +547,7 @@ def _offset(lvalues, rvalues): # time delta division -> unit less # integer gets converted to timedelta in np < 1.6 if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and - not self.is_integer_rhs and not self.is_integer_lhs and + not self.is_integer_rhs and self.name in ('__div__', '__rdiv__', '__truediv__', '__rtruediv__', '__floordiv__', '__rfloordiv__')): @@ -750,10 +663,16 @@ def wrapper(left, right, name=name, na_op=na_op): return NotImplemented left, right = _align_method_SERIES(left, right) + if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): + result = op(pd.DatetimeIndex(left), right) + res_name = _get_series_op_result_name(left, right) + result.name = res_name # needs to be overriden if None + return construct_result(left, result, + index=left.index, name=res_name, + dtype=result.dtype) converted = _Op.get_op(left, right, name, na_op) - left, right = converted.left, converted.right lvalues, rvalues = converted.lvalues, converted.rvalues dtype = converted.dtype wrap_results = converted.wrap_results @@ -775,6 +694,7 @@ def wrapper(left, right, name=name, na_op=na_op): res_name = left.name result = wrap_results(safe_na_op(lvalues, rvalues)) + res_name = _get_series_op_result_name(left, right) return construct_result( left, result, @@ -786,6 +706,15 @@ def wrapper(left, right, name=name, na_op=na_op): return wrapper +def _get_series_op_result_name(left, right): + # `left` is always a pd.Series + if isinstance(right, (ABCSeries, pd.Index)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) @@ -1388,23 +1317,6 @@ def f(self, other): def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None, default_axis=None, **eval_kwargs): - # copied from Series 
na_op above, but without unnecessary branch for - # non-scalar - def na_op(x, y): - import pandas.core.computation.expressions as expressions - - try: - result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) - except TypeError: - - # TODO: might need to find_common_type here? - result = np.empty(len(x), dtype=x.dtype) - mask = notna(x) - result[mask] = op(x[mask], y) - result, changed = maybe_upcast_putmask(result, ~mask, np.nan) - - result = missing.fill_zeros(result, x, y, name, fill_zeros) - return result # work only for scalars def f(self, other): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index a421f2cb15bba..c1e9a62d98fd3 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -960,6 +960,13 @@ def test_timedelta64_ops_nat(self): assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta) + def test_td64_sub_NaT(self): + # GH#18808 + ser = Series([NaT, Timedelta('1s')]) + res = ser - NaT + expected = Series([NaT, NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), Timedelta(minutes=5, seconds=4), Timedelta('5m4s').to_timedelta64()]) @@ -1076,7 +1083,7 @@ def run_ops(ops, get_ser, test_ser): # defined for op_str in ops: op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate'): + with tm.assert_raises_regex(TypeError, 'operate|cannot'): op(test_ser) # ## timedelta64 ### @@ -1253,6 +1260,20 @@ def test_datetime_series_with_DateOffset(self): s + op(5) op(5) + s + def test_dt64_sub_NaT(self): + # GH#18808 + dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')]) + ser = pd.Series(dti) + res = ser - pd.NaT + expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + dti_tz = dti.tz_localize('Asia/Tokyo') + ser_tz = pd.Series(dti_tz) + res = ser_tz - pd.NaT + expected = 
pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + def test_datetime64_ops_nat(self): # GH 11349 datetime_series = Series([NaT, Timestamp('19900315')]) @@ -1260,13 +1281,10 @@ def test_datetime64_ops_nat(self): single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') # subtraction - assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp) assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) with pytest.raises(TypeError): -single_nat_dtype_datetime + datetime_series - assert_series_equal(nat_series_dtype_timestamp - NaT, - nat_series_dtype_timestamp) assert_series_equal(-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp) with pytest.raises(TypeError): @@ -2036,8 +2054,9 @@ def test_datetime64_with_index(self): result = s - s.index assert_series_equal(result, expected) - result = s - s.index.to_period() - assert_series_equal(result, expected) + with pytest.raises(TypeError): + # GH#18850 + result = s - s.index.to_period() df = DataFrame(np.random.randn(5, 2), index=date_range('20130101', periods=5)) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 2e3a7a6c28a11..6e711abf4491b 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -107,7 +107,7 @@ def test_shift(self): # incompat tz s2 = Series(date_range('2000-01-01 09:00:00', periods=5, tz='CET'), name='foo') - pytest.raises(ValueError, lambda: s - s2) + pytest.raises(TypeError, lambda: s - s2) def test_shift2(self): ts = Series(np.random.randn(5),
This is the culmination of a bunch of recent work. - [x] closes #18850, closes #18808, closes #17837 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry If merged, this will subsume #18960 and will obviate parts of #18964. This will also fix (but not test, so we'll leave the issue open for now) the lack of overflow checks #12534. Also in a follow-up we'll be able to remove a bunch of _TimeOp.
https://api.github.com/repos/pandas-dev/pandas/pulls/19024
2018-01-01T02:18:52Z
2018-01-04T00:27:50Z
2018-01-04T00:27:49Z
2018-01-23T04:40:43Z
CLN: Remove tseries v0.19.0 deprecations
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 5fd7c3e217928..f0ed3ebf6e192 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -247,6 +247,8 @@ Removal of prior version deprecations/changes - :func:`read_csv` has dropped the ``compact_ints`` and ``use_unsigned`` parameters (:issue:`13323`) - The ``Timestamp`` class has dropped the ``offset`` attribute in favor of ``freq`` (:issue:`13593`) - The ``Series``, ``Categorical``, and ``Index`` classes have dropped the ``reshape`` method (:issue:`13012`) +- ``pandas.tseries.frequencies.get_standard_freq`` has been removed in favor of ``pandas.tseries.frequencies.to_offset(freq).rule_code`` (:issue:`13874`) +- The ``freqstr`` keyword has been removed from ``pandas.tseries.frequencies.to_offset`` in favor of ``freq`` (:issue:`13874`) .. _whatsnew_0230.performance: diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index e1a6463e7c351..7c7e5c4a5a35c 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -11,9 +11,8 @@ from pandas.compat.numpy import np_datetime64_compat from pandas.core.series import Series -from pandas.tseries.frequencies import (_offset_map, get_freq_code, - _get_freq_str, _INVALID_FREQ_ERROR, - get_offset, get_standard_freq) +from pandas.tseries.frequencies import (_offset_map, get_freq_code, get_offset, + _get_freq_str, _INVALID_FREQ_ERROR) from pandas.core.indexes.datetimes import ( _to_m8, DatetimeIndex, _daterange_cache) import pandas._libs.tslibs.offsets as liboffsets @@ -2786,33 +2785,6 @@ def test_get_offset_legacy(): get_offset(name) -def test_get_standard_freq(): - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - fstr = get_standard_freq('W') - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - assert fstr == get_standard_freq('w') - with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - assert fstr == get_standard_freq('1w') - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - assert fstr == get_standard_freq(('W', 1)) - - with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR): - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - get_standard_freq('WeEk') - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - fstr = get_standard_freq('5Q') - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - assert fstr == get_standard_freq('5q') - - with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR): - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - get_standard_freq('5QuarTer') - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - assert fstr == get_standard_freq(('q', 5)) - - class TestOffsetAliases(object): def setup_method(self, method): diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py index beea6df086b72..2486895086b2f 100644 --- a/pandas/tests/tseries/test_frequencies.py +++ b/pandas/tests/tseries/test_frequencies.py @@ -551,10 +551,6 @@ def test_frequency_misc(self): with tm.assert_raises_regex(ValueError, 'Could not evaluate'): frequencies.to_offset(('', '')) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = frequencies.get_standard_freq(offsets.Hour()) - assert result == 'H' - _dti = DatetimeIndex diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index f6e3d1f271036..4d1dd422be946 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -3,7 +3,6 @@ from pandas.compat import zip from pandas import compat import re -import warnings import numpy as np @@ -14,7 +13,6 @@ is_datetime64_dtype) from pandas.tseries.offsets import DateOffset -from pandas.util._decorators import deprecate_kwarg import 
pandas.tseries.offsets as offsets from pandas._libs.tslib import Timedelta @@ -143,7 +141,6 @@ def get_period_alias(offset_str): 'nanoseconds': Nano(1)} -@deprecate_kwarg(old_arg_name='freqstr', new_arg_name='freq') def to_offset(freq): """ Return DateOffset object from string or tuple representation @@ -294,18 +291,6 @@ def get_offset(name): getOffset = get_offset - -def get_standard_freq(freq): - """ - Return the standardized frequency string - """ - - msg = ("get_standard_freq is deprecated. Use to_offset(freq).rule_code " - "instead.") - warnings.warn(msg, FutureWarning, stacklevel=2) - return to_offset(freq).rule_code - - # --------------------------------------------------------------------- # Period codes
* Remove frequencies.get_standard_freq * Drop the "freqstr" keyword from frequencies.to_offset Deprecated in v0.19.0 xref #13874
https://api.github.com/repos/pandas-dev/pandas/pulls/19023
2018-01-01T01:25:30Z
2018-01-02T11:20:59Z
2018-01-02T11:20:59Z
2018-01-02T17:11:30Z
API: Prohibit non-numeric dtypes in IntervalIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4d806f1f05a16..a62a737fbba31 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -211,6 +211,7 @@ Other API Changes - Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'``(:issue:`18808`) - Operations between a :class:`Series` with dtype ``dtype='datetime64[ns]'`` and a :class:`PeriodIndex` will correctly raises ``TypeError`` (:issue:`18850`) - Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (issue:`18817`) +- :class:`IntervalIndex` and ``IntervalDtype`` no longer support categorical, object, and string subtypes (:issue:`19016`) - The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) .. _whatsnew_0230.deprecations: @@ -279,11 +280,11 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ -- Changed spelling of "numpy" to "NumPy", and "python" to "Python". (:issue:`19017`) +- Changed spelling of "numpy" to "NumPy", and "python" to "Python". (:issue:`19017`) - Consistency when introducing code samples, using either colon or period. Rewrote some sentences for greater clarity, added more dynamic references to functions, methods and classes. - (:issue:`18941`, :issue:`18948`, :issue:`18973`, :issue:`19017`) + (:issue:`18941`, :issue:`18948`, :issue:`18973`, :issue:`19017`) - .. 
_whatsnew_0230.bug_fixes: @@ -310,7 +311,7 @@ Conversion - Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) Indexing diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index d1637873eb6e1..08773354d44d8 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -641,6 +641,8 @@ def __new__(cls, subtype=None): ---------- subtype : the dtype of the Interval """ + from pandas.core.dtypes.common import ( + is_categorical_dtype, is_string_dtype, pandas_dtype) if isinstance(subtype, IntervalDtype): return subtype @@ -659,7 +661,6 @@ def __new__(cls, subtype=None): if m is not None: subtype = m.group('subtype') - from pandas.core.dtypes.common import pandas_dtype try: subtype = pandas_dtype(subtype) except TypeError: @@ -670,6 +671,12 @@ def __new__(cls, subtype=None): u.subtype = None return u + if is_categorical_dtype(subtype) or is_string_dtype(subtype): + # GH 19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalDtype') + raise TypeError(msg) + try: return cls._cache[str(subtype)] except KeyError: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index def9b151f5c91..fd1980f9ab429 100644 --- a/pandas/core/indexes/interval.py +++ 
b/pandas/core/indexes/interval.py @@ -11,6 +11,8 @@ is_list_like, is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, + is_categorical_dtype, + is_string_dtype, is_integer_dtype, is_float_dtype, is_interval_dtype, @@ -92,6 +94,30 @@ def _get_interval_closed_bounds(interval): return left, right +def maybe_convert_platform_interval(values): + """ + Try to do platform conversion, with special casing for IntervalIndex. + Wrapper around maybe_convert_platform that alters the default return + dtype in certain cases to be compatible with IntervalIndex. For example, + empty lists return with integer dtype instead of object dtype, which is + prohibited for IntervalIndex. + + Parameters + ---------- + values : array-like + + Returns + ------- + array + """ + if isinstance(values, (list, tuple)) and len(values) == 0: + # GH 19016 + # empty lists/tuples get object dtype by default, but this is not + # prohibited for IntervalIndex, so coerce to integer instead + return np.array([], dtype=np.intp) + return maybe_convert_platform(values) + + def _new_IntervalIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have @@ -206,7 +232,7 @@ def __new__(cls, data, closed=None, if is_scalar(data): cls._scalar_data_error(data) - data = maybe_convert_platform(data) + data = maybe_convert_platform_interval(data) left, right, infer_closed = intervals_to_interval_bounds(data) if _all_not_none(closed, infer_closed) and closed != infer_closed: @@ -242,6 +268,11 @@ def _simple_new(cls, left, right, closed=None, name=None, '[{rtype}] types') raise ValueError(msg.format(ltype=type(left).__name__, rtype=type(right).__name__)) + elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): + # GH 19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + raise TypeError(msg) elif isinstance(left, ABCPeriodIndex): msg = 'Period dtypes are not supported, use a PeriodIndex instead' raise ValueError(msg) @@ -403,7 
+434,7 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - breaks = maybe_convert_platform(breaks) + breaks = maybe_convert_platform_interval(breaks) return cls.from_arrays(breaks[:-1], breaks[1:], closed, name=name, copy=copy) @@ -444,8 +475,8 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - left = maybe_convert_platform(left) - right = maybe_convert_platform(right) + left = maybe_convert_platform_interval(left) + right = maybe_convert_platform_interval(right) return cls._simple_new(left, right, closed, name=name, copy=copy, verify_integrity=True) @@ -493,7 +524,7 @@ def from_intervals(cls, data, name=None, copy=False): left, right, closed = data.left, data.right, data.closed name = name or data.name else: - data = maybe_convert_platform(data) + data = maybe_convert_platform_interval(data) left, right, closed = intervals_to_interval_bounds(data) return cls.from_arrays(left, right, closed, name=name, copy=False) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index d8e16482a414e..6a3715fd66159 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -152,7 +152,7 @@ def test_update_dtype(self, dtype, new_dtype): assert result.ordered is expected_ordered @pytest.mark.parametrize('bad_dtype', [ - 'foo', object, np.int64, PeriodDtype('Q'), IntervalDtype(object)]) + 'foo', object, np.int64, PeriodDtype('Q')]) def test_update_dtype_errors(self, bad_dtype): dtype = CategoricalDtype(list('abc'), False) msg = 'a CategoricalDtype must be passed to perform an update, ' @@ -460,6 +460,17 @@ def test_construction(self): assert i.subtype == np.dtype('int64') assert is_interval_dtype(i) + @pytest.mark.parametrize('subtype', [ + CategoricalDtype(list('abc'), False), + 
CategoricalDtype(list('wxyz'), True), + object, str, '<U10', 'interval[category]', 'interval[object]']) + def test_construction_not_supported(self, subtype): + # GH 19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalDtype') + with tm.assert_raises_regex(TypeError, msg): + IntervalDtype(subtype) + def test_construction_generic(self): # generic i = IntervalDtype('interval') diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index e2f48f40e9b7a..dd673294b128f 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -4,7 +4,7 @@ import numpy as np from pandas import ( Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, - Timedelta, date_range, timedelta_range) + Timedelta, date_range, timedelta_range, Categorical) from pandas.compat import lzip from pandas.core.common import _asarray_tuplesafe from pandas.tests.indexes.common import Base @@ -42,7 +42,6 @@ def create_index_with_nan(self, closed='right'): @pytest.mark.parametrize('data', [ Index([0, 1, 2, 3, 4]), - Index(list('abcde')), date_range('2017-01-01', periods=5), date_range('2017-01-01', periods=5, tz='US/Eastern'), timedelta_range('1 day', periods=5)]) @@ -138,10 +137,10 @@ def test_constructors_nan(self, closed, data): [], np.array([], dtype='int64'), np.array([], dtype='float64'), - np.array([], dtype=object)]) + np.array([], dtype='datetime64[ns]')]) def test_constructors_empty(self, data, closed): # GH 18421 - expected_dtype = data.dtype if isinstance(data, np.ndarray) else object + expected_dtype = getattr(data, 'dtype', np.intp) expected_values = np.array([], dtype=object) expected_index = IntervalIndex(data, closed=closed) @@ -223,6 +222,48 @@ def test_constructors_errors(self): with tm.assert_raises_regex(ValueError, msg): IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1)) + # GH 19016: categorical data + data = 
Categorical(list('01234abcde'), ordered=True) + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_breaks(data) + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_arrays(data[:-1], data[1:]) + + @pytest.mark.parametrize('data', [ + tuple('0123456789'), + list('abcdefghij'), + np.array(list('abcdefghij'), dtype=object), + np.array(list('abcdefghij'), dtype='<U1')]) + def test_constructors_errors_string(self, data): + # GH 19016 + left, right = data[:-1], data[1:] + tuples = lzip(left, right) + ivs = [Interval(l, r) for l, r in tuples] or data + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex(ivs) + + with tm.assert_raises_regex(TypeError, msg): + Index(ivs) + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_intervals(ivs) + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_breaks(data) + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_arrays(left, right) + + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_tuples(tuples) + @pytest.mark.parametrize('tz_left, tz_right', [ (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')]) def test_constructors_errors_tz(self, tz_left, tz_right): @@ -298,18 +339,6 @@ def test_length(self, closed, breaks): expected = Index(iv.length if notna(iv) else iv for iv in index) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('breaks', [ - list('abcdefgh'), - lzip(range(10), range(1, 11)), - [['A', 'B'], ['a', 'b'], ['c', 'd'], ['e', 'f']], - [Interval(0, 1), Interval(1, 2), Interval(3, 4), Interval(4, 5)]]) - def test_length_errors(self, closed, breaks): - # GH 18789 - index = IntervalIndex.from_breaks(breaks) - msg = 'IntervalIndex contains Intervals without defined length' - with tm.assert_raises_regex(TypeError, msg): 
- index.length - def test_with_nans(self, closed): index = self.create_index(closed=closed) assert not index.hasnans @@ -428,9 +457,7 @@ def test_delete(self, closed): interval_range(0, periods=10, closed='neither'), interval_range(1.7, periods=8, freq=2.5, closed='both'), interval_range(Timestamp('20170101'), periods=12, closed='left'), - interval_range(Timedelta('1 day'), periods=6, closed='right'), - IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]), - IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])]) + interval_range(Timedelta('1 day'), periods=6, closed='right')]) def test_insert(self, data): item = data[0] idx_item = IntervalIndex([item]) @@ -504,15 +531,6 @@ def test_unique(self, closed): [(0, 1), (0, 1), (2, 3)], closed=closed) assert not idx.is_unique - # unique mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed) - assert idx.is_unique - - # duplicate mixed - idx = IntervalIndex.from_tuples( - [(0, 1), ('a', 'b'), (0, 1)], closed=closed) - assert not idx.is_unique - # empty idx = IntervalIndex([], closed=closed) assert idx.is_unique
- [X] closes #19016 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19022
2018-01-01T00:56:09Z
2018-01-05T14:15:45Z
2018-01-05T14:15:45Z
2018-01-05T17:44:28Z
ENH: DataFrame.append preserves columns dtype if possible
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 408a52e0526ee..14146b9e455b4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -380,6 +380,7 @@ Other Enhancements - :class:`IntervalIndex` now supports time zone aware ``Interval`` objects (:issue:`18537`, :issue:`18538`) - :func:`Series` / :func:`DataFrame` tab completion also returns identifiers in the first level of a :func:`MultiIndex`. (:issue:`16326`) - :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`) +- :meth:`DataFrame.append` can now in more cases preserve the type of the calling dataframe's columns (e.g. if both are ``CategoricalIndex``) (:issue:`18359`) - :func:``DataFrame.to_json`` and ``Series.to_json`` now accept an ``index`` argument which allows the user to exclude the index from the JSON output (:issue:`17394`) - ``IntervalIndex.to_tuples()`` has gained the ``na_tuple`` parameter to control whether NA is returned as a tuple of NA, or NA itself (:issue:`18756`) - ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9e57579ddfc05..ca20def643c2b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6113,8 +6113,11 @@ def append(self, other, ignore_index=False, verify_integrity=False): # index name will be reset index = Index([other.name], name=self.index.name) - combined_columns = self.columns.tolist() + self.columns.union( - other.index).difference(self.columns).tolist() + idx_diff = other.index.difference(self.columns) + try: + combined_columns = self.columns.append(idx_diff) + except TypeError: + combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 
ffd37dc4b2f59..640d09f3587fb 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1,5 +1,7 @@ from warnings import catch_warnings +from itertools import combinations, product +import datetime as dt import dateutil import numpy as np from numpy.random import randn @@ -829,12 +831,102 @@ def test_append_preserve_index_name(self): result = df1.append(df2) assert result.index.name == 'A' + indexes_can_append = [ + pd.RangeIndex(3), + pd.Index([4, 5, 6]), + pd.Index([4.5, 5.5, 6.5]), + pd.Index(list('abc')), + pd.CategoricalIndex('A B C'.split()), + pd.CategoricalIndex('D E F'.split(), ordered=True), + pd.DatetimeIndex([dt.datetime(2013, 1, 3, 0, 0), + dt.datetime(2013, 1, 3, 6, 10), + dt.datetime(2013, 1, 3, 7, 12)]), + ] + + indexes_cannot_append_with_other = [ + pd.IntervalIndex.from_breaks([0, 1, 2, 3]), + pd.MultiIndex.from_arrays(['A B C'.split(), 'D E F'.split()]), + ] + + all_indexes = indexes_can_append + indexes_cannot_append_with_other + + @pytest.mark.parametrize("index", + all_indexes, + ids=lambda x: x.__class__.__name__) + def test_append_same_columns_type(self, index): + # GH18359 + + # df wider than ser + df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index) + ser_index = index[:2] + ser = pd.Series([7, 8], index=ser_index, name=2) + result = df.append(ser) + expected = pd.DataFrame([[1., 2., 3.], [4, 5, 6], [7, 8, np.nan]], + index=[0, 1, 2], + columns=index) + assert_frame_equal(result, expected) + + # ser wider than df + ser_index = index + index = index[:2] + df = pd.DataFrame([[1, 2], [4, 5]], columns=index) + ser = pd.Series([7, 8, 9], index=ser_index, name=2) + result = df.append(ser) + expected = pd.DataFrame([[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]], + index=[0, 1, 2], + columns=ser_index) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize("df_columns, series_index", + combinations(indexes_can_append, r=2), + ids=lambda x: x.__class__.__name__) + def 
test_append_different_columns_types(self, df_columns, series_index): + # GH18359 + # See also test 'test_append_different_columns_types_raises' below + # for errors raised when appending + + df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns) + ser = pd.Series([7, 8, 9], index=series_index, name=2) + + result = df.append(ser) + idx_diff = ser.index.difference(df_columns) + combined_columns = Index(df_columns.tolist()).append(idx_diff) + expected = pd.DataFrame([[1., 2., 3., np.nan, np.nan, np.nan], + [4, 5, 6, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, 7, 8, 9]], + index=[0, 1, 2], + columns=combined_columns) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "index_can_append, index_cannot_append_with_other", + product(indexes_can_append, indexes_cannot_append_with_other), + ids=lambda x: x.__class__.__name__) + def test_append_different_columns_types_raises( + self, index_can_append, index_cannot_append_with_other): + # GH18359 + # Dataframe.append will raise if IntervalIndex/MultiIndex appends + # or is appended to a different index type + # + # See also test 'test_append_different_columns_types' above for + # appending without raising. 
+ + df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) + ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other, + name=2) + with pytest.raises(TypeError): + df.append(ser) + + df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], + columns=index_cannot_append_with_other) + ser = pd.Series([7, 8, 9], index=index_can_append, name=2) + with pytest.raises(TypeError): + df.append(ser) + def test_append_dtype_coerce(self): # GH 4993 # appending with datetime will incorrectly convert datetime64 - import datetime as dt - from pandas import NaT df1 = DataFrame(index=[1, 2], data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)], @@ -845,7 +937,9 @@ def test_append_dtype_coerce(self): dt.datetime(2013, 1, 4, 7, 10)]], columns=['start_time', 'end_time']) - expected = concat([Series([NaT, NaT, dt.datetime(2013, 1, 3, 6, 10), + expected = concat([Series([pd.NaT, + pd.NaT, + dt.datetime(2013, 1, 3, 6, 10), dt.datetime(2013, 1, 4, 7, 10)], name='end_time'), Series([dt.datetime(2013, 1, 1, 0, 0), diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 92bedbabdf2f1..1004b40bfb4c1 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1540,12 +1540,14 @@ def test_crosstab_normalize(self): index=pd.Index([1, 2, 'All'], name='a', dtype='object'), - columns=pd.Index([3, 4], name='b')) + columns=pd.Index([3, 4], name='b', + dtype='object')) col_normal_margins = pd.DataFrame([[0.5, 0, 0.2], [0.5, 1.0, 0.8]], index=pd.Index([1, 2], name='a', dtype='object'), columns=pd.Index([3, 4, 'All'], - name='b')) + name='b', + dtype='object')) all_normal_margins = pd.DataFrame([[0.2, 0, 0.2], [0.2, 0.6, 0.8], @@ -1554,7 +1556,8 @@ def test_crosstab_normalize(self): name='a', dtype='object'), columns=pd.Index([3, 4, 'All'], - name='b')) + name='b', + dtype='object')) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index', margins=True), row_normal_margins) 
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns',
- [x] closes #18359 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR makes ``DataFrame.append`` preserve columns dtype, e.g.: ```python >>> idx = pd.CategoricalIndex('a b'.split()) >>> df = pd.DataFrame([[1, 2]], columns=idx) >>> ser = pd.Series([3, 4], index=idx, name=1) >>> df.append(ser).columns CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=False, dtype='category') ``` Previously, the above returned ``Index(['a', 'b'], dtype='object')``, i.e. the index type information was lost when using ``append``.
https://api.github.com/repos/pandas-dev/pandas/pulls/19021
2017-12-31T23:14:24Z
2018-04-20T00:35:14Z
2018-04-20T00:35:14Z
2021-05-22T23:20:06Z
Spellcheck
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 46c3ffef58228..da7679d8a3f54 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -48,7 +48,7 @@ a default integer index: s = pd.Series([1,3,5,np.nan,6,8]) s -Creating a :class:`DataFrame` by passing a numpy array, with a datetime index +Creating a :class:`DataFrame` by passing a NumPy array, with a datetime index and labeled columns: .. ipython:: python @@ -114,7 +114,7 @@ Here is how to view the top and bottom rows of the frame: df.head() df.tail(3) -Display the index, columns, and the underlying numpy data: +Display the index, columns, and the underlying NumPy data: .. ipython:: python @@ -311,7 +311,7 @@ Setting values by position: df.iat[0,1] = 0 -Setting by assigning with a numpy array: +Setting by assigning with a NumPy array: .. ipython:: python diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index be749dfc1f594..25f7c5a3ad948 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -316,7 +316,9 @@ Basic multi-index slicing using slices, lists, and labels. dfmi.loc[(slice('A1','A3'), slice(None), ['C1', 'C3']), :] -You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax using ``:``, rather than using ``slice(None)``. + +You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax +using ``:``, rather than using ``slice(None)``. .. ipython:: python @@ -557,7 +559,7 @@ Take Methods .. _advanced.take: -Similar to numpy ndarrays, pandas Index, Series, and DataFrame also provides +Similar to NumPy ndarrays, pandas Index, Series, and DataFrame also provides the ``take`` method that retrieves elements along a given axis at the given indices. The given indices must be either a list or an ndarray of integer index positions. ``take`` will also accept negative integers as relative positions to the end of the object. @@ -729,7 +731,7 @@ This is an Immutable array implementing an ordered, sliceable set. 
Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects. ``RangeIndex`` is a sub-class of ``Int64Index`` added in version 0.18.0, now providing the default index for all ``NDFrame`` objects. -``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analogous to python `range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`__. +``RangeIndex`` is an optimized version of ``Int64Index`` that can represent a monotonic ordered set. These are analogous to Python `range types <https://docs.python.org/3/library/stdtypes.html#typesseq-range>`__. .. _indexing.float64index: @@ -763,7 +765,6 @@ The only positional indexing is via ``iloc``. sf.iloc[3] A scalar index that is not found will raise a ``KeyError``. - Slicing is primarily on the values of the index when using ``[],ix,loc``, and **always** positional when using ``iloc``. The exception is when the slice is boolean, in which case it will always be positional. diff --git a/doc/source/api.rst b/doc/source/api.rst index 17f6b8df0170d..02f729c89295b 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -730,7 +730,7 @@ The dtype information is available on the ``Categorical`` Categorical.codes ``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts -the Categorical back to a numpy array, so categories and order information is not preserved! +the Categorical back to a NumPy array, so categories and order information is not preserved! .. 
autosummary:: :toctree: generated/ diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 74b3dbb83ea91..bd49b5b7c9b32 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -395,7 +395,7 @@ raise a ValueError: In [56]: pd.Series(['foo', 'bar', 'baz']) == pd.Series(['foo']) ValueError: Series lengths must match to compare -Note that this is different from the numpy behavior where a comparison can +Note that this is different from the NumPy behavior where a comparison can be broadcast: .. ipython:: python @@ -1000,7 +1000,7 @@ We create a frame similar to the one used in the above sections. tsdf.iloc[3:7] = np.nan tsdf -Transform the entire frame. ``.transform()`` allows input functions as: a numpy function, a string +Transform the entire frame. ``.transform()`` allows input functions as: a NumPy function, a string function name or a user defined function. .. ipython:: python @@ -1510,7 +1510,7 @@ To iterate over the rows of a DataFrame, you can use the following methods: one of the following approaches: * Look for a *vectorized* solution: many operations can be performed using - built-in methods or numpy functions, (boolean) indexing, ... + built-in methods or NumPy functions, (boolean) indexing, ... * When you have a function that cannot work on the full DataFrame/Series at once, it is better to use :meth:`~DataFrame.apply` instead of iterating @@ -1971,7 +1971,7 @@ from the current type (e.g. ``int`` to ``float``). df3.dtypes The ``values`` attribute on a DataFrame return the *lower-common-denominator* of the dtypes, meaning -the dtype that can accommodate **ALL** of the types in the resulting homogeneous dtyped numpy array. This can +the dtype that can accommodate **ALL** of the types in the resulting homogeneous dtyped NumPy array. This can force some *upcasting*. .. 
ipython:: python @@ -2253,7 +2253,7 @@ can define a function that returns a tree of child dtypes: return dtype return [dtype, [subdtypes(dt) for dt in subs]] -All numpy dtypes are subclasses of ``numpy.generic``: +All NumPy dtypes are subclasses of ``numpy.generic``: .. ipython:: python @@ -2262,4 +2262,4 @@ All numpy dtypes are subclasses of ``numpy.generic``: .. note:: Pandas also defines the types ``category``, and ``datetime64[ns, tz]``, which are not integrated into the normal - numpy hierarchy and wont show up with the above function. + NumPy hierarchy and wont show up with the above function. diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 2acc919d1fbdf..7364167611730 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -40,7 +40,7 @@ The categorical data type is useful in the following cases: * The lexical order of a variable is not the same as the logical order ("one", "two", "three"). By converting to a categorical and specifying an order on the categories, sorting and min/max will use the logical order instead of the lexical order, see :ref:`here <categorical.sort>`. -* As a signal to other python libraries that this column should be treated as a categorical +* As a signal to other Python libraries that this column should be treated as a categorical variable (e.g. to use suitable statistical methods or plot types). See also the :ref:`API docs on categoricals<api.categorical>`. @@ -366,7 +366,7 @@ or simply set the categories to a predefined scale, use :func:`Categorical.set_c .. note:: Be aware that :func:`Categorical.set_categories` cannot know whether some category is omitted intentionally or because it is misspelled or (under Python3) due to a type difference (e.g., - numpys S1 dtype and python strings). This can result in surprising behaviour! + numpys S1 dtype and Python strings). This can result in surprising behaviour! 
Sorting and Order ----------------- diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst index 1f2424d8a22f3..e9e0d7716af3a 100644 --- a/doc/source/comparison_with_sas.rst +++ b/doc/source/comparison_with_sas.rst @@ -10,7 +10,7 @@ performed in pandas. If you're new to pandas, you might want to first read through :ref:`10 Minutes to pandas<10min>` to familiarize yourself with the library. -As is customary, we import pandas and numpy as follows: +As is customary, we import pandas and NumPy as follows: .. ipython:: python @@ -100,7 +100,7 @@ specifying the column names. A pandas ``DataFrame`` can be constructed in many different ways, but for a small number of values, it is often convenient to specify it as -a python dictionary, where the keys are the column names +a Python dictionary, where the keys are the column names and the values are the data. .. ipython:: python diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst index 2112c7de8c897..ba069b5a44c72 100644 --- a/doc/source/comparison_with_sql.rst +++ b/doc/source/comparison_with_sql.rst @@ -10,7 +10,7 @@ various SQL operations would be performed using pandas. If you're new to pandas, you might want to first read through :ref:`10 Minutes to pandas<10min>` to familiarize yourself with the library. -As is customary, we import pandas and numpy as follows: +As is customary, we import pandas and NumPy as follows: .. ipython:: python diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 30071c6c5b83c..06afa440aa26c 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -57,9 +57,8 @@ Covariance s2 = pd.Series(np.random.randn(1000)) s1.cov(s2) -Analogously, :meth:`DataFrame.cov` to compute -pairwise covariances among the series in the DataFrame, also excluding -NA/null values. +Analogously, :meth:`DataFrame.cov` to compute pairwise covariances among the +series in the DataFrame, also excluding NA/null values. .. 
_computation.covariance.caveats: diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index b25f9779d3636..83437022563d5 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -118,7 +118,7 @@ Creating a development environment ---------------------------------- To test out code changes, you'll need to build pandas from source, which -requires a C compiler and python environment. If you're making documentation +requires a C compiler and Python environment. If you're making documentation changes, you can skip to :ref:`contributing.documentation` but you won't be able to build the documentation locally before pushing your changes. @@ -187,7 +187,7 @@ At this point you should be able to import pandas from your locally built versio 0.22.0.dev0+29.g4ad6d4d74 This will create the new environment, and not touch any of your existing environments, -nor any existing python installation. +nor any existing Python installation. To view your environments:: diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index f13e5e67de07e..da54a6a5f5c02 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -41,7 +41,7 @@ above what the in-line examples offer. Pandas (pd) and Numpy (np) are the only two abbreviated imported modules. The rest are kept explicitly imported for newer users. -These examples are written for python 3.4. Minor tweaks might be necessary for earlier python +These examples are written for Python 3. Minor tweaks might be necessary for earlier python versions. Idioms @@ -750,7 +750,7 @@ Timeseries <http://nipunbatra.github.io/2015/06/timeseries/>`__ Turn a matrix with hours in columns and days in rows into a continuous row sequence in the form of a time series. -`How to rearrange a python pandas DataFrame? +`How to rearrange a Python pandas DataFrame? 
<http://stackoverflow.com/questions/15432659/how-to-rearrange-a-python-pandas-dataframe>`__ `Dealing with duplicates when reindexing a timeseries to a specified frequency @@ -1152,7 +1152,7 @@ Storing Attributes to a group node store = pd.HDFStore('test.h5') store.put('df',df) - # you can store an arbitrary python object via pickle + # you can store an arbitrary Python object via pickle store.get_storer('df').attrs.my_attribute = dict(A = 10) store.get_storer('df').attrs.my_attribute @@ -1167,7 +1167,7 @@ Storing Attributes to a group node Binary Files ************ -pandas readily accepts numpy record arrays, if you need to read in a binary +pandas readily accepts NumPy record arrays, if you need to read in a binary file consisting of an array of C structs. For example, given this C program in a file called ``main.c`` compiled with ``gcc main.c -std=gnu99`` on a 64-bit machine, diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index da9d2123bd1ca..7237dc5f1200b 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -23,7 +23,7 @@ Intro to Data Structures We'll start with a quick, non-comprehensive overview of the fundamental data structures in pandas to get you started. The fundamental behavior about data types, indexing, and axis labeling / alignment apply across all of the -objects. To get started, import numpy and load pandas into your namespace: +objects. To get started, import NumPy and load pandas into your namespace: .. ipython:: python @@ -877,7 +877,7 @@ of DataFrames: wp['Item3'] = wp['Item1'] / wp['Item2'] The API for insertion and deletion is the same as for DataFrame. And as with -DataFrame, if the item is a valid python identifier, you can access it as an +DataFrame, if the item is a valid Python identifier, you can access it as an attribute and tab-complete it in IPython. 
Transposing diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 8ed647c2a19bc..c770bf2851643 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -27,7 +27,7 @@ Statistics and Machine Learning `Statsmodels <http://www.statsmodels.org/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Statsmodels is the prominent python "statistics and econometrics library" and it has +Statsmodels is the prominent Python "statistics and econometrics library" and it has a long-standing special relationship with pandas. Statsmodels provides powerful statistics, econometrics, analysis and modeling functionality that is out of pandas' scope. Statsmodels leverages pandas objects as the underlying data container for computation. @@ -72,7 +72,7 @@ Hadley Wickham's `ggplot2 <http://ggplot2.org/>`__ is a foundational exploratory Based on `"The Grammar of Graphics" <http://www.cs.uic.edu/~wilkinson/TheGrammarOfGraphics/GOG.html>`__ it provides a powerful, declarative and extremely general way to generate bespoke plots of any kind of data. It's really quite incredible. Various implementations to other languages are available, -but a faithful implementation for python users has long been missing. Although still young +but a faithful implementation for Python users has long been missing. Although still young (as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been progressing quickly in that direction. @@ -192,7 +192,7 @@ or multi-indexed DataFrames. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ fredapi is a Python interface to the `Federal Reserve Economic Data (FRED) <http://research.stlouisfed.org/fred2/>`__ provided by the Federal Reserve Bank of St. Louis. It works with both the FRED database and ALFRED database that -contains point-in-time data (i.e. historic data revisions). fredapi provides a wrapper in python to the FRED +contains point-in-time data (i.e. historic data revisions). 
fredapi provides a wrapper in Python to the FRED HTTP API, and also provides several convenient methods for parsing and analyzing point-in-time data from ALFRED. fredapi makes use of pandas and returns data in a Series or DataFrame. This module requires a FRED API key that you can obtain for free on the FRED website. diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 362c998493ae8..57f07a41afbc3 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -24,13 +24,13 @@ Enhancing Performance Cython (Writing C extensions for pandas) ---------------------------------------- -For many use cases writing pandas in pure python and numpy is sufficient. In some +For many use cases writing pandas in pure Python and NumPy is sufficient. In some computationally heavy applications however, it can be possible to achieve sizeable speed-ups by offloading work to `cython <http://cython.org/>`__. This tutorial assumes you have refactored as much as possible in python, for example -trying to remove for loops and making use of numpy vectorization, it's always worth -optimising in python first. +trying to remove for loops and making use of NumPy vectorization, it's always worth +optimising in Python first. This tutorial walks through a "typical" process of cythonizing a slow computation. We use an `example from the cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`__ @@ -86,8 +86,8 @@ hence we'll concentrate our efforts cythonizing these two functions. .. note:: - In python 2 replacing the ``range`` with its generator counterpart (``xrange``) - would mean the ``range`` line would vanish. In python 3 ``range`` is already a generator. + In Python 2 replacing the ``range`` with its generator counterpart (``xrange``) + would mean the ``range`` line would vanish. In Python 3 ``range`` is already a generator. .. 
_enhancingperf.plain: @@ -232,7 +232,7 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra .. note:: Loops like this would be *extremely* slow in python, but in Cython looping - over numpy arrays is *fast*. + over NumPy arrays is *fast*. .. code-block:: ipython @@ -315,7 +315,7 @@ Numba works by generating optimized machine code using the LLVM compiler infrast Jit ~~~ -Using ``numba`` to just-in-time compile your code. We simply take the plain python code from above and annotate with the ``@jit`` decorator. +Using ``numba`` to just-in-time compile your code. We simply take the plain Python code from above and annotate with the ``@jit`` decorator. .. code-block:: python @@ -391,7 +391,7 @@ Caveats ``numba`` will execute on any function, but can only accelerate certain classes of functions. -``numba`` is best at accelerating functions that apply numerical functions to numpy arrays. When passed a function that only uses operations it knows how to accelerate, it will execute in ``nopython`` mode. +``numba`` is best at accelerating functions that apply numerical functions to NumPy arrays. When passed a function that only uses operations it knows how to accelerate, it will execute in ``nopython`` mode. If ``numba`` is passed a function that includes something it doesn't know how to work with -- a category that currently includes sets, lists, dictionaries, or string functions -- it will revert to ``object mode``. In ``object mode``, numba will execute but your code will not speed up significantly. If you would prefer that ``numba`` throw an error if it cannot compile a function in a way that speeds up your code, pass numba the argument ``nopython=True`` (e.g. ``@numba.jit(nopython=True)``). For more on troubleshooting ``numba`` modes, see the `numba troubleshooting page <http://numba.pydata.org/numba-doc/0.20.0/user/troubleshoot.html#the-compiled-code-is-too-slow>`__. 
@@ -779,7 +779,7 @@ Technical Minutia Regarding Expression Evaluation Expressions that would result in an object dtype or involve datetime operations (because of ``NaT``) must be evaluated in Python space. The main reason for -this behavior is to maintain backwards compatibility with versions of numpy < +this behavior is to maintain backwards compatibility with versions of NumPy < 1.7. In those versions of ``numpy`` a call to ``ndarray.astype(str)`` will truncate any strings that are more than 60 characters in length. Second, we can't pass ``object`` arrays to ``numexpr`` thus string comparisons must be diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 5da0f4fd07819..bc490877e190d 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -91,7 +91,7 @@ See also :ref:`Categorical Memory Usage <categorical.memory>`. Using If/Truth Statements with pandas ------------------------------------- -pandas follows the numpy convention of raising an error when you try to convert something to a ``bool``. +pandas follows the NumPy convention of raising an error when you try to convert something to a ``bool``. This happens in a ``if`` or when using the boolean operations, ``and``, ``or``, or ``not``. It is not clear what the result of diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 552ddabb7359a..413138b1e52fc 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -20,38 +20,38 @@ Group By: split-apply-combine ***************************** By "group by" we are referring to a process involving one or more of the following -steps +steps: - - **Splitting** the data into groups based on some criteria - - **Applying** a function to each group independently - - **Combining** the results into a data structure + - **Splitting** the data into groups based on some criteria. + - **Applying** a function to each group independently. + - **Combining** the results into a data structure. 
-Of these, the split step is the most straightforward. In fact, in many -situations you may wish to split the data set into groups and do something with -those groups yourself. In the apply step, we might wish to one of the +Out of these, the split step is the most straightforward. In fact, in many +situations we may wish to split the data set into groups and do something with +those groups. In the apply step, we might wish to do one of the following: - - **Aggregation**: computing a summary statistic (or statistics) about each + - **Aggregation**: compute a summary statistic (or statistics) for each group. Some examples: - - Compute group sums or means - - Compute group sizes / counts + - Compute group sums or means. + - Compute group sizes / counts. - **Transformation**: perform some group-specific computations and return a - like-indexed. Some examples: + like-indexed object. Some examples: - - Standardizing data (zscore) within group - - Filling NAs within groups with a value derived from each group + - Standardize data (zscore) within a group. + - Fill NAs within groups with a value derived from each group. - **Filtration**: discard some groups, according to a group-wise computation that evaluates True or False. Some examples: - - Discarding data that belongs to groups with only a few members - - Filtering out data based on the group sum or mean + - Discard data that belongs to groups with only a few members. + - Filter out data based on the group sum or mean. - Some combination of the above: GroupBy will examine the results of the apply step and try to return a sensibly combined result if it doesn't fit into - either of the above two categories + either of the above two categories. Since the set of object instance methods on pandas data structures are generally rich and expressive, we often simply want to invoke, say, a DataFrame function @@ -68,7 +68,7 @@ We aim to make operations like this natural and easy to express using pandas. 
We'll address each area of GroupBy functionality then provide some non-trivial examples / use cases. -See the :ref:`cookbook<cookbook.grouping>` for some advanced strategies +See the :ref:`cookbook<cookbook.grouping>` for some advanced strategies. .. _groupby.split: @@ -77,7 +77,7 @@ Splitting an object into groups pandas objects can be split on any of their axes. The abstract definition of grouping is to provide a mapping of labels to group names. To create a GroupBy -object (more on what the GroupBy object is later), you do the following: +object (more on what the GroupBy object is later), you may do the following: .. code-block:: ipython @@ -88,17 +88,18 @@ object (more on what the GroupBy object is later), you do the following: The mapping can be specified many different ways: - - A Python function, to be called on each of the axis labels - - A list or NumPy array of the same length as the selected axis - - A dict or Series, providing a ``label -> group name`` mapping - - For DataFrame objects, a string indicating a column to be used to group. Of - course ``df.groupby('A')`` is just syntactic sugar for - ``df.groupby(df['A'])``, but it makes life simpler - - For DataFrame objects, a string indicating an index level to be used to group. - - A list of any of the above things + - A Python function, to be called on each of the axis labels. + - A list or NumPy array of the same length as the selected axis. + - A dict or ``Series``, providing a ``label -> group name`` mapping. + - For ``DataFrame`` objects, a string indicating a column to be used to group. + Of course ``df.groupby('A')`` is just syntactic sugar for + ``df.groupby(df['A'])``, but it makes life simpler. + - For ``DataFrame`` objects, a string indicating an index level to be used to + group. + - A list of any of the above things. Collectively we refer to the grouping objects as the **keys**. For example, -consider the following DataFrame: +consider the following ``DataFrame``: .. 
note:: @@ -119,7 +120,8 @@ consider the following DataFrame: 'D' : np.random.randn(8)}) df -We could naturally group by either the ``A`` or ``B`` columns or both: +On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`. +We could naturally group by either the ``A`` or ``B`` columns, or both: .. ipython:: python @@ -140,7 +142,7 @@ columns: In [5]: grouped = df.groupby(get_letter_type, axis=1) -pandas Index objects support duplicate values. If a +pandas :class:`~pandas.Index` objects support duplicate values. If a non-unique index is used as the group key in a groupby operation, all values for the same index value will be considered to be in one group and thus the output of aggregation functions will only contain unique index values: @@ -220,7 +222,7 @@ the length of the ``groups`` dict, so it is largely just a convenience: .. _groupby.tabcompletion: -``GroupBy`` will tab complete column names (and other attributes) +``GroupBy`` will tab complete column names (and other attributes): .. ipython:: python :suppress: @@ -358,9 +360,9 @@ Index level names may be specified as keys directly to ``groupby``. DataFrame column selection in GroupBy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once you have created the GroupBy object from a DataFrame, for example, you -might want to do something different for each of the columns. Thus, using -``[]`` similar to getting a column from a DataFrame, you can do: +Once you have created the GroupBy object from a DataFrame, you might want to do +something different for each of the columns. Thus, using ``[]`` similar to +getting a column from a DataFrame, you can do: .. ipython:: python :suppress: @@ -393,7 +395,7 @@ Iterating through groups ------------------------ With the GroupBy object in hand, iterating through the grouped data is very -natural and functions similarly to ``itertools.groupby``: +natural and functions similarly to :py:func:`itertools.groupby`: .. 
ipython:: @@ -419,7 +421,8 @@ statement if you wish: ``for (k1, k2), group in grouped:``. Selecting a group ----------------- -A single group can be selected using ``GroupBy.get_group()``: +A single group can be selected using +:meth:`~pandas.core.groupby.DataFrameGroupBy.get_group`: .. ipython:: python @@ -441,7 +444,9 @@ perform a computation on the grouped data. These operations are similar to the :ref:`aggregating API <basics.aggregate>`, :ref:`window functions API <stats.aggregate>`, and :ref:`resample API <timeseries.aggregate>`. -An obvious one is aggregation via the ``aggregate`` or equivalently ``agg`` method: +An obvious one is aggregation via the +:meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` or equivalently +:meth:`~pandas.core.groupby.DataFrameGroupBy.agg` method: .. ipython:: python @@ -491,11 +496,34 @@ index are the group names and whose values are the sizes of each group. Passing ``as_index=False`` **will** return the groups that you are aggregating over, if they are named *columns*. - Aggregating functions are ones that reduce the dimension of the returned objects, - for example: ``mean, sum, size, count, std, var, sem, describe, first, last, nth, min, max``. This is - what happens when you do for example ``DataFrame.sum()`` and get back a ``Series``. - - ``nth`` can act as a reducer *or* a filter, see :ref:`here <groupby.nth>` +Aggregating functions are the ones that reduce the dimension of the returned objects. +Some common aggregating functions are tabulated below: + +.. 
csv-table:: + :header: "Function", "Description" + :widths: 20, 80 + :delim: ; + + :meth:`~pd.core.groupby.DataFrameGroupBy.mean`;Compute mean of groups + :meth:`~pd.core.groupby.DataFrameGroupBy.sum`;Compute sum of group values + :meth:`~pd.core.groupby.DataFrameGroupBy.size`;Compute group sizes + :meth:`~pd.core.groupby.DataFrameGroupBy.count`;Compute count of group + :meth:`~pd.core.groupby.DataFrameGroupBy.std`;Standard deviation of groups + :meth:`~pd.core.groupby.DataFrameGroupBy.var`;Compute variance of groups + :meth:`~pd.core.groupby.DataFrameGroupBy.sem`;Standard error of the mean of groups + :meth:`~pd.core.groupby.DataFrameGroupBy.describe`;Generates descriptive statistics + :meth:`~pd.core.groupby.DataFrameGroupBy.first`;Compute first of group values + :meth:`~pd.core.groupby.DataFrameGroupBy.last`;Compute last of group values + :meth:`~pd.core.groupby.DataFrameGroupBy.nth`;Take nth value, or a subset if n is a list + :meth:`~pd.core.groupby.DataFrameGroupBy.min`;Compute min of group values + :meth:`~pd.core.groupby.DataFrameGroupBy.max`;Compute max of group values + + +The aggregating functions above will exclude NA values. Any function which +reduces a :class:`Series` to a scalar value is an aggregation function and will work, +a trivial example is ``df.groupby('A').agg(lambda ser: 1)``. Note that +:meth:`~pd.core.groupby.DataFrameGroupBy.nth` can act as a reducer *or* a +filter, see :ref:`here <groupby.nth>`. .. _groupby.aggregate.multifunc: @@ -703,11 +731,11 @@ and that the transformed data contains no NAs. .. note:: - Some functions when applied to a groupby object will automatically transform - the input, returning an object of the same shape as the original. Passing - ``as_index=False`` will not affect these transformation methods. + Some functions will automatically transform the input when applied to a + GroupBy object, but returning an object of the same shape as the original. 
+ Passing ``as_index=False`` will not affect these transformation methods. - For example: ``fillna, ffill, bfill, shift``. + For example: ``fillna, ffill, bfill, shift``. .. ipython:: python @@ -898,7 +926,8 @@ The dimension of the returned result can also change: In [11]: grouped.apply(f) -``apply`` on a Series can operate on a returned value from the applied function, that is itself a series, and possibly upcast the result to a DataFrame +``apply`` on a Series can operate on a returned value from the applied function, +that is itself a series, and possibly upcast the result to a DataFrame: .. ipython:: python @@ -955,15 +984,21 @@ will be (silently) dropped. Thus, this does not pose any problems: df.groupby('A').std() +Note that ``df.groupby('A').colname.std()`` is more efficient than +``df.groupby('A').std().colname``, so if the result of an aggregation function +is only interesting over one column (here ``colname``), it may be filtered +*before* applying the aggregation function. + .. _groupby.missing: NA and NaT group handling ~~~~~~~~~~~~~~~~~~~~~~~~~ -If there are any NaN or NaT values in the grouping key, these will be automatically -excluded. So there will never be an "NA group" or "NaT group". This was not the case in older -versions of pandas, but users were generally discarding the NA group anyway -(and supporting it was an implementation headache). +If there are any NaN or NaT values in the grouping key, these will be +automatically excluded. In other words, there will never be an "NA group" or +"NaT group". This was not the case in older versions of pandas, but users were +generally discarding the NA group anyway (and supporting it was an +implementation headache). Grouping with ordered factors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1049,7 +1084,9 @@ This shows the first or last n rows from each group. Taking the nth row of each group ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To select from a DataFrame or Series the nth item, use the nth method. 
This is a reduction method, and will return a single row (or no row) per group if you pass an int for n: +To select from a DataFrame or Series the nth item, use +:meth:`~pd.core.groupby.DataFrameGroupBy.nth`. This is a reduction method, and +will return a single row (or no row) per group if you pass an int for n: .. ipython:: python @@ -1116,8 +1153,10 @@ Enumerate groups .. versionadded:: 0.20.2 To see the ordering of the groups (as opposed to the order of rows -within a group given by ``cumcount``) you can use the ``ngroup`` -method. +within a group given by ``cumcount``) you can use +:meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`. + + Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the @@ -1178,7 +1217,7 @@ allow for a cleaner, more readable syntax. To read about ``.pipe`` in general te see :ref:`here <basics.pipe>`. Combining ``.groupby`` and ``.pipe`` is often useful when you need to reuse -GroupB objects. +GroupBy objects. For an example, imagine having a DataFrame with columns for stores, products, revenue and sold quantity. We'd like to do a groupwise calculation of *prices* @@ -1233,9 +1272,9 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on Multi-column factorization ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By using ``.ngroup()``, we can extract information about the groups in -a way similar to :func:`factorize` (as described further in the -:ref:`reshaping API <reshaping.factorize>`) but which applies +By using :meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`, we can extract +information about the groups in a way similar to :func:`factorize` (as described +further in the :ref:`reshaping API <reshaping.factorize>`) but which applies naturally to multiple columns of mixed type and different sources. 
This can be useful as an intermediate categorical-like step in processing, when the relationships between the group rows are more diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 355be5039f146..0467ac225585b 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -228,7 +228,7 @@ as an attribute: .. warning:: - - You can use this access only if the index element is a valid python identifier, e.g. ``s.1`` is not allowed. + - You can use this access only if the index element is a valid Python identifier, e.g. ``s.1`` is not allowed. See `here for an explanation of valid identifiers <https://docs.python.org/3/reference/lexical_analysis.html#identifiers>`__. @@ -441,7 +441,7 @@ Selection By Position This is sometimes called ``chained assignment`` and should be avoided. See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`. -Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``. +Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``. The ``.iloc`` attribute is the primary access method. The following are valid inputs: @@ -777,7 +777,7 @@ using the ``replace`` option: By default, each row has an equal probability of being selected, but if you want rows to have different probabilities, you can pass the ``sample`` function sampling weights as -``weights``. 
These weights can be a list, a numpy array, or a Series, but they must be of the same length as the object you are sampling. Missing values will be treated as a weight of zero, and inf values are not allowed. If weights do not sum to 1, they will be re-normalized by dividing all weights by the sum of the weights. For example: +``weights``. These weights can be a list, a NumPy array, or a Series, but they must be of the same length as the object you are sampling. Missing values will be treated as a weight of zero, and inf values are not allowed. If weights do not sum to 1, they will be re-normalized by dividing all weights by the sum of the weights. For example: .. ipython :: python @@ -805,7 +805,7 @@ as a string. df3 = pd.DataFrame({'col1':[1,2,3], 'col2':[2,3,4]}) df3.sample(n=1, axis=1) -Finally, one can also set a seed for ``sample``'s random number generator using the ``random_state`` argument, which will accept either an integer (as a seed) or a numpy RandomState object. +Finally, one can also set a seed for ``sample``'s random number generator using the ``random_state`` argument, which will accept either an integer (as a seed) or a NumPy RandomState object. .. ipython :: python @@ -893,7 +893,7 @@ evaluate an expression such as ``df.A > 2 & df.B < 3`` as ``df.A > (2 & df.B) < 3``, while the desired evaluation order is ``(df.A > 2) & (df.B < 3)``. -Using a boolean vector to index a Series works exactly as in a numpy ndarray: +Using a boolean vector to index a Series works exactly as in a NumPy ndarray: .. ipython:: python @@ -1125,7 +1125,6 @@ as condition and ``other`` argument. 'C': [7, 8, 9]}) df3.where(lambda x: x > 4, lambda x: x + 10) - Mask ~~~~ @@ -1712,6 +1711,7 @@ As a convenience, there is a new function on DataFrame called DataFrame's columns and sets a simple integer index. This is the inverse operation of :meth:`~DataFrame.set_index`. + .. ipython:: python data @@ -1772,7 +1772,7 @@ These both yield the same results, so which should you use? 
It is instructive to of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``). ``dfmi['one']`` selects the first level of the columns and returns a DataFrame that is singly-indexed. -Then another python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens. +Then another Python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens. This is indicated by the variable ``dfmi_with_one`` because pandas sees these operations as separate events. e.g. separate calls to ``__getitem__``, so it has to treat them as linear operations, they happen one after another. diff --git a/doc/source/io.rst b/doc/source/io.rst index 49d742d9905d7..5878272a3da42 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -164,7 +164,7 @@ dtype : Type name or dict of column -> type, default ``None`` .. versionadded:: 0.20.0 support for the Python parser. engine : {``'c'``, ``'python'``} - Parser engine to use. The C engine is faster while the python engine is + Parser engine to use. The C engine is faster while the Python engine is currently more feature-complete. converters : dict, default ``None`` Dict of functions for converting values in certain columns. Keys can either be @@ -1529,9 +1529,9 @@ Specifying the parser engine '''''''''''''''''''''''''''' Under the hood pandas uses a fast and efficient parser implemented in C as well -as a python implementation which is currently more feature-complete. Where +as a Python implementation which is currently more feature-complete. Where possible pandas uses the C parser (specified as ``engine='c'``), but may fall -back to python if C-unsupported options are specified. Currently, C-unsupported +back to Python if C-unsupported options are specified. Currently, C-unsupported options include: - ``sep`` other than a single character (e.g. regex separators) @@ -1582,7 +1582,7 @@ function takes a number of arguments. 
Only the first is required. used. (A sequence should be given if the DataFrame uses MultiIndex). - ``mode`` : Python write mode, default 'w' - ``encoding``: a string representing the encoding to use if the contents are - non-ASCII, for python versions prior to 3 + non-ASCII, for Python versions prior to 3 - ``line_terminator``: Character sequence denoting line end (default '\\n') - ``quoting``: Set quoting rules as in csv module (default csv.QUOTE_MINIMAL). Note that if you have set a `float_format` then floats are converted to strings and csv.QUOTE_NONNUMERIC will treat them as non-numeric - ``quotechar``: Character used to quote fields (default '"') @@ -1851,7 +1851,7 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` - ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is True - ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse date-like columns, default is True - ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default date-like columns -- ``numpy`` : direct decoding to numpy arrays. default is False; +- ``numpy`` : direct decoding to NumPy arrays. default is False; Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True`` - ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality - ``date_unit`` : string, the timestamp unit to detect if converting dates. Default @@ -1962,7 +1962,7 @@ The Numpy Parameter If ``numpy=True`` is passed to ``read_json`` an attempt will be made to sniff an appropriate dtype during deserialization and to subsequently decode directly -to numpy arrays, bypassing the need for intermediate Python objects. 
+to NumPy arrays, bypassing the need for intermediate Python objects. This can provide speedups if you are deserialising a large amount of numeric data: @@ -1999,7 +1999,7 @@ The speedup is less noticeable for smaller datasets: .. warning:: - Direct numpy decoding makes a number of assumptions and may fail or produce + Direct NumPy decoding makes a number of assumptions and may fail or produce unexpected output if these assumptions are not satisfied: - data is numeric. @@ -3187,7 +3187,7 @@ You can pass ``append=True`` to the writer to append to an existing pack Unlike other io methods, ``to_msgpack`` is available on both a per-object basis, ``df.to_msgpack()`` and using the top-level ``pd.to_msgpack(...)`` where you -can pack arbitrary collections of python lists, dicts, scalars, while intermixing +can pack arbitrary collections of Python lists, dicts, scalars, while intermixing pandas objects. .. ipython:: python @@ -4411,7 +4411,7 @@ Several caveats. can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to ignore it. - Duplicate column names and non-string columns names are not supported -- Non supported types include ``Period`` and actual python object types. These will raise a helpful error message +- Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message on an attempt at serialization. See the `Full Documentation <https://github.com/wesm/feather>`__ @@ -4475,7 +4475,7 @@ Several caveats. - Duplicate column names and non-string columns names are not supported - Index level names, if specified, must be strings - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. -- Non supported types include ``Period`` and actual python object types. These will raise a helpful error message +- Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message on an attempt at serialization. 
You can specify an ``engine`` to direct the serialization. This can be one of ``pyarrow``, or ``fastparquet``, or ``auto``. diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index e20537efc0e71..d2250ae7b2116 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -27,7 +27,7 @@ pandas. NumPy will soon be able to provide a native NA type solution (similar to R) performant enough to be used in pandas. -See the :ref:`cookbook<cookbook.missing_data>` for some advanced strategies +See the :ref:`cookbook<cookbook.missing_data>` for some advanced strategies. Missing data basics ------------------- @@ -43,7 +43,7 @@ series might start on different dates. Thus, values prior to the start date would generally be marked as missing. In pandas, one of the most common ways that missing data is **introduced** into -a data set is by reindexing. For example +a data set is by reindexing. For example: .. ipython:: python @@ -86,7 +86,7 @@ pandas provides the :func:`isna` and .. warning:: - One has to be mindful that in python (and numpy), the ``nan's`` don't compare equal, but ``None's`` **do**. + One has to be mindful that in Python (and numpy), the ``nan's`` don't compare equal, but ``None's`` **do**. Note that Pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``. .. ipython:: python @@ -104,7 +104,7 @@ Datetimes --------- For datetime64[ns] types, ``NaT`` represents missing values. This is a pseudo-native -sentinel value that can be represented by numpy in a singular dtype (datetime64[ns]). +sentinel value that can be represented by NumPy in a singular dtype (datetime64[ns]). pandas objects provide intercompatibility between ``NaT`` and ``NaN``. .. ipython:: python @@ -169,10 +169,10 @@ The descriptive statistics and computational methods discussed in the <api.series.stats>` and :ref:`here <api.dataframe.stats>`) are all written to account for missing data. 
For example: -* When summing data, NA (missing) values will be treated as zero -* If the data are all NA, the result will be NA +* When summing data, NA (missing) values will be treated as zero. +* If the data are all NA, the result will be NA. * Methods like **cumsum** and **cumprod** ignore NA values, but preserve them - in the resulting arrays + in the resulting arrays. .. ipython:: python @@ -190,7 +190,8 @@ Sum/Prod of Empties/Nans .. warning:: This behavior is now standard as of v0.21.0; previously sum/prod would give different - results if the ``bottleneck`` package was installed. See the :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. + results if the ``bottleneck`` package was installed. + See the :ref:`v0.21.0 whatsnew <whatsnew_0210.api_breaking.bottleneck>`. With ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, the result will be all-``NaN``. @@ -200,7 +201,7 @@ With ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a s.sum() -Summing of an empty ``Series`` +Summing over an empty ``Series`` will return ``NaN``: .. ipython:: python @@ -250,7 +251,7 @@ of ways, which we illustrate: df2 df2.fillna(0) - df2['four'].fillna('missing') + df2['one'].fillna('missing') **Fill gaps forward or backward** @@ -328,7 +329,7 @@ Dropping axis labels with missing data: dropna ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You may wish to simply exclude labels from a data set which refer to missing -data. To do this, use the **dropna** method: +data. To do this, use the :meth:`~DataFrame.dropna` method: .. ipython:: python :suppress: @@ -343,7 +344,7 @@ data. To do this, use the **dropna** method: df.dropna(axis=1) df['one'].dropna() -Series.dropna is a simpler method as it only has one axis to consider. +An equivalent :meth:`~Series.dropna` method is available for Series. DataFrame.dropna has considerably more options than Series.dropna, which can be examined :ref:`in the API <api.dataframe.missing>`. 
@@ -352,8 +353,8 @@ examined :ref:`in the API <api.dataframe.missing>`. Interpolation ~~~~~~~~~~~~~ -Both Series and DataFrame objects have an ``interpolate`` method that, by default, -performs linear interpolation at missing datapoints. +Both Series and DataFrame objects have an :meth:`~DataFrame.interpolate` method +that, by default, performs linear interpolation at missing datapoints. .. ipython:: python :suppress: @@ -411,7 +412,7 @@ You can also interpolate with a DataFrame: df.interpolate() The ``method`` argument gives access to fancier interpolation methods. -If you have scipy_ installed, you can set pass the name of a 1-d interpolation routine to ``method``. +If you have scipy_ installed, you can pass the name of a 1-d interpolation routine to ``method``. You'll want to consult the full scipy interpolation documentation_ and reference guide_ for details. The appropriate interpolation method will depend on the type of data you are working with. @@ -419,7 +420,7 @@ The appropriate interpolation method will depend on the type of data you are wor ``method='quadratic'`` may be appropriate. * If you have values approximating a cumulative distribution function, then ``method='pchip'`` should work well. -* To fill missing values with goal of smooth plotting, use ``method='akima'``. +* To fill missing values with goal of smooth plotting, consider ``method='akima'``. .. warning:: @@ -562,7 +563,7 @@ String/Regular Expression Replacement <https://docs.python.org/3/reference/lexical_analysis.html#string-literals>`__ if this is unclear. -Replace the '.' with ``NaN`` (str -> str) +Replace the '.' with ``NaN`` (str -> str): .. ipython:: python @@ -571,58 +572,58 @@ Replace the '.' with ``NaN`` (str -> str) df.replace('.', np.nan) Now do it with a regular expression that removes surrounding whitespace -(regex -> regex) +(regex -> regex): .. 
ipython:: python df.replace(r'\s*\.\s*', np.nan, regex=True) -Replace a few different values (list -> list) +Replace a few different values (list -> list): .. ipython:: python df.replace(['a', '.'], ['b', np.nan]) -list of regex -> list of regex +list of regex -> list of regex: .. ipython:: python df.replace([r'\.', r'(a)'], ['dot', '\1stuff'], regex=True) -Only search in column ``'b'`` (dict -> dict) +Only search in column ``'b'`` (dict -> dict): .. ipython:: python df.replace({'b': '.'}, {'b': np.nan}) Same as the previous example, but use a regular expression for -searching instead (dict of regex -> dict) +searching instead (dict of regex -> dict): .. ipython:: python df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True) -You can pass nested dictionaries of regular expressions that use ``regex=True`` +You can pass nested dictionaries of regular expressions that use ``regex=True``: .. ipython:: python df.replace({'b': {'b': r''}}, regex=True) -or you can pass the nested dictionary like so +Alternatively, you can pass the nested dictionary like so: .. ipython:: python df.replace(regex={'b': {r'\s*\.\s*': np.nan}}) You can also use the group of a regular expression match when replacing (dict -of regex -> dict of regex), this works for lists as well +of regex -> dict of regex), this works for lists as well. .. ipython:: python df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True) You can pass a list of regular expressions, of which those that match -will be replaced with a scalar (list of regex -> regex) +will be replaced with a scalar (list of regex -> regex). .. ipython:: python @@ -631,7 +632,7 @@ will be replaced with a scalar (list of regex -> regex) All of the regular expression examples can also be passed with the ``to_replace`` argument as the ``regex`` argument. In this case the ``value`` argument must be passed explicitly by name or ``regex`` must be a nested -dictionary. The previous example, in this case, would then be +dictionary. 
The previous example, in this case, would then be: .. ipython:: python @@ -648,7 +649,7 @@ want to use a regular expression. Numeric Replacement ~~~~~~~~~~~~~~~~~~~ -Similar to ``DataFrame.fillna`` +The :meth:`~DataFrame.replace` method is similar to :meth:`~DataFrame.fillna`. .. ipython:: python @@ -656,7 +657,7 @@ Similar to ``DataFrame.fillna`` df[np.random.rand(df.shape[0]) > 0.5] = 1.5 df.replace(1.5, np.nan) -Replacing more than one value via lists works as well +Replacing more than one value is possible by passing a list. .. ipython:: python @@ -664,7 +665,7 @@ Replacing more than one value via lists works as well df.replace([1.5, df00], [np.nan, 'a']) df[1].dtype -You can also operate on the DataFrame in place +You can also operate on the DataFrame in place: .. ipython:: python @@ -674,7 +675,7 @@ You can also operate on the DataFrame in place When replacing multiple ``bool`` or ``datetime64`` objects, the first argument to ``replace`` (``to_replace``) must match the type of the value - being replaced type. For example, + being replaced. For example, .. code-block:: python @@ -702,9 +703,9 @@ Missing data casting rules and indexing While pandas supports storing arrays of integer and boolean type, these types are not capable of storing missing data. Until we can switch to using a native -NA type in NumPy, we've established some "casting rules" when reindexing will -cause missing data to be introduced into, say, a Series or DataFrame. Here they -are: +NA type in NumPy, we've established some "casting rules". When a reindexing +operation introduces missing data, the Series will be cast according to the +rules introduced in the table below. .. 
csv-table:: :header: "data type", "Cast to" diff --git a/doc/source/release.rst b/doc/source/release.rst index 12932d9fcee4f..de045c426cf7b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2894,7 +2894,7 @@ Improvements to existing features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Fixed various issues with internal pprinting code, the repr() for various objects - including TimeStamp and Index now produces valid python code strings and + including TimeStamp and Index now produces valid Python code strings and can be used to recreate the object, (:issue:`3038`, :issue:`3379`, :issue:`3251`, :issue:`3460`) - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``) - ``HDFStore`` diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 778db17a56b58..6bbfb54629c4d 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -238,7 +238,7 @@ Frequency Conversion Timedelta Series, ``TimedeltaIndex``, and ``Timedelta`` scalars can be converted to other 'frequencies' by dividing by another timedelta, or by astyping to a specific timedelta type. These operations yield Series and propagate ``NaT`` -> ``nan``. -Note that division by the numpy scalar is true division, while astyping is equivalent of floor division. +Note that division by the NumPy scalar is true division, while astyping is equivalent of floor division. .. ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 201af3c7d5355..fa21cc997d4f4 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -2016,7 +2016,7 @@ Pandas provides rich support for working with timestamps in different time zones using ``pytz`` and ``dateutil`` libraries. ``dateutil`` currently is only supported for fixed offset and tzfile zones. The default library is ``pytz``. Support for ``dateutil`` is provided for compatibility with other -applications e.g. if you use ``dateutil`` in other python packages. +applications e.g. 
if you use ``dateutil`` in other Python packages. Working with Time Zones ~~~~~~~~~~~~~~~~~~~~~~~ @@ -2264,15 +2264,15 @@ a convert on an aware stamp. .. note:: - Using the ``.values`` accessor on a ``Series``, returns an numpy array of the data. - These values are converted to UTC, as numpy does not currently support timezones (even though it is *printing* in the local timezone!). + Using the ``.values`` accessor on a ``Series``, returns an NumPy array of the data. + These values are converted to UTC, as NumPy does not currently support timezones (even though it is *printing* in the local timezone!). .. ipython:: python s_naive.values s_aware.values - Further note that once converted to a numpy array these would lose the tz tenor. + Further note that once converted to a NumPy array these would lose the tz tenor. .. ipython:: python diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 2c1d54c27caab..cbd17493beb7e 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -1270,7 +1270,7 @@ The layout of subplots can be specified by ``layout`` keyword. It can accept The number of axes which can be contained by rows x columns specified by ``layout`` must be larger than the number of required subplots. If layout can contain more axes than required, -blank axes are not drawn. Similar to a numpy array's ``reshape`` method, you +blank axes are not drawn. Similar to a NumPy array's ``reshape`` method, you can use ``-1`` for one dimension to automatically calculate the number of rows or columns needed, given the other. diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index bd3bee507baa3..1280634aa6c1a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -273,8 +273,11 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ -- -- +- Changed spelling of "numpy" to "NumPy", and "python" to "Python". 
(:issue:`19017`) +- Consistency when introducing code samples, using either colon or period. + Rewrote some sentences for greater clarity, added more dynamic references + to functions, methods and classes. + (:issue:`18941`, :issue:`18948`, :issue:`18973`, :issue:`19017`) - .. _whatsnew_0230.bug_fixes:
This PR is a continuation of my read-through of the docs, the earlier PRs are #18941, #18973 and #18948. **The changes included in this PR include** - Changed spelling of "python" to "Python" for consistency, same with "numpy" to "NumPy". This change was made globally, i.e. for most documents in `/docs/source/`. - A review of `groupby.rst` and `missing_data.rst` - Added punctuation where missing. - Added more function references (i.e. `:meth:...`) where appropriate. - Rewrote a few sentences. Feedback is very welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/19017
2017-12-31T16:19:22Z
2018-01-03T12:25:22Z
2018-01-03T12:25:22Z
2018-01-03T12:25:53Z
DOC: Update some outdated information
diff --git a/README.md b/README.md index ac043f5586498..4b9c9505e320a 100644 --- a/README.md +++ b/README.md @@ -160,10 +160,9 @@ pip install pandas ``` ## Dependencies -- [NumPy](http://www.numpy.org): 1.7.0 or higher -- [python-dateutil](https://labix.org/python-dateutil): 1.5 or higher -- [pytz](https://pythonhosted.org/pytz) - - Needed for time zone support with ``pandas.date_range`` +- [NumPy](http://www.numpy.org): 1.9.0 or higher +- [python-dateutil](https://labix.org/python-dateutil): 2.5.0 or higher +- [pytz](https://pythonhosted.org/pytz): 2011k or higher See the [full installation instructions](https://pandas.pydata.org/pandas-docs/stable/install.html#dependencies) for recommended and optional dependencies. @@ -205,9 +204,6 @@ See the full instructions for [installing from source](https://pandas.pydata.org ## Documentation The official documentation is hosted on PyData.org: https://pandas.pydata.org/pandas-docs/stable -The Sphinx documentation should provide a good starting point for learning how -to use the library. Expect the docs to continue to expand as time goes on. - ## Background Work on ``pandas`` started at AQR (a quantitative hedge fund) in 2008 and has been under active development since then. diff --git a/setup.py b/setup.py index 443f3eba69b4d..7dbf6c84a0451 100755 --- a/setup.py +++ b/setup.py @@ -198,10 +198,6 @@ def build_extensions(self): munging and cleaning data, analyzing / modeling it, then organizing the results of the analysis into a form suitable for plotting or tabular display. pandas is the ideal tool for all of these tasks. - -Notes ------ -Windows binaries built against NumPy 1.8.1 """ DISTNAME = 'pandas'
https://api.github.com/repos/pandas-dev/pandas/pulls/19015
2017-12-31T12:56:45Z
2017-12-31T14:43:54Z
2017-12-31T14:43:54Z
2018-05-02T13:09:34Z
TST: fix FileNotFoundError: [Errno 2] No such file or directory: 'pa…
diff --git a/setup.py b/setup.py index 0fea6f5641475..d518bea62da00 100755 --- a/setup.py +++ b/setup.py @@ -718,6 +718,7 @@ def pxd(name): 'parser/data/*.tar', 'parser/data/*.zip', 'parser/data/*.tar.gz', + 'parser/data/*.jsonl', 'sas/data/*.csv', 'sas/data/*.xpt', 'sas/data/*.sas7bdat',
null
https://api.github.com/repos/pandas-dev/pandas/pulls/19014
2017-12-31T02:58:58Z
2017-12-31T14:43:07Z
null
2023-05-11T01:17:03Z
datetimelike indexes add/sub zero-dim integer arrays
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6407a33c442d0..b169d8600169e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -368,7 +368,7 @@ Numeric ^^^^^^^ - Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) -- +- Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`) - Categorical diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 10c9e8e7dd18f..2a77a23c2cfa1 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -669,6 +669,8 @@ def __add__(self, other): from pandas.core.index import Index from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset + + other = lib.item_from_zerodim(other) if is_timedelta64_dtype(other): return self._add_delta(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): @@ -689,6 +691,7 @@ def __add__(self, other): return self._add_datelike(other) else: # pragma: no cover return NotImplemented + cls.__add__ = __add__ cls.__radd__ = __add__ @@ -697,6 +700,8 @@ def __sub__(self, other): from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset + + other = lib.item_from_zerodim(other) if is_timedelta64_dtype(other): return self._add_delta(-other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): @@ -724,6 +729,7 @@ def __sub__(self, other): else: # pragma: no cover return NotImplemented + cls.__sub__ = __sub__ def __rsub__(self, other): @@ -737,8 +743,10 @@ def _add_delta(self, other): return NotImplemented def _add_delta_td(self, other): - # add a delta of a timedeltalike - # return the i8 result view + """ + Add a 
delta of a timedeltalike + return the i8 result view + """ inc = delta_to_nanoseconds(other) new_values = checked_add_with_arr(self.asi8, inc, @@ -748,8 +756,10 @@ def _add_delta_td(self, other): return new_values.view('i8') def _add_delta_tdi(self, other): - # add a delta of a TimedeltaIndex - # return the i8 result view + """ + Add a delta of a TimedeltaIndex + return the i8 result view + """ # delta operation if not len(self) == len(other): diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index a0ee3e511ef37..217ee07affa84 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -1,4 +1,5 @@ import pytest +import numpy as np import pandas.util.testing as tm from pandas.core.indexes.api import Index, MultiIndex @@ -22,3 +23,9 @@ ids=lambda x: type(x).__name__) def indices(request): return request.param + + +@pytest.fixture(params=[1, np.array(1, dtype=np.int64)]) +def one(request): + # zero-dim integer array behaves like an integer + return request.param diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 11a52267ed1b4..4684eb89557bf 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -58,36 +58,37 @@ def test_dti_radd_timestamp_raises(self): # ------------------------------------------------------------- # Binary operations DatetimeIndex and int - def test_dti_add_int(self, tz): + def test_dti_add_int(self, tz, one): + # Variants of `one` for #19012 rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) - result = rng + 1 + result = rng + one expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) - def test_dti_iadd_int(self, tz): + def test_dti_iadd_int(self, tz, one): rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) expected = pd.date_range('2000-01-01 10:00', freq='H', 
periods=10, tz=tz) - rng += 1 + rng += one tm.assert_index_equal(rng, expected) - def test_dti_sub_int(self, tz): + def test_dti_sub_int(self, tz, one): rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) - result = rng - 1 + result = rng - one expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) tm.assert_index_equal(result, expected) - def test_dti_isub_int(self, tz): + def test_dti_isub_int(self, tz, one): rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz) expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz) - rng -= 1 + rng -= one tm.assert_index_equal(rng, expected) # ------------------------------------------------------------- diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index b64f9074c3cf0..356ea5fc656de 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -131,19 +131,21 @@ def test_add_iadd(self): period.IncompatibleFrequency, msg): rng += delta - # int + def test_pi_add_int(self, one): + # Variants of `one` for #19012 rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10) - result = rng + 1 + result = rng + one expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10) tm.assert_index_equal(result, expected) - rng += 1 + rng += one tm.assert_index_equal(rng, expected) - def test_sub(self): + @pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)]) + def test_sub(self, five): rng = period_range('2007-01', periods=50) - result = rng - 5 - exp = rng + (-5) + result = rng - five + exp = rng + (-five) tm.assert_index_equal(result, exp) def test_sub_isub(self): diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3c567e52cccb5..3ecfcaff63bc5 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ 
-121,28 +121,29 @@ def test_ufunc_coercions(self): # ------------------------------------------------------------- # Binary operations TimedeltaIndex and integer - def test_tdi_add_int(self): + def test_tdi_add_int(self, one): + # Variants of `one` for #19012 rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) - result = rng + 1 + result = rng + one expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) - def test_tdi_iadd_int(self): + def test_tdi_iadd_int(self, one): rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) expected = timedelta_range('1 days 10:00:00', freq='H', periods=10) - rng += 1 + rng += one tm.assert_index_equal(rng, expected) - def test_tdi_sub_int(self): + def test_tdi_sub_int(self, one): rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) - result = rng - 1 + result = rng - one expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) tm.assert_index_equal(result, expected) - def test_tdi_isub_int(self): + def test_tdi_isub_int(self, one): rng = timedelta_range('1 days 09:00:00', freq='H', periods=10) expected = timedelta_range('1 days 08:00:00', freq='H', periods=10) - rng -= 1 + rng -= one tm.assert_index_equal(rng, expected) # -------------------------------------------------------------
Setup: ``` dti = pd.date_range('2016-01-01', periods=3, freq='H') one = np.array(1) ``` 0.21.1: ``` >>> dti + one DatetimeIndex(['2016-01-01 00:00:00.000000001', '2016-01-01 01:00:00.000000001', '2016-01-01 02:00:00.000000001'], dtype='datetime64[ns]', freq='H') >>> dti.freq = None >>> dti + one DatetimeIndex(['2016-01-01 00:00:00.000000001', '2016-01-01 01:00:00.000000001', '2016-01-01 02:00:00.000000001'], dtype='datetime64[ns]', freq=None) ``` Master: (See #19011) ``` >>> dti + one Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pandas/core/indexes/datetimelike.py", line 685, in __add__ elif is_offsetlike(other): File "pandas/core/dtypes/common.py", line 294, in is_offsetlike elif (is_list_like(arr_or_obj) and len(arr_or_obj) and TypeError: len() of unsized object ``` After ``` >>> dti + one DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00', '2016-01-01 03:00:00'], dtype='datetime64[ns]', freq='H') >>> dti.freq = None >>> dti + one Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pandas/core/indexes/datetimelike.py", line 683, in __add__ return self.shift(other) File "pandas/core/indexes/datetimelike.py", line 821, in shift raise ValueError("Cannot shift with no freq") ValueError: Cannot shift with no freq ``` - [x] closes #19012 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19013
2017-12-31T01:18:48Z
2017-12-31T14:49:25Z
2017-12-31T14:49:25Z
2018-01-23T04:40:46Z
TST: Split tests/indexes/interval/test_interval.py into separate files
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 3ca4c31b7f059..73520e984ae12 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -2,14 +2,11 @@ import pytest import numpy as np -from datetime import timedelta from pandas import ( Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, - Timedelta, compat, date_range, timedelta_range, DateOffset) + Timedelta, date_range, timedelta_range) from pandas.compat import lzip from pandas.core.common import _asarray_tuplesafe -from pandas.tseries.offsets import Day -from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base import pandas.util.testing as tm import pandas as pd @@ -1158,367 +1155,3 @@ def test_to_tuples_na(self, tuples, na_tuple): assert all(isna(x) for x in result_na) else: assert isna(result_na) - - -class TestIntervalRange(object): - - def test_construction_from_numeric(self, closed, name): - # combinations of start/end/periods without freq - expected = IntervalIndex.from_breaks( - np.arange(0, 6), name=name, closed=closed) - - result = interval_range(start=0, end=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=5, periods=5, name=name, closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with freq - expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)], - name=name, closed=closed) - - result = interval_range(start=0, end=6, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=0, periods=3, freq=2, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=6, periods=3, freq=2, name=name, - closed=closed) - 
tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. - expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)], - name=name, closed=closed) - result = interval_range(start=0, end=4, freq=1.5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('tz', [None, 'US/Eastern']) - def test_construction_from_timestamp(self, closed, name, tz): - # combinations of start/end/periods without freq - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-01-06', tz=tz) - breaks = date_range(start=start, end=end) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with fixed freq - freq = '2D' - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-01-07', tz=tz) - breaks = date_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. 
- end = Timestamp('2017-01-08', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with non-fixed freq - freq = 'M' - start = Timestamp('2017-01-01', tz=tz) - end = Timestamp('2017-12-31', tz=tz) - breaks = date_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=11, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=11, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. - end = Timestamp('2018-01-15', tz=tz) - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - def test_construction_from_timedelta(self, closed, name): - # combinations of start/end/periods without freq - start, end = Timedelta('1 day'), Timedelta('6 days') - breaks = timedelta_range(start=start, end=end) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = interval_range(start=start, end=end, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=5, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # combinations of start/end/periods with fixed freq - freq = '2D' - start, end = Timedelta('1 day'), Timedelta('7 days') - breaks = timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) - - result = 
interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(start=start, periods=3, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - result = interval_range(end=end, periods=3, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - # output truncates early if freq causes end to be skipped. - end = Timedelta('7 days 1 hour') - result = interval_range(start=start, end=end, freq=freq, name=name, - closed=closed) - tm.assert_index_equal(result, expected) - - def test_constructor_coverage(self): - # float value for periods - expected = pd.interval_range(start=0, periods=10) - result = pd.interval_range(start=0, periods=10.5) - tm.assert_index_equal(result, expected) - - # equivalent timestamp-like start/end - start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15') - expected = pd.interval_range(start=start, end=end) - - result = pd.interval_range(start=start.to_pydatetime(), - end=end.to_pydatetime()) - tm.assert_index_equal(result, expected) - - result = pd.interval_range(start=start.asm8, end=end.asm8) - tm.assert_index_equal(result, expected) - - # equivalent freq with timestamp - equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1), - DateOffset(days=1)] - for freq in equiv_freq: - result = pd.interval_range(start=start, end=end, freq=freq) - tm.assert_index_equal(result, expected) - - # equivalent timedelta-like start/end - start, end = Timedelta(days=1), Timedelta(days=10) - expected = pd.interval_range(start=start, end=end) - - result = pd.interval_range(start=start.to_pytimedelta(), - end=end.to_pytimedelta()) - tm.assert_index_equal(result, expected) - - result = pd.interval_range(start=start.asm8, end=end.asm8) - tm.assert_index_equal(result, expected) - - # equivalent freq with timedelta - equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)] - for freq in equiv_freq: - result = 
pd.interval_range(start=start, end=end, freq=freq) - tm.assert_index_equal(result, expected) - - def test_errors(self): - # not enough params - msg = ('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - - with tm.assert_raises_regex(ValueError, msg): - interval_range(start=0) - - with tm.assert_raises_regex(ValueError, msg): - interval_range(end=5) - - with tm.assert_raises_regex(ValueError, msg): - interval_range(periods=2) - - with tm.assert_raises_regex(ValueError, msg): - interval_range() - - # too many params - with tm.assert_raises_regex(ValueError, msg): - interval_range(start=0, end=5, periods=6) - - # mixed units - msg = 'start, end, freq need to be type compatible' - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=0, end=Timestamp('20130101'), freq=2) - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=0, end=Timedelta('1 day'), freq=2) - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=0, end=10, freq='D') - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timestamp('20130101'), end=10, freq='D') - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timestamp('20130101'), - end=Timedelta('1 day'), freq='D') - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timestamp('20130101'), - end=Timestamp('20130110'), freq=2) - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timedelta('1 day'), end=10, freq='D') - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timedelta('1 day'), - end=Timestamp('20130110'), freq='D') - - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=Timedelta('1 day'), - end=Timedelta('10 days'), freq=2) - - # invalid periods - msg = 'periods must be a number, got foo' - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=0, periods='foo') - - # invalid start - msg = 'start must be numeric or datetime-like, 
got foo' - with tm.assert_raises_regex(ValueError, msg): - interval_range(start='foo', periods=10) - - # invalid end - msg = r'end must be numeric or datetime-like, got \(0, 1\]' - with tm.assert_raises_regex(ValueError, msg): - interval_range(end=Interval(0, 1), periods=10) - - # invalid freq for datetime-like - msg = 'freq must be numeric or convertible to DateOffset, got foo' - with tm.assert_raises_regex(ValueError, msg): - interval_range(start=0, end=10, freq='foo') - - with tm.assert_raises_regex(ValueError, msg): - interval_range(start=Timestamp('20130101'), periods=10, freq='foo') - - with tm.assert_raises_regex(ValueError, msg): - interval_range(end=Timedelta('1 day'), periods=10, freq='foo') - - # mixed tz - start = Timestamp('2017-01-01', tz='US/Eastern') - end = Timestamp('2017-01-07', tz='US/Pacific') - msg = 'Start and end cannot both be tz-aware with different timezones' - with tm.assert_raises_regex(TypeError, msg): - interval_range(start=start, end=end) - - -class TestIntervalTree(object): - def setup_method(self, method): - gentree = lambda dtype: IntervalTree(np.arange(5, dtype=dtype), - np.arange(5, dtype=dtype) + 2) - self.tree = gentree('int64') - self.trees = {dtype: gentree(dtype) - for dtype in ['int32', 'int64', 'float32', 'float64']} - - def test_get_loc(self): - for dtype, tree in self.trees.items(): - tm.assert_numpy_array_equal(tree.get_loc(1), - np.array([0], dtype='int64')) - tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)), - np.array([0, 1], dtype='int64')) - with pytest.raises(KeyError): - tree.get_loc(-1) - - def test_get_indexer(self): - for dtype, tree in self.trees.items(): - tm.assert_numpy_array_equal( - tree.get_indexer(np.array([1.0, 5.5, 6.5])), - np.array([0, 4, -1], dtype='int64')) - with pytest.raises(KeyError): - tree.get_indexer(np.array([3.0])) - - def test_get_indexer_non_unique(self): - indexer, missing = self.tree.get_indexer_non_unique( - np.array([1.0, 2.0, 6.5])) - tm.assert_numpy_array_equal(indexer[:1], 
- np.array([0], dtype='int64')) - tm.assert_numpy_array_equal(np.sort(indexer[1:3]), - np.array([0, 1], dtype='int64')) - tm.assert_numpy_array_equal(np.sort(indexer[3:]), - np.array([-1], dtype='int64')) - tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64')) - - def test_duplicates(self): - tree = IntervalTree([0, 0, 0], [1, 1, 1]) - tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)), - np.array([0, 1, 2], dtype='int64')) - - with pytest.raises(KeyError): - tree.get_indexer(np.array([0.5])) - - indexer, missing = tree.get_indexer_non_unique(np.array([0.5])) - tm.assert_numpy_array_equal(np.sort(indexer), - np.array([0, 1, 2], dtype='int64')) - tm.assert_numpy_array_equal(missing, np.array([], dtype='int64')) - - def test_get_loc_closed(self): - for closed in ['left', 'right', 'both', 'neither']: - tree = IntervalTree([0], [1], closed=closed) - for p, errors in [(0, tree.open_left), - (1, tree.open_right)]: - if errors: - with pytest.raises(KeyError): - tree.get_loc(p) - else: - tm.assert_numpy_array_equal(tree.get_loc(p), - np.array([0], dtype='int64')) - - @pytest.mark.skipif(compat.is_platform_32bit(), - reason="int type mismatch on 32bit") - def test_get_indexer_closed(self): - x = np.arange(1000, dtype='float64') - found = x.astype('intp') - not_found = (-1 * np.ones(1000)).astype('intp') - - for leaf_size in [1, 10, 100, 10000]: - for closed in ['left', 'right', 'both', 'neither']: - tree = IntervalTree(x, x + 0.5, closed=closed, - leaf_size=leaf_size) - tm.assert_numpy_array_equal(found, - tree.get_indexer(x + 0.25)) - - expected = found if tree.closed_left else not_found - tm.assert_numpy_array_equal(expected, - tree.get_indexer(x + 0.0)) - - expected = found if tree.closed_right else not_found - tm.assert_numpy_array_equal(expected, - tree.get_indexer(x + 0.5)) diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py new file mode 100644 index 0000000000000..203e8e3128edc --- 
/dev/null +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -0,0 +1,301 @@ +from __future__ import division + +import pytest +import numpy as np +from datetime import timedelta +from pandas import ( + Interval, IntervalIndex, Timestamp, Timedelta, DateOffset, + interval_range, date_range, timedelta_range) +from pandas.tseries.offsets import Day +import pandas.util.testing as tm +import pandas as pd + + +@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + +@pytest.fixture(scope='class', params=[None, 'foo']) +def name(request): + return request.param + + +class TestIntervalRange(object): + + def test_construction_from_numeric(self, closed, name): + # combinations of start/end/periods without freq + expected = IntervalIndex.from_breaks( + np.arange(0, 6), name=name, closed=closed) + + result = interval_range(start=0, end=5, name=name, closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=0, periods=5, name=name, closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=5, periods=5, name=name, closed=closed) + tm.assert_index_equal(result, expected) + + # combinations of start/end/periods with freq + expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)], + name=name, closed=closed) + + result = interval_range(start=0, end=6, freq=2, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=0, periods=3, freq=2, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=6, periods=3, freq=2, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # output truncates early if freq causes end to be skipped. 
+ expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)], + name=name, closed=closed) + result = interval_range(start=0, end=4, freq=1.5, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('tz', [None, 'US/Eastern']) + def test_construction_from_timestamp(self, closed, name, tz): + # combinations of start/end/periods without freq + start = Timestamp('2017-01-01', tz=tz) + end = Timestamp('2017-01-06', tz=tz) + breaks = date_range(start=start, end=end) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + result = interval_range(start=start, end=end, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start, periods=5, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=end, periods=5, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # combinations of start/end/periods with fixed freq + freq = '2D' + start = Timestamp('2017-01-01', tz=tz) + end = Timestamp('2017-01-07', tz=tz) + breaks = date_range(start=start, end=end, freq=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + result = interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start, periods=3, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=end, periods=3, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # output truncates early if freq causes end to be skipped. 
+ end = Timestamp('2017-01-08', tz=tz) + result = interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # combinations of start/end/periods with non-fixed freq + freq = 'M' + start = Timestamp('2017-01-01', tz=tz) + end = Timestamp('2017-12-31', tz=tz) + breaks = date_range(start=start, end=end, freq=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + result = interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start, periods=11, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=end, periods=11, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # output truncates early if freq causes end to be skipped. + end = Timestamp('2018-01-15', tz=tz) + result = interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + def test_construction_from_timedelta(self, closed, name): + # combinations of start/end/periods without freq + start, end = Timedelta('1 day'), Timedelta('6 days') + breaks = timedelta_range(start=start, end=end) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + result = interval_range(start=start, end=end, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start, periods=5, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=end, periods=5, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # combinations of start/end/periods with fixed freq + freq = '2D' + start, end = Timedelta('1 day'), Timedelta('7 days') + breaks = timedelta_range(start=start, end=end, freq=freq) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + + result = 
interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(start=start, periods=3, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + result = interval_range(end=end, periods=3, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + # output truncates early if freq causes end to be skipped. + end = Timedelta('7 days 1 hour') + result = interval_range(start=start, end=end, freq=freq, name=name, + closed=closed) + tm.assert_index_equal(result, expected) + + def test_constructor_coverage(self): + # float value for periods + expected = pd.interval_range(start=0, periods=10) + result = pd.interval_range(start=0, periods=10.5) + tm.assert_index_equal(result, expected) + + # equivalent timestamp-like start/end + start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15') + expected = pd.interval_range(start=start, end=end) + + result = pd.interval_range(start=start.to_pydatetime(), + end=end.to_pydatetime()) + tm.assert_index_equal(result, expected) + + result = pd.interval_range(start=start.asm8, end=end.asm8) + tm.assert_index_equal(result, expected) + + # equivalent freq with timestamp + equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1), + DateOffset(days=1)] + for freq in equiv_freq: + result = pd.interval_range(start=start, end=end, freq=freq) + tm.assert_index_equal(result, expected) + + # equivalent timedelta-like start/end + start, end = Timedelta(days=1), Timedelta(days=10) + expected = pd.interval_range(start=start, end=end) + + result = pd.interval_range(start=start.to_pytimedelta(), + end=end.to_pytimedelta()) + tm.assert_index_equal(result, expected) + + result = pd.interval_range(start=start.asm8, end=end.asm8) + tm.assert_index_equal(result, expected) + + # equivalent freq with timedelta + equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)] + for freq in equiv_freq: + result = 
pd.interval_range(start=start, end=end, freq=freq) + tm.assert_index_equal(result, expected) + + def test_errors(self): + # not enough params + msg = ('Of the three parameters: start, end, and periods, ' + 'exactly two must be specified') + + with tm.assert_raises_regex(ValueError, msg): + interval_range(start=0) + + with tm.assert_raises_regex(ValueError, msg): + interval_range(end=5) + + with tm.assert_raises_regex(ValueError, msg): + interval_range(periods=2) + + with tm.assert_raises_regex(ValueError, msg): + interval_range() + + # too many params + with tm.assert_raises_regex(ValueError, msg): + interval_range(start=0, end=5, periods=6) + + # mixed units + msg = 'start, end, freq need to be type compatible' + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=0, end=Timestamp('20130101'), freq=2) + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=0, end=Timedelta('1 day'), freq=2) + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=0, end=10, freq='D') + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timestamp('20130101'), end=10, freq='D') + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timestamp('20130101'), + end=Timedelta('1 day'), freq='D') + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timestamp('20130101'), + end=Timestamp('20130110'), freq=2) + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timedelta('1 day'), end=10, freq='D') + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timedelta('1 day'), + end=Timestamp('20130110'), freq='D') + + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=Timedelta('1 day'), + end=Timedelta('10 days'), freq=2) + + # invalid periods + msg = 'periods must be a number, got foo' + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=0, periods='foo') + + # invalid start + msg = 'start must be numeric or datetime-like, 
got foo' + with tm.assert_raises_regex(ValueError, msg): + interval_range(start='foo', periods=10) + + # invalid end + msg = r'end must be numeric or datetime-like, got \(0, 1\]' + with tm.assert_raises_regex(ValueError, msg): + interval_range(end=Interval(0, 1), periods=10) + + # invalid freq for datetime-like + msg = 'freq must be numeric or convertible to DateOffset, got foo' + with tm.assert_raises_regex(ValueError, msg): + interval_range(start=0, end=10, freq='foo') + + with tm.assert_raises_regex(ValueError, msg): + interval_range(start=Timestamp('20130101'), periods=10, freq='foo') + + with tm.assert_raises_regex(ValueError, msg): + interval_range(end=Timedelta('1 day'), periods=10, freq='foo') + + # mixed tz + start = Timestamp('2017-01-01', tz='US/Eastern') + end = Timestamp('2017-01-07', tz='US/Pacific') + msg = 'Start and end cannot both be tz-aware with different timezones' + with tm.assert_raises_regex(TypeError, msg): + interval_range(start=start, end=end) diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py new file mode 100644 index 0000000000000..343131125f640 --- /dev/null +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -0,0 +1,93 @@ +from __future__ import division + +import pytest +import numpy as np +from pandas import compat +from pandas._libs.interval import IntervalTree +import pandas.util.testing as tm + + +@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + +class TestIntervalTree(object): + def setup_method(self, method): + def gentree(dtype): + left = np.arange(5, dtype=dtype) + right = left + 2 + return IntervalTree(left, right) + + self.tree = gentree('int64') + self.trees = {dtype: gentree(dtype) + for dtype in ['int32', 'int64', 'float32', 'float64']} + + def test_get_loc(self): + for dtype, tree in self.trees.items(): + tm.assert_numpy_array_equal(tree.get_loc(1), + np.array([0], 
dtype='int64')) + tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)), + np.array([0, 1], dtype='int64')) + with pytest.raises(KeyError): + tree.get_loc(-1) + + def test_get_indexer(self): + for dtype, tree in self.trees.items(): + tm.assert_numpy_array_equal( + tree.get_indexer(np.array([1.0, 5.5, 6.5])), + np.array([0, 4, -1], dtype='int64')) + with pytest.raises(KeyError): + tree.get_indexer(np.array([3.0])) + + def test_get_indexer_non_unique(self): + indexer, missing = self.tree.get_indexer_non_unique( + np.array([1.0, 2.0, 6.5])) + tm.assert_numpy_array_equal(indexer[:1], + np.array([0], dtype='int64')) + tm.assert_numpy_array_equal(np.sort(indexer[1:3]), + np.array([0, 1], dtype='int64')) + tm.assert_numpy_array_equal(np.sort(indexer[3:]), + np.array([-1], dtype='int64')) + tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64')) + + def test_duplicates(self): + tree = IntervalTree([0, 0, 0], [1, 1, 1]) + tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)), + np.array([0, 1, 2], dtype='int64')) + + with pytest.raises(KeyError): + tree.get_indexer(np.array([0.5])) + + indexer, missing = tree.get_indexer_non_unique(np.array([0.5])) + tm.assert_numpy_array_equal(np.sort(indexer), + np.array([0, 1, 2], dtype='int64')) + tm.assert_numpy_array_equal(missing, np.array([], dtype='int64')) + + def test_get_loc_closed(self, closed): + tree = IntervalTree([0], [1], closed=closed) + for p, errors in [(0, tree.open_left), + (1, tree.open_right)]: + if errors: + with pytest.raises(KeyError): + tree.get_loc(p) + else: + tm.assert_numpy_array_equal(tree.get_loc(p), + np.array([0], dtype='int64')) + + @pytest.mark.skipif(compat.is_platform_32bit(), + reason="int type mismatch on 32bit") + @pytest.mark.parametrize('leaf_size', [1, 10, 100, 10000]) + def test_get_indexer_closed(self, closed, leaf_size): + x = np.arange(1000, dtype='float64') + found = x.astype('intp') + not_found = (-1 * np.ones(1000)).astype('intp') + + tree = IntervalTree(x, x + 0.5, 
closed=closed, leaf_size=leaf_size) + tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25)) + + expected = found if tree.closed_left else not_found + tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.0)) + + expected = found if tree.closed_right else not_found + tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5))
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Since we now have an interval subdirectory, seems logical to split `test_interval.py` into separate files, similar to what has been done for `DatetimeIndex`, `PeriodIndex`, and `TimedeltaIndex`. Just made very basic changes here, moving things at the class level, and didn't break apart any classes. All of the tests should functionally be the same. Summary: - Moved `TestIntervalRange` class from `test_interval.py` to `test_interval_range.py` - Moved `TestIntervalTree` class from `test_interval.py` to `test_interval_tree.py` - Changed a few `for` loops to `@pytest.mark.parametrize` - Converted a `lambda` to an actual function (my linter was complaining about PEP8 E731) - No changes to `test_interval_new.py` - Should still only cover tests in `test_interval.py` Down the road we could probably split `test_interval.py` into smaller components, much like what was done for `DatetimeIndex`, `PeriodIndex`, and `TimedeltaIndex`. Might want to wait until the `_new.py` files are fully addressed though. Didn't look like there were any high level changes like this that could be made in `tests/indexing/interval/`.
https://api.github.com/repos/pandas-dev/pandas/pulls/19009
2017-12-30T22:37:00Z
2017-12-30T23:49:23Z
2017-12-30T23:49:23Z
2017-12-31T20:47:18Z
CI: move 3.5 build back to required on travis
diff --git a/.travis.yml b/.travis.yml index e56435faeec19..5cc6547968b7d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,7 +49,6 @@ matrix: apt: packages: - python-gtk2 - # In allow_failures - dist: trusty env: - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true @@ -77,10 +76,6 @@ matrix: env: - JOB="3.6_DOC" DOC=true allow_failures: - # TODO(jreback) - - dist: trusty - env: - - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true - dist: trusty env: - JOB="2.7_SLOW" SLOW=true
https://api.github.com/repos/pandas-dev/pandas/pulls/19007
2017-12-30T22:23:18Z
2017-12-30T23:20:59Z
2017-12-30T23:20:59Z
2017-12-30T23:20:59Z
COMPAT: Drop reference to deprecated dateutil.zoneinfo.gettz
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index de9f75344b2bf..fdcf40337fab9 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -10,13 +10,7 @@ from dateutil.tz import ( tzlocal as _dateutil_tzlocal, tzfile as _dateutil_tzfile) -import sys -if sys.platform == 'win32' or sys.platform == 'cygwin': - # equiv pd.compat.is_platform_windows() - from dateutil.zoneinfo import gettz as dateutil_gettz -else: - from dateutil.tz import gettz as dateutil_gettz - +from dateutil.tz import gettz as dateutil_gettz from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo import pytz
- [X] closes #19004 I'm assuming these will be handled by CI: - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Not sure this is necessary, it's a very "behind the scenes" fix, replacing `zoneinfo.gettz` with the superior `tz.gettz()`: - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19006
2017-12-30T20:02:30Z
2017-12-30T22:12:05Z
2017-12-30T22:12:05Z
2017-12-30T22:14:38Z
DOC: Fix min_count docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 06d82578cb9ef..84799d12df0c4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7664,7 +7664,7 @@ def _doc_parms(cls): _min_count_stub = """\ -min_count : int, default 1 +min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA.
[ci skip] missed this earlier.
https://api.github.com/repos/pandas-dev/pandas/pulls/19005
2017-12-30T19:55:04Z
2017-12-30T19:55:37Z
2017-12-30T19:55:37Z
2017-12-30T19:55:47Z
COMPAT: clean up warnings
diff --git a/appveyor.yml b/appveyor.yml index 0aaac322c4ac7..ba001208864a8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -15,6 +15,7 @@ environment: # See: http://stackoverflow.com/a/13751649/163740 CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\ci\\run_with_env.cmd" clone_folder: C:\projects\pandas + PANDAS_TESTING_MODE: "deprecate" matrix: diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 92564285bb36a..6407a33c442d0 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -207,6 +207,7 @@ Other API Changes - :func:`wide_to_long` previously kept numeric-like suffixes as ``object`` dtype. Now they are cast to numeric if possible (:issue:`17627`) - In :func:`read_excel`, the ``comment`` argument is now exposed as a named parameter (:issue:`18735`) - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) +- The options ``html.border`` and ``mode.use_inf_as_null`` were deprecated in prior versions, these will now show ``FutureWarning`` rather than a ``DeprecationWarning`` (:issue:`19003`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/config.py b/pandas/core/config.py index d10e2d19be665..692aed178719d 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -613,7 +613,7 @@ def _warn_if_deprecated(key): if d: if d.msg: print(d.msg) - warnings.warn(d.msg, DeprecationWarning) + warnings.warn(d.msg, FutureWarning) else: msg = "'{key}' is deprecated".format(key=key) if d.removal_ver: @@ -624,7 +624,7 @@ def _warn_if_deprecated(key): else: msg += ', please refrain from using it.' 
- warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) return True return False diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index d208c72ffee19..ffac702476af1 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -316,6 +316,10 @@ def array_equivalent(left, right, strict_nan=False): # NaNs can occur in float and complex arrays. if is_float_dtype(left) or is_complex_dtype(left): + + # empty + if not (np.prod(left.shape) and np.prod(right.shape)): + return True return ((left == right) | (isna(left) & isna(right))).all() # numpy will will not allow this type of datetimelike vs integer comparison diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8be6c4875ae24..b7d3a60ecf6e4 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -328,13 +328,13 @@ def test_constructor_error_msgs(self): # wrong size axis labels with tm.assert_raises_regex(ValueError, "Shape of passed values " - "is \(3, 2\), indices " - "imply \(3, 1\)"): + r"is \(3, 2\), indices " + r"imply \(3, 1\)"): DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1]) with tm.assert_raises_regex(ValueError, "Shape of passed values " - "is \(3, 2\), indices " - "imply \(2, 2\)"): + r"is \(3, 2\), indices " + r"imply \(2, 2\)"): DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2]) with tm.assert_raises_regex(ValueError, "If using all scalar " @@ -1220,12 +1220,12 @@ def test_constructor_from_items(self): def test_constructor_from_items_scalars(self): # GH 17312 with tm.assert_raises_regex(ValueError, - 'The value in each \(key, value\) ' + r'The value in each \(key, value\) ' 'pair must be an array, Series, or dict'): DataFrame.from_items([('A', 1), ('B', 4)]) with tm.assert_raises_regex(ValueError, - 'The value in each \(key, value\) ' + r'The value in each \(key, value\) ' 'pair must be an array, Series, or 
dict'): DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'], orient='index') diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 22066d59cf14d..55aeaf6e77be1 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -1040,6 +1040,6 @@ def test_invalid_type_for_operator_raises(self, parser, engine): ops = '+', '-', '*', '/' for op in ops: with tm.assert_raises_regex(TypeError, - "unsupported operand type\(s\) " + r"unsupported operand type\(s\) " "for .+: '.+' and '.+'"): df.eval('a {0} b'.format(op), engine=engine, parser=parser) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 2a408b85f0ed1..ccde545b5b8e9 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -175,7 +175,7 @@ def test_nth(self): df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) g = df.groupby('A') # PR 17493, related to issue 11038 - # test Series.nth with True for dropna produces DeprecationWarning + # test Series.nth with True for dropna produces FutureWarning with assert_produces_warning(FutureWarning): result = g.B.nth(0, dropna=True) expected = g.B.first() diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index c0ea968ab0819..8f72da293a50c 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -261,7 +261,7 @@ def test_transform_casting(self): 9 B-053 b76cd912ff "2014-10-08 19:17:48" 10 B-065 b76cd912ff "2014-10-08 19:21:38" """ - df = pd.read_csv(StringIO(data), sep='\s+', + df = pd.read_csv(StringIO(data), sep=r'\s+', index_col=[0], parse_dates=['DATETIME']) result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff()) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3ce51983c111d..3738398d017f8 100644 --- 
a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -402,7 +402,7 @@ def test_daterange_bug_456(self): assert isinstance(result, DatetimeIndex) def test_error_with_zero_monthends(self): - msg = 'Offset <0 \* MonthEnds> did not increment date' + msg = r'Offset <0 \* MonthEnds> did not increment date' with tm.assert_raises_regex(ValueError, msg): date_range('1/1/2000', '1/1/2001', freq=MonthEnd(0)) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index f94a438fcdaa5..44f3c21d23e62 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -709,7 +709,7 @@ def test_dataframe(self, cache): 'day': [4, 5]}) msg = ("cannot assemble the datetimes: time data .+ does not " - "match format '%Y%m%d' \(match\)") + r"match format '%Y%m%d' \(match\)") with tm.assert_raises_regex(ValueError, msg): to_datetime(df2, cache=cache) result = to_datetime(df2, errors='coerce', cache=cache) @@ -719,15 +719,15 @@ def test_dataframe(self, cache): # extra columns msg = ("extra keys have been passed to the datetime assemblage: " - "\[foo\]") + r"\[foo\]") with tm.assert_raises_regex(ValueError, msg): df2 = df.copy() df2['foo'] = 1 to_datetime(df2, cache=cache) # not enough - msg = ('to assemble mappings requires at least that \[year, month, ' - 'day\] be specified: \[.+\] is missing') + msg = (r'to assemble mappings requires at least that \[year, month, ' + r'day\] be specified: \[.+\] is missing') for c in [['year'], ['year', 'month'], ['year', 'month', 'second'], diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 3ca4c31b7f059..4805c957907e6 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -175,7 +175,7 @@ def test_constructors_empty(self, data, closed): def test_constructors_errors(self): # scalar 
- msg = ('IntervalIndex\(...\) must be called with a collection of ' + msg = (r'IntervalIndex\(...\) must be called with a collection of ' 'some kind, 5 was passed') with tm.assert_raises_regex(TypeError, msg): IntervalIndex(5) diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py index d89c64fc5b9f8..f69b9d98143b0 100644 --- a/pandas/tests/indexing/test_multiindex.py +++ b/pandas/tests/indexing/test_multiindex.py @@ -299,7 +299,7 @@ def test_getitem_partial_int(self): # missing item: with tm.assert_raises_regex(KeyError, '1'): df[1] - with tm.assert_raises_regex(KeyError, "'\[1\] not in index'"): + with tm.assert_raises_regex(KeyError, r"'\[1\] not in index'"): df[[1]] def test_loc_multiindex_indexer_none(self): diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index b263d368f41f5..9e063c2d176e1 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -1411,8 +1411,9 @@ def test_to_html_border_zero(self): result = df.to_html(border=0) assert 'border="0"' in result + @tm.capture_stdout def test_display_option_warning(self): - with tm.assert_produces_warning(DeprecationWarning, + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): pd.options.html.border diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 2f8ef32722051..f266a8b3a3268 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -424,11 +424,11 @@ def test_to_latex_longtable(self, frame): df = DataFrame({'a': [1, 2]}) with1column_result = df.to_latex(index=False, longtable=True) - assert "\multicolumn{1}" in with1column_result + assert r"\multicolumn{1}" in with1column_result df = DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) with3columns_result = df.to_latex(index=False, longtable=True) - assert "\multicolumn{3}" in with3columns_result + assert 
r"\multicolumn{3}" in with3columns_result def test_to_latex_escape_special_chars(self): special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^', diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index c59acbd946f91..31c2ded49b7a0 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -7,7 +7,7 @@ import numpy as np import pandas as pd -from pandas.compat import PY3, is_platform_windows +from pandas.compat import PY3, is_platform_windows, is_platform_mac from pandas.io.parquet import (to_parquet, read_parquet, get_engine, PyArrowImpl, FastParquetImpl) from pandas.util import testing as tm @@ -174,8 +174,8 @@ def test_options_get_engine(fp, pa): assert isinstance(get_engine('fastparquet'), FastParquetImpl) -@pytest.mark.xfail(is_platform_windows(), - reason="reading pa metadata failing on Windows") +@pytest.mark.xfail(is_platform_windows() or is_platform_mac(), + reason="reading pa metadata failing on Windows/mac") def test_cross_engine_pa_fp(df_cross_compat, pa, fp): # cross-compat with differing reading/writing engines diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 60ed280bc050e..9e538ae130a85 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -206,6 +206,7 @@ def test_parallel_coordinates(self): def test_parallel_coordinates_with_sorted_labels(self): """ For #15908 """ from pandas.plotting import parallel_coordinates + df = DataFrame({"feat": [i for i in range(30)], "class": [2 for _ in range(10)] + [3 for _ in range(10)] + diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 4b2680b9be592..2f48aef1894a9 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -976,7 +976,7 @@ def test_on_float_by_int(self): def test_merge_datatype_error(self): """ Tests merge datatype mismatch error """ - msg = 
'merge keys \[0\] object and int64, must be the same type' + msg = r'merge keys \[0\] object and int64, must be the same type' left = pd.DataFrame({'left_val': [1, 5, 10], 'a': ['a', 'b', 'c']}) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index bdbf2a0ee2f68..f66cb12b11210 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -178,9 +178,9 @@ def test_concatlike_same_dtypes(self): tm.assert_series_equal(res, exp, check_index_type=True) # cannot append non-index - msg = ('cannot concatenate object of type \"(.+?)\";' + msg = (r'cannot concatenate object of type \"(.+?)\";' ' only pd.Series, pd.DataFrame, and pd.Panel' - ' \(deprecated\) objs are valid') + r' \(deprecated\) objs are valid') with tm.assert_raises_regex(TypeError, msg): pd.Series(vals1).append(vals2) diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index 3db474e32c4dd..23dad9736dac5 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -122,7 +122,7 @@ def test_math_add(self, interval): actual += 1 assert expected == actual - msg = "unsupported operand type\(s\) for \+" + msg = r"unsupported operand type\(s\) for \+" with tm.assert_raises_regex(TypeError, msg): interval + Interval(1, 2) @@ -138,7 +138,7 @@ def test_math_sub(self, interval): actual -= 1 assert expected == actual - msg = "unsupported operand type\(s\) for -" + msg = r"unsupported operand type\(s\) for -" with tm.assert_raises_regex(TypeError, msg): interval - Interval(1, 2) @@ -158,11 +158,11 @@ def test_math_mult(self, interval): actual *= 2 assert expected == actual - msg = "unsupported operand type\(s\) for \*" + msg = r"unsupported operand type\(s\) for \*" with tm.assert_raises_regex(TypeError, msg): interval * Interval(1, 2) - msg = "can\'t multiply sequence by non-int" + msg = r"can\'t multiply sequence by non-int" with tm.assert_raises_regex(TypeError, msg): interval 
* 'foo' @@ -175,7 +175,7 @@ def test_math_div(self, interval): actual /= 2.0 assert expected == actual - msg = "unsupported operand type\(s\) for /" + msg = r"unsupported operand type\(s\) for /" with tm.assert_raises_regex(TypeError, msg): interval / Interval(1, 2) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 8c6a4fcf4b1d4..0dc5e23184af7 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -480,12 +480,9 @@ def test_isna_for_inf(self): def test_isnull_for_inf_deprecated(self): # gh-17115 s = Series(['a', np.inf, np.nan, 1.0]) - with tm.assert_produces_warning(DeprecationWarning, - check_stacklevel=False): - pd.set_option('mode.use_inf_as_null', True) + with pd.option_context('mode.use_inf_as_null', True): r = s.isna() dr = s.dropna() - pd.reset_option('mode.use_inf_as_null') e = Series([False, True, True, False]) de = Series(['a', 1.0], index=[0, 3]) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index cf002ff046c2e..058892e3b85ff 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -7,21 +7,14 @@ from numpy import nan import numpy as np import pandas as pd -from distutils.version import LooseVersion from pandas import Series, DataFrame, bdate_range, Panel -from pandas.core.dtypes.common import ( - is_bool_dtype, - is_float_dtype, - is_object_dtype, - is_float) from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import BDay from pandas.util import testing as tm from pandas.compat import lrange from pandas import compat from pandas.core.sparse import frame as spf -import pandas.util._test_decorators as td from pandas._libs.sparse import BlockIndex, IntIndex from pandas.core.sparse.api import SparseSeries, SparseDataFrame, SparseArray @@ -1171,163 +1164,6 @@ def test_notna(self): tm.assert_frame_equal(res.to_dense(), exp) -@td.skip_if_no_scipy 
-@pytest.mark.parametrize('index', [None, list('abc')]) # noqa: F811 -@pytest.mark.parametrize('columns', [None, list('def')]) -@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) -@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16]) -def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): - # GH 4343 - # Make one ndarray and from it one sparse matrix, both to be used for - # constructing frames and comparing results - arr = np.eye(3, dtype=dtype) - # GH 16179 - arr[0, 1] = dtype(2) - try: - spm = spmatrix(arr) - assert spm.dtype == arr.dtype - except (TypeError, AssertionError): - # If conversion to sparse fails for this spmatrix type and arr.dtype, - # then the combination is not currently supported in NumPy, so we - # can just skip testing it thoroughly - return - - sdf = pd.SparseDataFrame(spm, index=index, columns=columns, - default_fill_value=fill_value) - - # Expected result construction is kind of tricky for all - # dtype-fill_value combinations; easiest to cast to something generic - # and except later on - rarr = arr.astype(object) - rarr[arr == 0] = np.nan - expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna( - fill_value if fill_value is not None else np.nan) - - # Assert frame is as expected - sdf_obj = sdf.astype(object) - tm.assert_sp_frame_equal(sdf_obj, expected) - tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) - - # Assert spmatrices equal - assert dict(sdf.to_coo().todok()) == dict(spm.todok()) - - # Ensure dtype is preserved if possible - was_upcast = ((fill_value is None or is_float(fill_value)) and - not is_object_dtype(dtype) and - not is_float_dtype(dtype)) - res_dtype = (bool if is_bool_dtype(dtype) else - float if was_upcast else - dtype) - tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) - assert sdf.to_coo().dtype == res_dtype - - # However, adding a str column results in an upcast to object - sdf['strings'] = np.arange(len(sdf)).astype(str) - assert 
sdf.to_coo().dtype == np.object_ - - -@td.skip_if_no_scipy -@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811 -def test_from_to_scipy_object(spmatrix, fill_value): - # GH 4343 - dtype = object - columns = list('cd') - index = list('ab') - import scipy - if (spmatrix is scipy.sparse.dok_matrix and LooseVersion( - scipy.__version__) >= LooseVersion('0.19.0')): - pytest.skip("dok_matrix from object does not work in SciPy >= 0.19") - - # Make one ndarray and from it one sparse matrix, both to be used for - # constructing frames and comparing results - arr = np.eye(2, dtype=dtype) - try: - spm = spmatrix(arr) - assert spm.dtype == arr.dtype - except (TypeError, AssertionError): - # If conversion to sparse fails for this spmatrix type and arr.dtype, - # then the combination is not currently supported in NumPy, so we - # can just skip testing it thoroughly - return - - sdf = pd.SparseDataFrame(spm, index=index, columns=columns, - default_fill_value=fill_value) - - # Expected result construction is kind of tricky for all - # dtype-fill_value combinations; easiest to cast to something generic - # and except later on - rarr = arr.astype(object) - rarr[arr == 0] = np.nan - expected = pd.SparseDataFrame(rarr, index=index, columns=columns).fillna( - fill_value if fill_value is not None else np.nan) - - # Assert frame is as expected - sdf_obj = sdf.astype(object) - tm.assert_sp_frame_equal(sdf_obj, expected) - tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) - - # Assert spmatrices equal - assert dict(sdf.to_coo().todok()) == dict(spm.todok()) - - # Ensure dtype is preserved if possible - res_dtype = object - tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) - assert sdf.to_coo().dtype == res_dtype - - -@td.skip_if_no_scipy -def test_from_scipy_correct_ordering(spmatrix): - # GH 16179 - arr = np.arange(1, 5).reshape(2, 2) - try: - spm = spmatrix(arr) - assert spm.dtype == arr.dtype - except (TypeError, AssertionError): - # If conversion 
to sparse fails for this spmatrix type and arr.dtype, - # then the combination is not currently supported in NumPy, so we - # can just skip testing it thoroughly - return - - sdf = pd.SparseDataFrame(spm) - expected = pd.SparseDataFrame(arr) - tm.assert_sp_frame_equal(sdf, expected) - tm.assert_frame_equal(sdf.to_dense(), expected.to_dense()) - - -@td.skip_if_no_scipy -def test_from_scipy_fillna(spmatrix): - # GH 16112 - arr = np.eye(3) - arr[1:, 0] = np.nan - - try: - spm = spmatrix(arr) - assert spm.dtype == arr.dtype - except (TypeError, AssertionError): - # If conversion to sparse fails for this spmatrix type and arr.dtype, - # then the combination is not currently supported in NumPy, so we - # can just skip testing it thoroughly - return - - sdf = pd.SparseDataFrame(spm).fillna(-1.0) - - # Returning frame should fill all nan values with -1.0 - expected = pd.SparseDataFrame({ - 0: pd.SparseSeries([1., -1, -1]), - 1: pd.SparseSeries([np.nan, 1, np.nan]), - 2: pd.SparseSeries([np.nan, np.nan, 1]), - }, default_fill_value=-1) - - # fill_value is expected to be what .fillna() above was called with - # We don't use -1 as initial fill_value in expected SparseSeries - # construction because this way we obtain "compressed" SparseArrays, - # avoiding having to construct them ourselves - for col in expected: - expected[col].fill_value = -1 - - tm.assert_sp_frame_equal(sdf, expected) - - class TestSparseDataFrameArithmetic(object): def test_numeric_op_scalar(self): diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py new file mode 100644 index 0000000000000..aef49c84fc2ad --- /dev/null +++ b/pandas/tests/sparse/frame/test_to_from_scipy.py @@ -0,0 +1,168 @@ +import pytest +import numpy as np +from warnings import catch_warnings +from pandas.util import testing as tm +from pandas import SparseDataFrame, SparseSeries +from distutils.version import LooseVersion +from pandas.core.dtypes.common import ( + is_bool_dtype, 
+ is_float_dtype, + is_object_dtype, + is_float) + + +scipy = pytest.importorskip('scipy') + + +@pytest.mark.parametrize('index', [None, list('abc')]) # noqa: F811 +@pytest.mark.parametrize('columns', [None, list('def')]) +@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) +@pytest.mark.parametrize('dtype', [bool, int, float, np.uint16]) +def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): + # GH 4343 + # Make one ndarray and from it one sparse matrix, both to be used for + # constructing frames and comparing results + arr = np.eye(3, dtype=dtype) + # GH 16179 + arr[0, 1] = dtype(2) + try: + spm = spmatrix(arr) + assert spm.dtype == arr.dtype + except (TypeError, AssertionError): + # If conversion to sparse fails for this spmatrix type and arr.dtype, + # then the combination is not currently supported in NumPy, so we + # can just skip testing it thoroughly + return + + sdf = SparseDataFrame(spm, index=index, columns=columns, + default_fill_value=fill_value) + + # Expected result construction is kind of tricky for all + # dtype-fill_value combinations; easiest to cast to something generic + # and except later on + rarr = arr.astype(object) + rarr[arr == 0] = np.nan + expected = SparseDataFrame(rarr, index=index, columns=columns).fillna( + fill_value if fill_value is not None else np.nan) + + # Assert frame is as expected + sdf_obj = sdf.astype(object) + tm.assert_sp_frame_equal(sdf_obj, expected) + tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) + + # Assert spmatrices equal + assert dict(sdf.to_coo().todok()) == dict(spm.todok()) + + # Ensure dtype is preserved if possible + was_upcast = ((fill_value is None or is_float(fill_value)) and + not is_object_dtype(dtype) and + not is_float_dtype(dtype)) + res_dtype = (bool if is_bool_dtype(dtype) else + float if was_upcast else + dtype) + tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) + assert sdf.to_coo().dtype == res_dtype + + # However, adding a str column results in 
an upcast to object + sdf['strings'] = np.arange(len(sdf)).astype(str) + assert sdf.to_coo().dtype == np.object_ + + +@pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811 +def test_from_to_scipy_object(spmatrix, fill_value): + # GH 4343 + dtype = object + columns = list('cd') + index = list('ab') + + if (spmatrix is scipy.sparse.dok_matrix and LooseVersion( + scipy.__version__) >= LooseVersion('0.19.0')): + pytest.skip("dok_matrix from object does not work in SciPy >= 0.19") + + # Make one ndarray and from it one sparse matrix, both to be used for + # constructing frames and comparing results + arr = np.eye(2, dtype=dtype) + try: + spm = spmatrix(arr) + assert spm.dtype == arr.dtype + except (TypeError, AssertionError): + # If conversion to sparse fails for this spmatrix type and arr.dtype, + # then the combination is not currently supported in NumPy, so we + # can just skip testing it thoroughly + return + + sdf = SparseDataFrame(spm, index=index, columns=columns, + default_fill_value=fill_value) + + # Expected result construction is kind of tricky for all + # dtype-fill_value combinations; easiest to cast to something generic + # and except later on + rarr = arr.astype(object) + rarr[arr == 0] = np.nan + expected = SparseDataFrame(rarr, index=index, columns=columns).fillna( + fill_value if fill_value is not None else np.nan) + + # Assert frame is as expected + sdf_obj = sdf.astype(object) + tm.assert_sp_frame_equal(sdf_obj, expected) + tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) + + # Assert spmatrices equal + with catch_warnings(record=True): + assert dict(sdf.to_coo().todok()) == dict(spm.todok()) + + # Ensure dtype is preserved if possible + res_dtype = object + tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) + assert sdf.to_coo().dtype == res_dtype + + +def test_from_scipy_correct_ordering(spmatrix): + # GH 16179 + arr = np.arange(1, 5).reshape(2, 2) + try: + spm = spmatrix(arr) + assert spm.dtype == arr.dtype + 
except (TypeError, AssertionError): + # If conversion to sparse fails for this spmatrix type and arr.dtype, + # then the combination is not currently supported in NumPy, so we + # can just skip testing it thoroughly + return + + sdf = SparseDataFrame(spm) + expected = SparseDataFrame(arr) + tm.assert_sp_frame_equal(sdf, expected) + tm.assert_frame_equal(sdf.to_dense(), expected.to_dense()) + + +def test_from_scipy_fillna(spmatrix): + # GH 16112 + arr = np.eye(3) + arr[1:, 0] = np.nan + + try: + spm = spmatrix(arr) + assert spm.dtype == arr.dtype + except (TypeError, AssertionError): + # If conversion to sparse fails for this spmatrix type and arr.dtype, + # then the combination is not currently supported in NumPy, so we + # can just skip testing it thoroughly + return + + sdf = SparseDataFrame(spm).fillna(-1.0) + + # Returning frame should fill all nan values with -1.0 + expected = SparseDataFrame({ + 0: SparseSeries([1., -1, -1]), + 1: SparseSeries([np.nan, 1, np.nan]), + 2: SparseSeries([np.nan, np.nan, 1]), + }, default_fill_value=-1) + + # fill_value is expected to be what .fillna() above was called with + # We don't use -1 as initial fill_value in expected SparseSeries + # construction because this way we obtain "compressed" SparseArrays, + # avoiding having to construct them ourselves + for col in expected: + expected[col].fill_value = -1 + + tm.assert_sp_frame_equal(sdf, expected) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index d7fc5033bab90..6b3b519d49f7f 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -788,10 +788,10 @@ def test_duplicated_with_nas(self): 2, 4, 1, 5, 6]), np.array([1.1, 2.2, 1.1, np.nan, 3.3, 2.2, 4.4, 1.1, np.nan, 6.6]), - pytest.mark.xfail(reason="Complex bug. 
GH 16399")( - np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j, - 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]) - ), + pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j, + 2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]), + marks=pytest.mark.xfail(reason="Complex bug. GH 16399") + ), np.array(['a', 'b', 'a', 'e', 'c', 'b', 'd', 'a', 'e', 'f'], dtype=object), np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7], diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 86d9a9fa91e47..424ba6aab9a56 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2289,7 +2289,7 @@ def test_reset_index_multiindex_columns(self): # gh-16120: already existing column with tm.assert_raises_regex(ValueError, - ("cannot insert \('A', ''\), " + (r"cannot insert \('A', ''\), " "already exists")): df.rename_axis('A').reset_index() @@ -2323,7 +2323,7 @@ def test_reset_index_multiindex_columns(self): # ... which is incompatible with col_fill=None with tm.assert_raises_regex(ValueError, ("col_fill=None is incompatible with " - "incomplete column name \('C', 'c'\)")): + r"incomplete column name \('C', 'c'\)")): df2.rename_axis([('C', 'c')]).reset_index(col_fill=None) # with col_level != 0 diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index 31580bc9eab57..1c878604b11a2 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -48,12 +48,18 @@ def test_assert_almost_equal_numbers_with_mixed(self): self._assert_not_almost_equal_both(1, [1, ]) self._assert_not_almost_equal_both(1, object()) - def test_assert_almost_equal_edge_case_ndarrays(self): - self._assert_almost_equal_both(np.array([], dtype='M8[ns]'), - np.array([], dtype='float64'), - check_dtype=False) - self._assert_almost_equal_both(np.array([], dtype=str), - np.array([], dtype='int64'), + @pytest.mark.parametrize( + "left_dtype", + ['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object']) + 
@pytest.mark.parametrize( + "right_dtype", + ['M8[ns]', 'm8[ns]', 'float64', 'int64', 'object']) + def test_assert_almost_equal_edge_case_ndarrays( + self, left_dtype, right_dtype): + + # empty compare + self._assert_almost_equal_both(np.array([], dtype=left_dtype), + np.array([], dtype=right_dtype), check_dtype=False) def test_assert_almost_equal_dicts(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/19003
2017-12-30T17:42:27Z
2017-12-30T22:48:01Z
2017-12-30T22:48:01Z
2017-12-30T22:48:02Z
DOC: More 0.22.0 updates
diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 89e2d3006696c..3e673bd4cbc28 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -372,6 +372,12 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`). Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. note:: + + The changes described here have been partially reverted. See + the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` for more. + + The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and return value of ``sum`` and ``prod`` on an empty Series has changed (:issue:`9422`, :issue:`15507`). diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index da4acd99e3873..d165339cb0de9 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -218,3 +218,26 @@ returns ``0``. The default behavior of ``min_periods=None``, implying that ``min_periods`` equals the window size, is unchanged. + +Compatibility +~~~~~~~~~~~~~ + +If you maintain a library that should work across pandas versions, it +may be easiest to exclude pandas 0.21 from your requirements. Otherwise, all your +``sum()`` calls would need to check if the ``Series`` is empty before summing. + +With setuptools, in your ``setup.py`` use:: + + install_requires=['pandas!=0.21.*', ...] + +With conda, use + +.. code-block:: yaml + + requirements: + run: + - pandas !=0.21.0,!=0.21.1 + +Note that the inconsistency in the return value for all-*NA* series is still +there for pandas 0.20.3 and earlier. Avoiding pandas 0.21 will only help with +the empty case. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c5359ba2c5ea1..7a0e1fe361c59 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7534,8 +7534,7 @@ def _doc_parms(cls): ---------- axis : %(axis_descr)s skipna : boolean, default True - Exclude NA/null values. If an entire row/column is NA or empty, the result - will be NA + Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s @@ -7669,7 +7668,7 @@ def _doc_parms(cls): The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. - .. versionadded :: 0.21.2 + .. versionadded :: 0.22.0 Added with the default being 1. This means the sum or product of an all-NA or empty series is ``NaN``.
[ci skip] cc @jorisvandenbossche xref https://github.com/pandas-dev/pandas/issues/18985#issuecomment-354525713 and https://github.com/pandas-dev/pandas/pull/18983#issuecomment-354525079
https://api.github.com/repos/pandas-dev/pandas/pulls/19002
2017-12-30T13:37:57Z
2017-12-30T19:30:40Z
2017-12-30T19:30:40Z
2017-12-30T19:31:11Z
TST: limit printing of xfail cases & catch Performance Warnings
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 6cfa083172921..11a52267ed1b4 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -368,12 +368,15 @@ def test_dti_add_offset_array(self, tz, box): # GH#18849 dti = pd.date_range('2017-01-01', periods=2, tz=tz) other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) - res = dti + other + + with tm.assert_produces_warning(PerformanceWarning): + res = dti + other expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq='infer') tm.assert_index_equal(res, expected) - res2 = other + dti + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + dti tm.assert_index_equal(res2, expected) @pytest.mark.parametrize('box', [np.array, pd.Index]) @@ -381,7 +384,9 @@ def test_dti_sub_offset_array(self, tz, box): # GH#18824 dti = pd.date_range('2017-01-01', periods=2, tz=tz) other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) - res = dti - other + + with tm.assert_produces_warning(PerformanceWarning): + res = dti - other expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq='infer') tm.assert_index_equal(res, expected) @@ -392,20 +397,25 @@ def test_dti_sub_offset_array(self, tz, box): def test_dti_with_offset_series(self, tz, names): # GH#18849 dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0]) - other = pd.Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], - name=names[1]) + other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], + name=names[1]) - expected_add = pd.Series([dti[n] + other[n] for n in range(len(dti))], - name=names[2]) - res = dti + other + expected_add = Series([dti[n] + other[n] for n in range(len(dti))], + name=names[2]) + + with tm.assert_produces_warning(PerformanceWarning): + res = dti + other tm.assert_series_equal(res, expected_add) - res2 = other + dti + 
+ with tm.assert_produces_warning(PerformanceWarning): + res2 = other + dti tm.assert_series_equal(res2, expected_add) - expected_sub = pd.Series([dti[n] - other[n] for n in range(len(dti))], - name=names[2]) + expected_sub = Series([dti[n] - other[n] for n in range(len(dti))], + name=names[2]) - res3 = dti - other + with tm.assert_produces_warning(PerformanceWarning): + res3 = dti - other tm.assert_series_equal(res3, expected_sub) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 619a8ca3bf112..52b2d7205c849 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -819,8 +819,8 @@ def test_replace_series(self, how, to_key, from_key): assert obj.dtype == from_key if (from_key.startswith('datetime') and to_key.startswith('datetime')): - pytest.xfail("different tz, currently mask_missing " - "raises SystemError") + # tested below + return if how == 'dict': replacer = dict(zip(self.rep[from_key], self.rep[to_key])) @@ -849,5 +849,38 @@ def test_replace_series(self, how, to_key, from_key): tm.assert_series_equal(result, exp) + # TODO(jreback) commented out to only have a single xfail printed + @pytest.mark.xfail(reason="different tz, " + "currently mask_missing raises SystemError") + # @pytest.mark.parametrize('how', ['dict', 'series']) + # @pytest.mark.parametrize('to_key', [ + # 'datetime64[ns]', 'datetime64[ns, UTC]', + # 'datetime64[ns, US/Eastern]']) + # @pytest.mark.parametrize('from_key', [ + # 'datetime64[ns]', 'datetime64[ns, UTC]', + # 'datetime64[ns, US/Eastern]']) + # def test_replace_series_datetime_datetime(self, how, to_key, from_key): + def test_replace_series_datetime_datetime(self): + how = 'dict' + to_key = 'datetime64[ns]' + from_key = 'datetime64[ns]' + + index = pd.Index([3, 4], name='xxx') + obj = pd.Series(self.rep[from_key], index=index, name='yyy') + assert obj.dtype == from_key + + if how == 'dict': + replacer = dict(zip(self.rep[from_key], 
self.rep[to_key])) + elif how == 'series': + replacer = pd.Series(self.rep[to_key], index=self.rep[from_key]) + else: + raise ValueError + + result = obj.replace(replacer) + exp = pd.Series(self.rep[to_key], index=index, name='yyy') + assert exp.dtype == to_key + + tm.assert_series_equal(result, exp) + def test_replace_series_period(self): pass diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index bccc46f1e0ca8..6220ce8ff7669 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -209,11 +209,9 @@ def test_rank_signature(self): pytest.param([np.iinfo(np.int64).min, -100, 0, 1, 9999, 100000, 1e10, np.iinfo(np.int64).max], 'int64', - marks=pytest.mark.xfail(reason='''iNaT is equivalent to - minimum value of dtype - int64 pending issue - #16674'''), - ), + marks=pytest.mark.xfail( + reason="iNaT is equivalent to minimum value of dtype" + "int64 pending issue #16674")), ([NegInfinity(), '1', 'A', 'BA', 'Ba', 'C', Infinity()], 'object') ]) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index b304ebff55b6e..edabf4a7ccc99 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -113,7 +113,7 @@ def _get_offset(self, klass, value=1, normalize=False): else: try: klass = klass(value, normalize=normalize) - except: + except Exception: klass = klass(normalize=normalize) return klass @@ -143,10 +143,10 @@ def test_apply_out_of_range(self, tz): except tslib.OutOfBoundsDatetime: raise - except (ValueError, KeyError) as e: - pytest.skip( - "cannot create out_of_range offset: {0} {1}".format( - str(self).split('.')[-1], e)) + except (ValueError, KeyError): + # we are creating an invalid offset + # so ignore + pass class TestCommon(Base):
TST: limit printing of xfail cases no need to report invalid offset creation STYLE/DEPR: catch PerformanceWarnings closes #18989
https://api.github.com/repos/pandas-dev/pandas/pulls/19001
2017-12-30T13:21:53Z
2017-12-30T15:10:23Z
2017-12-30T15:10:23Z
2017-12-30T15:10:23Z
CLN: rename lib.isscalar to lib.is_scalar
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 3898f7499e85e..bfcf0c6e69a2f 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -98,7 +98,7 @@ def memory_usage_of_objects(ndarray[object, ndim=1] arr): # ---------------------------------------------------------------------- -cpdef bint isscalar(object val): +cpdef bint is_scalar(object val): """ Return True if given value is scalar. diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 167f215b6c0ac..1f1e47a6c54d6 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -750,7 +750,7 @@ def _broadcast(arr_or_scalar, shape): Helper function to broadcast arrays / scalars to the desired shape. """ if _np_version_under1p10: - if lib.isscalar(arr_or_scalar): + if is_scalar(arr_or_scalar): out = np.empty(shape) out.fill(arr_or_scalar) else: diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 0e7ae0cbe7c87..2e912b0075bfd 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -126,7 +126,7 @@ def _align(terms): return np.result_type(terms.type), None # if all resolved variables are numeric scalars - if all(term.isscalar for term in terms): + if all(term.is_scalar for term in terms): return _result_type_many(*(term.value for term in terms)).type, None # perform the main alignment diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 23abfa8b3fca1..b68b6970a89cc 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -368,11 +368,11 @@ def _maybe_transform_eq_ne(self, node, left=None, right=None): def _maybe_downcast_constants(self, left, right): f32 = np.dtype(np.float32) - if left.isscalar and not right.isscalar and right.return_type == f32: + if left.is_scalar and not right.is_scalar and right.return_type == f32: # right is a float32 array, left is a scalar name = self.env.add_tmp(np.float32(left.value)) left = self.term_type(name, 
self.env) - if right.isscalar and not left.isscalar and left.return_type == f32: + if right.is_scalar and not left.is_scalar and left.return_type == f32: # left is a float32 array, right is a scalar name = self.env.add_tmp(np.float32(right.value)) right = self.term_type(name, self.env) diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 7ba2c16530cad..ca0c4db4947c4 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -99,7 +99,7 @@ def update(self, value): self.value = value @property - def isscalar(self): + def is_scalar(self): return is_scalar(self._value) @property @@ -214,8 +214,8 @@ def operand_types(self): return frozenset(term.type for term in com.flatten(self)) @property - def isscalar(self): - return all(operand.isscalar for operand in self.operands) + def is_scalar(self): + return all(operand.is_scalar for operand in self.operands) @property def is_datetime(self): @@ -412,7 +412,7 @@ def stringify(value): lhs, rhs = self.lhs, self.rhs - if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar: + if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) @@ -421,7 +421,7 @@ def stringify(value): v = v.tz_convert('UTC') self.rhs.update(v) - if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar: + if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) @@ -431,7 +431,7 @@ def stringify(value): self.lhs.update(v) def _disallow_scalar_only_bool_ops(self): - if ((self.lhs.isscalar or self.rhs.isscalar) and + if ((self.lhs.is_scalar or self.rhs.is_scalar) and self.op in _bool_ops_dict and (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and issubclass(self.lhs.return_type, (bool, np.bool_))))): diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index de769c69f44fd..8010a213efaf0 100644 
--- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -17,7 +17,7 @@ is_complex = lib.is_complex -is_scalar = lib.isscalar +is_scalar = lib.is_scalar is_decimal = lib.is_decimal diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 04b8ade7e5253..94fbf290900b4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1101,7 +1101,7 @@ def _convert_for_op(self, value): def _assert_can_do_op(self, value): """ Check value is valid for scalar op """ - if not lib.isscalar(value): + if not is_scalar(value): msg = "'value' must be a scalar, passed: {0}" raise TypeError(msg.format(type(value).__name__)) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 40c07376d2522..e40a3ba742609 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -905,7 +905,7 @@ def astype(self, dtype, copy=True): def _ensure_datetimelike_to_i8(other): """ helper for coercing an input scalar or array to i8 """ - if lib.isscalar(other) and isna(other): + if is_scalar(other) and isna(other): other = iNaT elif isinstance(other, ABCIndexClass): # convert tz if needed diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5995b9fc7674c..6337c2f73d5ec 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -38,7 +38,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, if fastpath: return cls._simple_new(data, name=name) - # isscalar, generators handled in coerce_to_ndarray + # is_scalar, generators handled in coerce_to_ndarray data = cls._coerce_to_ndarray(data) if issubclass(data.dtype.type, compat.string_types): diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 219d1b2852938..33c570a814e7d 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1098,9 +1098,9 @@ def test_is_timedelta(self): assert 
not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]')) -class Testisscalar(object): +class TestIsScalar(object): - def test_isscalar_builtin_scalars(self): + def test_is_scalar_builtin_scalars(self): assert is_scalar(None) assert is_scalar(True) assert is_scalar(False) @@ -1115,7 +1115,7 @@ def test_isscalar_builtin_scalars(self): assert is_scalar(timedelta(hours=1)) assert is_scalar(pd.NaT) - def test_isscalar_builtin_nonscalars(self): + def test_is_scalar_builtin_nonscalars(self): assert not is_scalar({}) assert not is_scalar([]) assert not is_scalar([1]) @@ -1124,7 +1124,7 @@ def test_isscalar_builtin_nonscalars(self): assert not is_scalar(slice(None)) assert not is_scalar(Ellipsis) - def test_isscalar_numpy_array_scalars(self): + def test_is_scalar_numpy_array_scalars(self): assert is_scalar(np.int64(1)) assert is_scalar(np.float64(1.)) assert is_scalar(np.int32(1)) @@ -1135,7 +1135,7 @@ def test_isscalar_numpy_array_scalars(self): assert is_scalar(np.datetime64('2014-01-01')) assert is_scalar(np.timedelta64(1, 'h')) - def test_isscalar_numpy_zerodim_arrays(self): + def test_is_scalar_numpy_zerodim_arrays(self): for zerodim in [np.array(1), np.array('foobar'), np.array(np.datetime64('2014-01-01')), np.array(np.timedelta64(1, 'h')), @@ -1143,19 +1143,19 @@ def test_isscalar_numpy_zerodim_arrays(self): assert not is_scalar(zerodim) assert is_scalar(lib.item_from_zerodim(zerodim)) - def test_isscalar_numpy_arrays(self): + def test_is_scalar_numpy_arrays(self): assert not is_scalar(np.array([])) assert not is_scalar(np.array([[]])) assert not is_scalar(np.matrix('1; 2')) - def test_isscalar_pandas_scalars(self): + def test_is_scalar_pandas_scalars(self): assert is_scalar(Timestamp('2014-01-01')) assert is_scalar(Timedelta(hours=1)) assert is_scalar(Period('2014-01-01')) assert is_scalar(Interval(left=0, right=1)) assert is_scalar(DateOffset(days=1)) - def test_lisscalar_pandas_containers(self): + def test_is_scalar_pandas_containers(self): assert not 
is_scalar(Series()) assert not is_scalar(Series([1])) assert not is_scalar(DataFrame())
- [x] closes #18987 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry There's also a handful of methods which call `is_scalar` that are named `isscalar`, e.g.: https://github.com/pandas-dev/pandas/blob/84335621ad0a0d83302a80b6911d3985c00b5cee/pandas/core/computation/ops.py#L101-L103 should these be changed as well? Also does a CLN require an entry in whatsnew?
https://api.github.com/repos/pandas-dev/pandas/pulls/19000
2017-12-30T05:37:45Z
2017-12-30T22:41:23Z
2017-12-30T22:41:23Z
2017-12-30T22:41:27Z
Fix IntervalDtype Bugs and Inconsistencies
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d7a3f0d077302..783de6569a542 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -270,6 +270,7 @@ Other API Changes - Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (issue:`18817`) - :class:`IntervalIndex` and ``IntervalDtype`` no longer support categorical, object, and string subtypes (:issue:`19016`) - The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) +- ``IntervalDtype`` now returns ``True`` when compared against ``'interval'`` regardless of subtype, and ``IntervalDtype.name`` now returns ``'interval'`` regardless of subtype (:issue:`18980`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 08773354d44d8..2ec35889d6a7a 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -626,6 +626,7 @@ class IntervalDtype(ExtensionDtype): THIS IS NOT A REAL NUMPY DTYPE """ + name = 'interval' type = IntervalDtypeType kind = None str = '|O08' @@ -653,8 +654,8 @@ def __new__(cls, subtype=None): u.subtype = None return u elif (isinstance(subtype, compat.string_types) and - subtype == 'interval'): - subtype = '' + subtype.lower() == 'interval'): + subtype = None else: if isinstance(subtype, compat.string_types): m = cls._match.search(subtype) @@ -666,11 +667,6 @@ def __new__(cls, subtype=None): except TypeError: raise ValueError("could not construct IntervalDtype") - if subtype is None: - u = object.__new__(cls) - u.subtype = None - return u - if is_categorical_dtype(subtype) or is_string_dtype(subtype): # GH 19016 msg = ('category, object, and string subtypes are not supported ' @@ -692,31 +688,29 @@ def construct_from_string(cls, string): if its not possible """ if isinstance(string, compat.string_types): 
- try: - return cls(string) - except ValueError: - pass - raise TypeError("could not construct IntervalDtype") + return cls(string) + msg = "a string needs to be passed, got type {typ}" + raise TypeError(msg.format(typ=type(string))) def __unicode__(self): if self.subtype is None: return "interval" return "interval[{subtype}]".format(subtype=self.subtype) - @property - def name(self): - return str(self) - def __hash__(self): # make myself hashable return hash(str(self)) def __eq__(self, other): if isinstance(other, compat.string_types): - return other == self.name or other == self.name.title() - - return (isinstance(other, IntervalDtype) and - self.subtype == other.subtype) + return other.lower() in (self.name.lower(), str(self).lower()) + elif not isinstance(other, IntervalDtype): + return False + elif self.subtype is None or other.subtype is None: + # None should match any subtype + return True + else: + return self.subtype == other.subtype @classmethod def is_dtype(cls, dtype): diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 6a3715fd66159..692fb3271cfda 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -433,7 +433,7 @@ def test_hash_vs_equality(self): assert dtype2 == dtype assert dtype3 == dtype assert dtype is dtype2 - assert dtype2 is dtype + assert dtype2 is dtype3 assert dtype3 is dtype assert hash(dtype) == hash(dtype2) assert hash(dtype) == hash(dtype3) @@ -451,14 +451,19 @@ def test_hash_vs_equality(self): assert hash(dtype2) == hash(dtype2) assert hash(dtype2) == hash(dtype3) - def test_construction(self): - with pytest.raises(ValueError): - IntervalDtype('xx') + @pytest.mark.parametrize('subtype', [ + 'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')]) + def test_construction(self, subtype): + i = IntervalDtype(subtype) + assert i.subtype == np.dtype('int64') + assert is_interval_dtype(i) - for s in ['interval[int64]', 'Interval[int64]', 'int64']: - i = 
IntervalDtype(s) - assert i.subtype == np.dtype('int64') - assert is_interval_dtype(i) + @pytest.mark.parametrize('subtype', [None, 'interval', 'Interval']) + def test_construction_generic(self, subtype): + # generic + i = IntervalDtype(subtype) + assert i.subtype is None + assert is_interval_dtype(i) @pytest.mark.parametrize('subtype', [ CategoricalDtype(list('abc'), False), @@ -471,17 +476,27 @@ def test_construction_not_supported(self, subtype): with tm.assert_raises_regex(TypeError, msg): IntervalDtype(subtype) - def test_construction_generic(self): - # generic - i = IntervalDtype('interval') - assert i.subtype == '' - assert is_interval_dtype(i) - assert str(i) == 'interval[]' + def test_construction_errors(self): + msg = 'could not construct IntervalDtype' + with tm.assert_raises_regex(ValueError, msg): + IntervalDtype('xx') - i = IntervalDtype() - assert i.subtype is None - assert is_interval_dtype(i) - assert str(i) == 'interval' + def test_construction_from_string(self): + result = IntervalDtype('interval[int64]') + assert is_dtype_equal(self.dtype, result) + result = IntervalDtype.construct_from_string('interval[int64]') + assert is_dtype_equal(self.dtype, result) + + @pytest.mark.parametrize('string', [ + 'foo', 'interval[foo]', 'foo[int64]', 0, 3.14, ('a', 'b'), None]) + def test_construction_from_string_errors(self, string): + if isinstance(string, string_types): + error, msg = ValueError, 'could not construct IntervalDtype' + else: + error, msg = TypeError, 'a string needs to be passed, got type' + + with tm.assert_raises_regex(error, msg): + IntervalDtype.construct_from_string(string) def test_subclass(self): a = IntervalDtype('interval[int64]') @@ -506,36 +521,45 @@ def test_is_dtype(self): assert not IntervalDtype.is_dtype(np.int64) assert not IntervalDtype.is_dtype(np.float64) - def test_identity(self): - assert (IntervalDtype('interval[int64]') == - IntervalDtype('interval[int64]')) - def test_coerce_to_dtype(self): assert 
(_coerce_to_dtype('interval[int64]') == IntervalDtype('interval[int64]')) - def test_construction_from_string(self): - result = IntervalDtype('interval[int64]') - assert is_dtype_equal(self.dtype, result) - result = IntervalDtype.construct_from_string('interval[int64]') - assert is_dtype_equal(self.dtype, result) - with pytest.raises(TypeError): - IntervalDtype.construct_from_string('foo') - with pytest.raises(TypeError): - IntervalDtype.construct_from_string('interval[foo]') - with pytest.raises(TypeError): - IntervalDtype.construct_from_string('foo[int64]') - def test_equality(self): assert is_dtype_equal(self.dtype, 'interval[int64]') assert is_dtype_equal(self.dtype, IntervalDtype('int64')) - assert is_dtype_equal(self.dtype, IntervalDtype('int64')) assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64')) assert not is_dtype_equal(self.dtype, 'int64') assert not is_dtype_equal(IntervalDtype('int64'), IntervalDtype('float64')) + @pytest.mark.parametrize('subtype', [ + None, 'interval', 'Interval', 'int64', 'uint64', 'float64', + 'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')]) + def test_equality_generic(self, subtype): + # GH 18980 + dtype = IntervalDtype(subtype) + assert is_dtype_equal(dtype, 'interval') + assert is_dtype_equal(dtype, IntervalDtype()) + + @pytest.mark.parametrize('subtype', [ + 'int64', 'uint64', 'float64', 'complex128', 'datetime64', + 'timedelta64', PeriodDtype('Q')]) + def test_name_repr(self, subtype): + # GH 18980 + dtype = IntervalDtype(subtype) + expected = 'interval[{subtype}]'.format(subtype=subtype) + assert str(dtype) == expected + assert dtype.name == 'interval' + + @pytest.mark.parametrize('subtype', [None, 'interval', 'Interval']) + def test_name_repr_generic(self, subtype): + # GH 18980 + dtype = IntervalDtype(subtype) + assert str(dtype) == 'interval' + assert dtype.name == 'interval' + def test_basic(self): assert is_interval_dtype(self.dtype)
- [X] closes #18980 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Summary: - `IntervalDtype(*)` now returns `True` when compared against `'interval'` and `IntervalDtype()` regardless of subtype - `IntervalDtype(*)` now returns `'interval'` regardless of subtype - `str(IntervalDtype(*))` still displays subtype information, e.g. `'interval[int64]'` - Cleaned up miscellaneous tests related to `IntervalDtype`
https://api.github.com/repos/pandas-dev/pandas/pulls/18997
2017-12-29T21:09:36Z
2018-01-10T21:28:03Z
2018-01-10T21:28:03Z
2018-09-24T17:26:29Z
TST: Remove pow test in expressions
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index ab45b5113802c..272e7f2e05d14 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -56,11 +56,6 @@ if [ "$CONDA_BUILD_TEST" ]; then conda install conda-build fi -# TODO(jreback) -echo -echo "[fix conda version]" -conda install conda=4.3.30 - echo echo "[add channels]" conda config --remove channels defaults || exit 1 diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 6d2607962dfb0..aebc9cd3deaac 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -73,17 +73,11 @@ def teardown_method(self, method): def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 - operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow'] + operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv'] if not compat.PY3: operations.append('div') for arith in operations: - # numpy >= 1.11 doesn't handle integers - # raised to integer powers - # https://github.com/pandas-dev/pandas/issues/15363 - if arith == 'pow' and not _np_version_under1p11: - continue - operator_name = arith if arith == 'div': operator_name = 'truediv'
These are already skipped for NumPy>=1.12, and buggy for NumPy 1.10.4 cc @jreback This will be backported. closes #18992
https://api.github.com/repos/pandas-dev/pandas/pulls/18995
2017-12-29T19:15:27Z
2017-12-29T20:21:29Z
2017-12-29T20:21:29Z
2018-06-29T08:42:01Z
CI: remove fixed conda version
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index ab45b5113802c..272e7f2e05d14 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -56,11 +56,6 @@ if [ "$CONDA_BUILD_TEST" ]; then conda install conda-build fi -# TODO(jreback) -echo -echo "[fix conda version]" -conda install conda=4.3.30 - echo echo "[add channels]" conda config --remove channels defaults || exit 1 diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 6d2607962dfb0..da9ae03d05d88 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -17,6 +17,7 @@ from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_panel4d_equal) +from pandas.util._test_decorators import skip_if_no from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm @@ -107,6 +108,7 @@ def run_arithmetic(self, df, other, assert_func, check_dtype=False, pprint_thing("Failed test with operator %r" % op.__name__) raise + @skip_if_no("numpy", "1.11") def test_integer_arithmetic(self): self.run_arithmetic(self.integer, self.integer, assert_frame_equal) @@ -191,6 +193,7 @@ def run_panel(self, panel, other, binary_comp=None, run_binary=True, self.run_binary(panel, binary_comp, assert_func, test_flex=True, **kwargs) + @skip_if_no("numpy", "1.11") def test_integer_arithmetic_frame(self): self.run_frame(self.integer, self.integer) @@ -243,6 +246,7 @@ def test_mixed_arithmetic(self): self.run_arithmetic(self.mixed[col], self.mixed[col], assert_series_equal) + @skip_if_no("numpy", "1.11") def test_integer_with_zeros(self): self.integer *= np.random.randint(0, 2, size=np.shape(self.integer)) self.run_arithmetic(self.integer, self.integer,
xref #18870
https://api.github.com/repos/pandas-dev/pandas/pulls/18994
2017-12-29T18:02:15Z
2017-12-29T19:18:08Z
null
2017-12-29T19:18:08Z
CI: Pin NumPy version
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 693a2fe1fd6a6..1c1d256fa83db 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -56,11 +56,6 @@ if [ "$CONDA_BUILD_TEST" ]; then conda install conda-build fi -# TODO(jreback) -echo -echo "[fix conda version]" -conda install conda=4.3.30 - echo echo "[add channels]" conda config --remove channels defaults || exit 1 @@ -114,6 +109,12 @@ if [ -e ${REQ} ]; then time bash $REQ || exit 1 fi +# Pin NumPy +echo ["pin NumPy"] +NUMPY_VERSION="$(conda list numpy | grep '^n.*' | awk '{print $2}')" +conda config --env --add pinned_packages numpy=" ${NUMPY_VERSION}" +conda config --show pinned_packages + time conda install -n pandas pytest>=3.1.0 time pip install pytest-xdist moto @@ -201,7 +202,7 @@ fi echo echo "[show pandas]" -conda list pandas +conda list -n pandas echo echo "[done]"
closes #18992 This should fail.
https://api.github.com/repos/pandas-dev/pandas/pulls/18993
2017-12-29T17:51:56Z
2017-12-29T19:18:52Z
null
2017-12-29T19:21:50Z
CI: fix pip install
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 693a2fe1fd6a6..ab45b5113802c 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -106,6 +106,9 @@ time conda create -n pandas --file=${REQ} || exit 1 source activate pandas +# https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 +python -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" + # may have addtl installation instructions for this build echo echo "[build addtl installs]"
https://api.github.com/repos/pandas-dev/pandas/pulls/18990
2017-12-29T16:49:42Z
2017-12-29T17:42:28Z
2017-12-29T17:42:28Z
2017-12-29T17:42:28Z
0.22.0 backports
diff --git a/doc/source/release.rst b/doc/source/release.rst index 0298eda2c78ab..aea6280a490d6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,27 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.22.0 +------------- + +**Release date:** December 29, 2017 + +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note. + +The only changes are: + +- The sum of an empty or all-*NA* ``Series`` is now ``0`` +- The product of an empty or all-*NA* ``Series`` is now ``1`` +- We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. + +See the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` overview for further explanation +of all the places in the library this affects. + pandas 0.21.1 ------------- diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 8f779e01a6be5..64cbe0b050a61 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,8 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.22.0.txt + .. include:: whatsnew/v0.21.1.txt .. include:: whatsnew/v0.21.0.txt diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 53b052a955b45..da4acd99e3873 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -1,156 +1,220 @@ .. 
_whatsnew_0220: -v0.22.0 -------- +v0.22.0 (December 29, 2017) +--------------------------- -This is a major release from 0.21.1 and includes a number of API changes, -deprecations, new features, enhancements, and performance improvements along -with a large number of bug fixes. We recommend that all users upgrade to this -version. +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note (singular!). -.. _whatsnew_0220.enhancements: +.. _whatsnew_0220.api_breaking: -New features -~~~~~~~~~~~~ +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- +Pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The +summary is that -.. _whatsnew_0220.enhancements.other: +* The sum of an empty or all-*NA* ``Series`` is now ``0`` +* The product of an empty or all-*NA* ``Series`` is now ``1`` +* We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. -Other Enhancements -^^^^^^^^^^^^^^^^^^ +Some background: In pandas 0.21, we fixed a long-standing inconsistency +in the return value of all-*NA* series depending on whether or not bottleneck +was installed. See :ref:`whatsnew_0210.api_breaking.bottleneck`. At the same +time, we changed the sum and prod of an empty ``Series`` to also be ``NaN``. -- -- -- +Based on feedback, we've partially reverted those changes. -.. _whatsnew_0220.api_breaking: +Arithmetic Operations +^^^^^^^^^^^^^^^^^^^^^ -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The default sum for empty or all-*NA* ``Series`` is now ``0``. -- -- -- +*pandas 0.21.x* -.. _whatsnew_0220.api: +.. 
code-block:: ipython -Other API Changes -^^^^^^^^^^^^^^^^^ + In [1]: pd.Series([]).sum() + Out[1]: nan -- -- -- + In [2]: pd.Series([np.nan]).sum() + Out[2]: nan -.. _whatsnew_0220.deprecations: +*pandas 0.22.0* -Deprecations -~~~~~~~~~~~~ +.. ipython:: python -- -- -- + pd.Series([]).sum() + pd.Series([np.nan]).sum() -.. _whatsnew_0220.prior_deprecations: +The default behavior is the same as pandas 0.20.3 with bottleneck installed. It +also matches the behavior of NumPy's ``np.nansum`` on empty and all-*NA* arrays. -Removal of prior version deprecations/changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To have the sum of an empty series return ``NaN`` (the default behavior of +pandas 0.20.3 without bottleneck, or pandas 0.21.x), use the ``min_count`` +keyword. -- -- -- +.. ipython:: python -.. _whatsnew_0220.performance: + pd.Series([]).sum(min_count=1) -Performance Improvements -~~~~~~~~~~~~~~~~~~~~~~~~ +Thanks to the ``skipna`` parameter, the ``.sum`` on an all-*NA* +series is conceptually the same as the ``.sum`` of an empty one with +``skipna=True`` (the default). -- -- -- +.. ipython:: python -.. _whatsnew_0220.docs: + pd.Series([np.nan]).sum(min_count=1) # skipna=True by default -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ +The ``min_count`` parameter refers to the minimum number of *non-null* values +required for a non-NA sum or product. -- -- -- +:meth:`Series.prod` has been updated to behave the same as :meth:`Series.sum`, +returning ``1`` instead. -.. _whatsnew_0220.bug_fixes: +.. ipython:: python -Bug Fixes -~~~~~~~~~ + pd.Series([]).prod() + pd.Series([np.nan]).prod() + pd.Series([]).prod(min_count=1) -Conversion -^^^^^^^^^^ +These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well. +Finally, a few less obvious places in pandas are affected by this change. 
-- -- -- +Grouping by a Categorical +^^^^^^^^^^^^^^^^^^^^^^^^^ -Indexing -^^^^^^^^ +Grouping by a ``Categorical`` and summing now returns ``0`` instead of +``NaN`` for categories with no observations. The product now returns ``1`` +instead of ``NaN``. + +*pandas 0.21.x* + +.. code-block:: ipython -- -- -- + In [8]: grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) -I/O -^^^ + In [9]: pd.Series([1, 2]).groupby(grouper).sum() + Out[9]: + a 3.0 + b NaN + dtype: float64 -- -- -- +*pandas 0.22* -Plotting +.. ipython:: python + + grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + pd.Series([1, 2]).groupby(grouper).sum() + +To restore the 0.21 behavior of returning ``NaN`` for unobserved groups, +use ``min_count>=1``. + +.. ipython:: python + + pd.Series([1, 2]).groupby(grouper).sum(min_count=1) + +Resample ^^^^^^^^ -- -- -- +The sum and product of all-*NA* bins has changed from ``NaN`` to ``0`` for +sum and ``1`` for product. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [11]: s = pd.Series([1, 1, np.nan, np.nan], + ...: index=pd.date_range('2017', periods=4)) + ...: s + Out[11]: + 2017-01-01 1.0 + 2017-01-02 1.0 + 2017-01-03 NaN + 2017-01-04 NaN + Freq: D, dtype: float64 + + In [12]: s.resample('2d').sum() + Out[12]: + 2017-01-01 2.0 + 2017-01-03 NaN + Freq: 2D, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + s = pd.Series([1, 1, np.nan, np.nan], + index=pd.date_range('2017', periods=4)) + s.resample('2d').sum() + +To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``. + +.. ipython:: python + + s.resample('2d').sum(min_count=1) + +In particular, upsampling and taking the sum or product is affected, as +upsampling introduces missing values even if the original series was +entirely valid. + +*pandas 0.21.x* + +.. 
code-block:: ipython + + In [14]: idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + + In [15]: pd.Series([1, 2], index=idx).resample('12H').sum() + Out[15]: + 2017-01-01 00:00:00 1.0 + 2017-01-01 12:00:00 NaN + 2017-01-02 00:00:00 2.0 + Freq: 12H, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + pd.Series([1, 2], index=idx).resample("12H").sum() + +Once again, the ``min_count`` keyword is available to restore the 0.21 behavior. -Groupby/Resample/Rolling -^^^^^^^^^^^^^^^^^^^^^^^^ +.. ipython:: python -- -- -- + pd.Series([1, 2], index=idx).resample("12H").sum(min_count=1) -Sparse -^^^^^^ +Rolling and Expanding +^^^^^^^^^^^^^^^^^^^^^ -- -- -- +Rolling and expanding already have a ``min_periods`` keyword that behaves +similar to ``min_count``. The only case that changes is when doing a rolling +or expanding sum with ``min_periods=0``. Previously this returned ``NaN``, +when fewer than ``min_periods`` non-*NA* values were in the window. Now it +returns ``0``. -Reshaping -^^^^^^^^^ +*pandas 0.21.1* -- -- -- +.. code-block:: ipython -Numeric -^^^^^^^ + In [17]: s = pd.Series([np.nan, np.nan]) -- -- -- + In [18]: s.rolling(2, min_periods=0).sum() + Out[18]: + 0 NaN + 1 NaN + dtype: float64 -Categorical -^^^^^^^^^^^ +*pandas 0.22.0* -- -- -- +.. ipython:: python -Other -^^^^^ + s = pd.Series([np.nan, np.nan]) + s.rolling(2, min_periods=0).sum() -- -- -- +The default behavior of ``min_periods=None``, implying that ``min_periods`` +equals the window size, is unchanged. 
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index d38b677df321c..14d47398ac1df 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -36,7 +36,8 @@ def get_dispatch(dtypes): def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -88,7 +89,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = sumx[i, j] @@ -99,7 +100,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -147,7 +149,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = prodx[i, j] @@ -159,12 +161,15 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, ct, oldmean ndarray[{{dest_type2}}, ndim=2] nobs, mean + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -208,12 +213,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, 
ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] sumx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -263,7 +271,8 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -272,6 +281,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count Py_ssize_t ngroups = len(counts) + assert min_count == -1, "'min_count' only used in add and prod" + if len(labels) == 0: return @@ -332,7 +343,8 @@ def get_dispatch(dtypes): def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -342,6 +354,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -382,7 +396,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): + ndarray[int64_t] labels, int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -392,6 +407,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, 
ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -455,7 +472,8 @@ def get_dispatch(dtypes): def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -464,6 +482,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] maxx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -526,7 +546,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -535,6 +556,8 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] minx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -686,7 +709,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -695,6 +719,9 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] _counts ndarray data float64_t* ptr + + assert min_count == -1, "'min_count' only used in add and prod" + ngroups = len(counts) N, K = (<object> values).shape diff --git 
a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index a1c4ddbc8d0b0..3a7a6d54d3851 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -225,14 +225,16 @@ cdef class VariableWindowIndexer(WindowIndexer): right_closed: bint right endpoint closedness True if the right endpoint is closed, False if open - + floor: optional + unit for flooring the unit """ def __init__(self, ndarray input, int64_t win, int64_t minp, - bint left_closed, bint right_closed, ndarray index): + bint left_closed, bint right_closed, ndarray index, + object floor=None): self.is_variable = 1 self.N = len(index) - self.minp = _check_minp(win, minp, self.N) + self.minp = _check_minp(win, minp, self.N, floor=floor) self.start = np.empty(self.N, dtype='int64') self.start.fill(-1) @@ -347,7 +349,7 @@ def get_window_indexer(input, win, minp, index, closed, if index is not None: indexer = VariableWindowIndexer(input, win, minp, left_closed, - right_closed, index) + right_closed, index, floor) elif use_mock: indexer = MockFixedWindowIndexer(input, win, minp, left_closed, right_closed, index, floor) @@ -446,7 +448,7 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, object index, object closed): cdef: double val, prev_x, sum_x = 0 - int64_t s, e + int64_t s, e, range_endpoint int64_t nobs = 0, i, j, N bint is_variable ndarray[int64_t] start, end @@ -454,7 +456,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, - closed) + closed, + floor=0) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -494,13 +497,15 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, # fixed window + range_endpoint = int_max(minp, 1) - 1 + with nogil: - for i in range(0, minp - 1): + for i in range(0, range_endpoint): add_sum(input[i], &nobs, &sum_x) output[i] = NaN - for i in range(minp - 1, N): + for i in range(range_endpoint, N): val = input[i] 
add_sum(val, &nobs, &sum_x) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 58d86251a4a62..9a2a763cf6def 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6921,7 +6921,8 @@ def _add_numeric_operations(cls): @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis", - name1=name, name2=name2, axis_descr=axis_descr) + name1=name, name2=name2, axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6962,7 +6963,8 @@ def mad(self, axis=None, skipna=None, level=None): @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis", name1=name, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6986,10 +6988,10 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_min_count_stat_function( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', - nanops.nansum) + nanops.nansum, _sum_examples) cls.mean = _make_stat_function( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis', @@ -7005,10 +7007,10 @@ def compound(self, axis=None, skipna=None, level=None): "by N-1\n", nanops.nankurt) cls.kurtosis = cls.kurt - cls.prod = _make_stat_function( + cls.prod = _make_min_count_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis', - nanops.nanprod) + nanops.nanprod, _prod_examples) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, @@ -7139,10 +7141,13 @@ def _doc_parms(cls): numeric_only : boolean, default None 
Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. +%(min_count)s\ Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +%(outname)s : %(name1)s or %(name2)s (if level specified) + +%(examples)s""" _num_ddof_doc = """ @@ -7210,9 +7215,92 @@ def _doc_parms(cls): """ +_sum_examples = """\ +Examples +-------- +By default, the sum of an empty or all-NA Series is ``0``. + +>>> pd.Series([]).sum() # min_count=0 is the default +0.0 + +This can be controlled with the ``min_count`` parameter. For example, if +you'd like the sum of an empty series to be NaN, pass ``min_count=1``. + +>>> pd.Series([]).sum(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).sum() +0.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan +""" + +_prod_examples = """\ +Examples +-------- +By default, the product of an empty or all-NA Series is ``1`` + +>>> pd.Series([]).prod() +1.0 + +This can be controlled with the ``min_count`` parameter + +>>> pd.Series([]).prod(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).prod() +1.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan +""" + + +_min_count_stub = """\ +min_count : int, default 1 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. + + .. versionadded :: 0.21.2 + + Added with the default being 1. This means the sum or product + of an all-NA or empty series is ``NaN``. 
+""" + + +def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, + f, examples): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, min_count=_min_count_stub, + examples=examples) + @Appender(_num_doc) + def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + min_count=0, + **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, min_count=min_count) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=numeric_only, min_count=min_count) + + return set_function_name(stat_func, name, cls) + + def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, min_count='', examples='') @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5931f6e009dab..aef5ff7ba64d3 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -908,7 +908,8 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -916,7 +917,8 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True): continue try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(obj.values, how, + min_count=min_count) except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -1223,7 +1225,8 @@ 
def _add_numeric_operations(cls): """ add numeric operations to the GroupBy generically """ def groupby_function(name, alias, npfunc, - numeric_only=True, _convert=False): + numeric_only=True, _convert=False, + min_count=-1): _local_template = "Compute %(f)s of group values" @@ -1233,6 +1236,8 @@ def groupby_function(name, alias, npfunc, def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only + if 'min_count' not in kwargs: + kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( @@ -1280,8 +1285,8 @@ def last(x): else: return last(x) - cls.sum = groupby_function('sum', 'add', np.sum) - cls.prod = groupby_function('prod', 'prod', np.prod) + cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) + cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, @@ -2107,7 +2112,7 @@ def get_group_levels(self): 'var': 'group_var', 'first': { 'name': 'group_nth', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) + 'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1) }, 'last': 'group_last', 'ohlc': 'group_ohlc', @@ -2177,7 +2182,7 @@ def wrapper(*args, **kwargs): (how, dtype_str)) return func, dtype_str - def _cython_operation(self, kind, values, how, axis): + def _cython_operation(self, kind, values, how, axis, min_count=-1): assert kind in ['transform', 'aggregate'] # can we do this operation with our cython functions @@ -2262,11 +2267,12 @@ def _cython_operation(self, kind, values, how, axis): counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate( result, counts, values, labels, func, is_numeric, - is_datetimelike) + is_datetimelike, min_count) elif kind == 'transform': result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan) + # TODO: 
min_count result = self._transform( result, values, labels, func, is_numeric, is_datetimelike) @@ -2303,14 +2309,15 @@ def _cython_operation(self, kind, values, how, axis): return result, names - def aggregate(self, values, how, axis=0): - return self._cython_operation('aggregate', values, how, axis) + def aggregate(self, values, how, axis=0, min_count=-1): + return self._cython_operation('aggregate', values, how, axis, + min_count=min_count) def transform(self, values, how, axis=0): return self._cython_operation('transform', values, how, axis) def _aggregate(self, result, counts, values, comp_ids, agg_func, - is_numeric, is_datetimelike): + is_numeric, is_datetimelike, min_count=-1): if values.ndim > 3: # punting for now raise NotImplementedError("number of dimensions is currently " @@ -2319,9 +2326,10 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func, for i, chunk in enumerate(values.transpose(2, 0, 1)): chunk = chunk.squeeze() - agg_func(result[:, :, i], counts, chunk, comp_ids) + agg_func(result[:, :, i], counts, chunk, comp_ids, + min_count) else: - agg_func(result, counts, values, comp_ids) + agg_func(result, counts, values, comp_ids, min_count) return result @@ -3595,9 +3603,10 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): new_items, new_blocks = self._cython_agg_blocks( - how, alt=alt, numeric_only=numeric_only) + how, alt=alt, numeric_only=numeric_only, min_count=min_count) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3623,7 +3632,8 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, alt=None, numeric_only=True): + def _cython_agg_blocks(self, how, alt=None, numeric_only=True, + min_count=-1): # TODO: the actual managing of mgr_locs is a PITA # here, it should happen 
via BlockManager.combine @@ -3640,7 +3650,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True): locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( - block.values, how, axis=agg_axis) + block.values, how, axis=agg_axis, min_count=min_count) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e1c09947ac0b4..d1a355021f388 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -107,21 +107,14 @@ def f(values, axis=None, skipna=True, **kwds): if k not in kwds: kwds[k] = v try: - if values.size == 0: - - # we either return np.nan or pd.NaT - if is_numeric_dtype(values): - values = values.astype('float64') - fill_value = na_value_for_dtype(values.dtype) - - if values.ndim == 1: - return fill_value - else: - result_shape = (values.shape[:axis] + - values.shape[axis + 1:]) - result = np.empty(result_shape, dtype=values.dtype) - result.fill(fill_value) - return result + if values.size == 0 and kwds.get('min_count') is None: + # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` + return _na_for_min_count(values, axis) if (_USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name)): @@ -292,6 +285,36 @@ def _wrap_results(result, dtype): return result +def _na_for_min_count(values, axis): + """Return the missing value for `values` + + Parameters + ---------- + values : ndarray + axis : int or None + axis for the reduction + + Returns + ------- + result : scalar or ndarray + For 1-D values, returns a scalar of the correct missing type. + For 2-D values, returns a 1-D array where each element is missing. 
+ """ + # we either return np.nan or pd.NaT + if is_numeric_dtype(values): + values = values.astype('float64') + fill_value = na_value_for_dtype(values.dtype) + + if values.ndim == 1: + return fill_value + else: + result_shape = (values.shape[:axis] + + values.shape[axis + 1:]) + result = np.empty(result_shape, dtype=values.dtype) + result.fill(fill_value) + return result + + def nanany(values, axis=None, skipna=True): values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna) return values.any(axis) @@ -304,7 +327,7 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nansum(values, axis=None, skipna=True): +def nansum(values, axis=None, skipna=True, min_count=0): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -312,7 +335,7 @@ def nansum(values, axis=None, skipna=True): elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask) + the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype) @@ -641,13 +664,13 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True): +def nanprod(values, axis=None, skipna=True, min_count=0): mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask) + return _maybe_null_out(result, axis, mask, min_count=min_count) def _maybe_arg_null_out(result, axis, mask, skipna): @@ -683,9 +706,9 @@ def _get_counts(mask, axis, dtype=float): return np.array(count, dtype=dtype) -def _maybe_null_out(result, axis, mask): +def _maybe_null_out(result, axis, mask, min_count=1): if axis is not None and getattr(result, 'ndim', False): - null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 + null_mask = (mask.shape[axis] - 
mask.sum(axis) - min_count) < 0 if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): @@ -698,7 +721,7 @@ def _maybe_null_out(result, axis, mask): result[null_mask] = None elif result is not tslib.NaT: null_mask = mask.size - mask.sum() - if null_mask == 0: + if null_mask < min_count: result = np.nan return result diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 1adb3a078cca3..db1d3d4c5e31b 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -601,9 +601,20 @@ def size(self): Resampler._deprecated_valids += dir(Resampler) + +# downsample methods +for method in ['sum', 'prod']: + + def f(self, _method=method, min_count=0, *args, **kwargs): + nv.validate_resampler_func(_method, args, kwargs) + return self._downsample(_method, min_count=min_count) + f.__doc__ = getattr(GroupBy, method).__doc__ + setattr(Resampler, method, f) + + # downsample methods -for method in ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'ohlc']: +for method in ['min', 'max', 'first', 'last', 'mean', 'sem', + 'median', 'ohlc']: def f(self, _method=method, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 1bac4037e99c9..97ab0deb50d50 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -440,7 +440,8 @@ def test_nunique(self): Series({0: 1, 1: 3, 2: 2})) def test_sum(self): - self._check_stat_op('sum', np.sum, has_numeric_only=True) + self._check_stat_op('sum', np.sum, has_numeric_only=True, + skipna_alternative=np.nansum) # mixed types (with upcasting happening) self._check_stat_op('sum', np.sum, @@ -716,7 +717,8 @@ def alt(x): def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False): + check_dates=False, check_less_precise=False, + 
skipna_alternative=None): if frame is None: frame = self.frame # set some NAs @@ -737,15 +739,11 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, assert len(result) if has_skipna: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) - def wrapper(x): return alternative(x.values) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper), @@ -797,8 +795,11 @@ def wrapper(x): r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) if name in ['sum', 'prod']: - assert np.isnan(r0).all() - assert np.isnan(r1).all() + unit = int(name == 'prod') + expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) def test_mode(self): df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], @@ -936,6 +937,66 @@ def test_sum_corner(self): assert len(axis0) == 0 assert len(axis1) == 0 + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_sum_prod_nanops(self, method, unit): + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [unit, unit], + "b": [unit, np.nan], + "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + + # min_count=1 + result = getattr(df, method)(min_count=1) + expected = pd.Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(min_count=0) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(min_count=1) + expected = pd.Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) 
+ + # min_count > 1 + df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(min_count=5) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=6) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + def test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [0, 0], + "b": [0, np.nan], + "c": [np.nan, np.nan]}) + + df2 = df.apply(pd.to_timedelta) + + # 0 by default + result = df2.sum() + expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + def test_sum_object(self): values = self.frame.values.astype(int) frame = DataFrame(values, index=self.frame.index, diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 913d3bcc09869..ad1a322fdaae9 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -809,26 +809,60 @@ def test__cython_agg_general(self): exc.args += ('operation: %s' % op, ) raise - def test_cython_agg_empty_buckets(self): - ops = [('mean', np.mean), - ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), - ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), - ('min', np.min), - ('max', np.max), ] - + @pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), + ('var', lambda x: np.var(x, ddof=1)), + ('min', np.min), + ('max', np.max), ] + ) + def test_cython_agg_empty_buckets(self, op, targop): df = 
pd.DataFrame([11, 12, 13]) grps = range(0, 55, 5) - for op, targop in ops: - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op,) - raise + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. + result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) + expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise + + def test_cython_agg_empty_buckets_nanops(self): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + # add / sum + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") + def test_agg_category_nansum(self): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) def test_agg_over_numpy_arrays(self): # GH 3788 diff 
--git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index fdc03acd3e931..d4f35aa8755d1 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,6 +17,142 @@ class TestGroupByCategorical(MixIn): + def test_groupby(self): + + cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], + categories=["a", "b", "c", "d"], ordered=True) + data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) + + exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True) + expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) + result = data.groupby("b").mean() + tm.assert_frame_equal(result, expected) + + raw_cat1 = Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + raw_cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) + + # single grouper + gb = df.groupby("A") + exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) + expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers + gb = df.groupby(['A', 'B']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True)], + names=['A', 'B']) + expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, + np.nan, np.nan, np.nan]}, + index=exp_index) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers with a non-cat + df = df.copy() + df['C'] = ['foo', 'bar'] * 2 + gb = df.groupby(['A', 'B', 'C']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True), + ['foo', 'bar']], + names=['A', 'B', 'C']) + expected = DataFrame({'values': Series( + np.nan, index=exp_index)}).sort_index() + expected.iloc[[1, 
2, 7, 8], 0] = [1, 2, 3, 4] + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # GH 8623 + x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], + [1, 'John P. Doe']], + columns=['person_id', 'person_name']) + x['person_name'] = Categorical(x.person_name) + + g = x.groupby(['person_id']) + result = g.transform(lambda x: x) + tm.assert_frame_equal(result, x[['person_name']]) + + result = x.drop_duplicates('person_name') + expected = x.iloc[[0, 1]] + tm.assert_frame_equal(result, expected) + + def f(x): + return x.drop_duplicates('person_name').iloc[0] + + result = g.apply(f) + expected = x.iloc[[0, 1]].copy() + expected.index = Index([1, 2], name='person_id') + expected['person_name'] = expected['person_name'].astype('object') + tm.assert_frame_equal(result, expected) + + # GH 9921 + # Monotonic + df = DataFrame({"a": [5, 15, 25]}) + c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) + + # Filter + tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) + tm.assert_frame_equal(df.groupby(c).filter(np.all), df) + + # Non-monotonic + df = DataFrame({"a": [5, 15, 25, -5]}) + c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) + + # GH 9603 + df = DataFrame({'a': [1, 0, 0, 0]}) + c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd'))) + result = df.groupby(c).apply(len) + + exp_index = CategoricalIndex( + 
c.values.categories, ordered=c.values.ordered) + expected = Series([1, 0, 0, 0], index=exp_index) + expected.index.name = 'a' + tm.assert_series_equal(result, expected) + + def test_groupby_sort(self): + + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + df = DataFrame({'value': np.random.randint(0, 10000, 100)}) + labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=['value'], ascending=True) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), + right=False, labels=cat_labels) + + res = df.groupby(['value_group'])['value_group'].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + exp.index = CategoricalIndex(exp.index, name=exp.index.name) + tm.assert_series_equal(res, exp) + def test_level_groupby_get_group(self): # GH15155 df = DataFrame(data=np.arange(2, 22, 2), @@ -526,3 +662,53 @@ def test_groupby_categorical_two_columns(self): "C3": [nan, nan, nan, nan, 10, 100, nan, nan, nan, nan, 200, 34]}, index=idx) tm.assert_frame_equal(res, exp) + + def test_empty_sum(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 0 by default + result = df.groupby("A").B.sum() + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.sum(min_count=0) + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.sum(min_count=1) + expected = pd.Series([3, 1, np.nan], expected_idx, name='B') 
+ tm.assert_series_equal(result, expected) + + # min_count>1 + result = df.groupby("A").B.sum(min_count=2) + expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + def test_empty_prod(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 1 by default + result = df.groupby("A").B.prod() + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.prod(min_count=0) + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.prod(min_count=1) + expected = pd.Series([2, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 675f8d6413b2a..7a5581c897231 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3850,7 +3850,7 @@ def h(df, arg3): # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') - expected = pd.Series([-79.5160891089, -78.4839108911, None], + expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) assert_series_equal(expected, result) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index c8503b16a0e16..d359bfa5351a9 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -41,12 +41,11 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame( - {'Quantity': np.nan}, + {'Quantity': 0}, index=date_range('20130901 13:00:00', '20131205 13:00:00', freq='5D', name='Date', closed='left')) - expected.iloc[[0, 6, 18], 0] = 
np.array( - [24., 6., 9.], dtype='float64') + expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype='int64') result1 = df.resample('5D') .sum() assert_frame_equal(result1, expected) @@ -245,6 +244,8 @@ def test_timegrouper_with_reg_groups(self): result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum() assert_frame_equal(result, expected) + @pytest.mark.parametrize('freq', ['D', 'M', 'A', 'Q-APR']) + def test_timegrouper_with_reg_groups_freq(self, freq): # GH 6764 multiple grouping with/without sort df = DataFrame({ 'date': pd.to_datetime([ @@ -258,20 +259,24 @@ def test_timegrouper_with_reg_groups(self): 'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12] }).set_index('date') - for freq in ['D', 'M', 'A', 'Q-APR']: - expected = df.groupby('user_id')[ - 'whole_cost'].resample( - freq).sum().dropna().reorder_levels( - ['date', 'user_id']).sort_index().astype('int64') - expected.name = 'whole_cost' - - result1 = df.sort_index().groupby([pd.Grouper(freq=freq), - 'user_id'])['whole_cost'].sum() - assert_series_equal(result1, expected) - - result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ - 'whole_cost'].sum() - assert_series_equal(result2, expected) + expected = ( + df.groupby('user_id')['whole_cost'] + .resample(freq) + .sum(min_count=1) # XXX + .dropna() + .reorder_levels(['date', 'user_id']) + .sort_index() + .astype('int64') + ) + expected.name = 'whole_cost' + + result1 = df.sort_index().groupby([pd.Grouper(freq=freq), + 'user_id'])['whole_cost'].sum() + assert_series_equal(result1, expected) + + result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ + 'whole_cost'].sum() + assert_series_equal(result2, expected) def test_timegrouper_get_group(self): # GH 6914 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 2ee404ab5fe0d..d6db2ab83098b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -28,40 +28,124 @@ class TestSeriesAnalytics(TestData): 
@pytest.mark.parametrize("use_bottleneck", [True, False]) - @pytest.mark.parametrize("method", ["sum", "prod"]) - def test_empty(self, method, use_bottleneck): - + @pytest.mark.parametrize("method, unit", [ + ("sum", 0.0), + ("prod", 1.0) + ]) + def test_empty(self, method, unit, use_bottleneck): with pd.option_context("use_bottleneck", use_bottleneck): - # GH 9422 - # treat all missing as NaN + # GH 9422 / 18921 + # Entirely empty s = Series([]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explict + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # Skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # All-NA s = Series([np.nan]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # Mix of valid, empty s = Series([np.nan, 1]) + # Default result = getattr(s, method)() assert result == 1.0 - s = Series([np.nan, 1]) + # Explicit + result = getattr(s, method)(min_count=0) + assert result == 1.0 + + result = getattr(s, method)(min_count=1) + assert result == 1.0 + + # Skipna result = getattr(s, method)(skipna=True) assert result == 1.0 + result = getattr(s, method)(skipna=True, min_count=0) + assert result == 1.0 + + result = getattr(s, method)(skipna=True, min_count=1) + assert result == 1.0 + # GH #844 (changed in 9422) df = 
DataFrame(np.empty((10, 0))) - assert (df.sum(1).isnull()).all() + assert (getattr(df, method)(1) == unit).all() + + s = pd.Series([1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan, 1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0.0), + ('prod', 1.0), + ]) + def test_empty_multi(self, method, unit): + s = pd.Series([1, np.nan, np.nan, np.nan], + index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) + # 1 / 0 by default + result = getattr(s, method)(level=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(s, method)(level=0, min_count=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(s, method)(level=0, min_count=1) + expected = pd.Series([1, np.nan], index=['a', 'b']) + tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "method", ['sum', 'mean', 'median', 'std', 'var']) + "method", ['mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): # GH 7869 @@ -109,7 +193,7 @@ def test_sum_overflow(self, use_bottleneck): assert np.allclose(float(result), v[-1]) def test_sum(self): - self._check_stat_op('sum', np.sum, check_allna=True) + self._check_stat_op('sum', np.sum, check_allna=False) def test_sum_inf(self): s = Series(np.random.randn(10)) diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index cf5e3fe4f29b0..255367523a3d8 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -38,7 +38,7 @@ def test_quantile(self): # GH7661 result = Series([np.timedelta64('NaT')]).sum() - assert result is pd.NaT + assert result == pd.Timedelta(0) msg = 'percentiles should all be in 
the interval \\[0, 1\\]' for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 6366aae8ccdf6..48c1622aa0c4e 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -3163,18 +3163,6 @@ def test_info(self): buf = compat.StringIO() df2.info(buf=buf) - def test_groupby_sort(self): - - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby - # This should result in a properly sorted Series so that the plot - # has a sorted x axis - # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') - - res = self.cat.groupby(['value_group'])['value_group'].count() - exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] - exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name) - tm.assert_series_equal(res, exp) - def test_min_max(self): # unordered cats have no min/max cat = Series(Categorical(["a", "b", "c", "d"], ordered=False)) @@ -3294,123 +3282,6 @@ def test_value_counts_with_nan(self): res = s.value_counts(dropna=False, sort=False) tm.assert_series_equal(res, exp) - def test_groupby(self): - - cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], - categories=["a", "b", "c", "d"], ordered=True) - data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) - - exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b', - ordered=True) - expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) - result = data.groupby("b").mean() - tm.assert_frame_equal(result, expected) - - raw_cat1 = Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - raw_cat2 = Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) - - # single grouper - gb = df.groupby("A") - exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) - expected = 
DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers - gb = df.groupby(['A', 'B']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True)], - names=['A', 'B']) - expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, - np.nan, np.nan, np.nan]}, - index=exp_index) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers with a non-cat - df = df.copy() - df['C'] = ['foo', 'bar'] * 2 - gb = df.groupby(['A', 'B', 'C']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True), - ['foo', 'bar']], - names=['A', 'B', 'C']) - expected = DataFrame({'values': Series( - np.nan, index=exp_index)}).sort_index() - expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # GH 8623 - x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], - [1, 'John P. 
Doe']], - columns=['person_id', 'person_name']) - x['person_name'] = pd.Categorical(x.person_name) - - g = x.groupby(['person_id']) - result = g.transform(lambda x: x) - tm.assert_frame_equal(result, x[['person_name']]) - - result = x.drop_duplicates('person_name') - expected = x.iloc[[0, 1]] - tm.assert_frame_equal(result, expected) - - def f(x): - return x.drop_duplicates('person_name').iloc[0] - - result = g.apply(f) - expected = x.iloc[[0, 1]].copy() - expected.index = Index([1, 2], name='person_id') - expected['person_name'] = expected['person_name'].astype('object') - tm.assert_frame_equal(result, expected) - - # GH 9921 - # Monotonic - df = DataFrame({"a": [5, 15, 25]}) - c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) - - # Filter - tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) - tm.assert_frame_equal(df.groupby(c).filter(np.all), df) - - # Non-monotonic - df = DataFrame({"a": [5, 15, 25, -5]}) - c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) - - # GH 9603 - df = pd.DataFrame({'a': [1, 0, 0, 0]}) - c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=pd.Categorical(list('abcd'))) - result = df.groupby(c).apply(len) - - exp_index = pd.CategoricalIndex(c.values.categories, - ordered=c.values.ordered) - expected = pd.Series([1, 0, 0, 0], index=exp_index) - expected.index.name = 'a' - tm.assert_series_equal(result, 
expected) - def test_pivot_table(self): raw_cat1 = Categorical(["a", "a", "b", "b"], diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 6d2607962dfb0..aebc9cd3deaac 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -73,17 +73,11 @@ def teardown_method(self, method): def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 - operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow'] + operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv'] if not compat.PY3: operations.append('div') for arith in operations: - # numpy >= 1.11 doesn't handle integers - # raised to integer powers - # https://github.com/pandas-dev/pandas/issues/15363 - if arith == 'pow' and not _np_version_under1p11: - continue - operator_name = arith if arith == 'div': operator_name = 'truediv' diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..5d56088193d30 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import division, print_function +from distutils.version import LooseVersion from functools import partial import pytest @@ -181,12 +182,17 @@ def _coerce_tds(targ, res): check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, - targarnanval, check_dtype=True, **kwargs): + targarnanval, check_dtype=True, empty_targfunc=None, + **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval - try: + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: targ = targfunc(targartempval, axis=axis, **kwargs) + + try: res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, @@ -218,10 +224,11 @@ def 
check_fun_data(self, testfunc, targfunc, testarval, targarval, except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, **kwargs) + targarnanval2, check_dtype=check_dtype, + empty_targfunc=empty_targfunc, **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, - targarnan=None, **kwargs): + targarnan=None, empty_targfunc=None, **kwargs): if targar is None: targar = testar if targarnan is None: @@ -231,7 +238,8 @@ def check_fun(self, testfunc, targfunc, testar, targar=None, targarnanval = getattr(self, targarnan) try: self.check_fun_data(testfunc, targfunc, testarval, targarval, - targarnanval, **kwargs) + targarnanval, empty_targfunc=empty_targfunc, + **kwargs) except BaseException as exc: exc.args += ('testar: %s' % testar, 'targar: %s' % targar, 'targarnan: %s' % targarnan) @@ -328,7 +336,8 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, - allow_date=False, allow_tdelta=True, check_dtype=False) + allow_date=False, allow_tdelta=True, check_dtype=False, + empty_targfunc=np.nansum) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, @@ -461,8 +470,12 @@ def test_nankurt(self): allow_tdelta=False) def test_nanprod(self): + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + self.check_funs(nanops.nanprod, np.prod, allow_str=False, - allow_date=False, allow_tdelta=False) + allow_date=False, allow_tdelta=False, + empty_targfunc=np.nanprod) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 33fb6f1108bf2..7e442fcc2fc8b 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -3,6 +3,7 @@ from warnings import catch_warnings from datetime import datetime +from 
distutils.version import LooseVersion import operator import pytest @@ -10,7 +11,6 @@ import pandas as pd from pandas.core.dtypes.common import is_float_dtype -from pandas.core.dtypes.missing import remove_na_arraylike from pandas import (Series, DataFrame, Index, date_range, isna, notna, pivot, MultiIndex) from pandas.core.nanops import nanall, nanany @@ -83,13 +83,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -142,7 +145,8 @@ def alt(x): self._check_stat_op('sem', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel @@ -154,11 +158,8 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index c0e8770dff8b8..ef19f11499e00 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -4,11 +4,11 @@ import operator import pytest from warnings import catch_warnings +from distutils.version import LooseVersion import numpy as np from pandas import Series, Index, isna, notna from pandas.core.dtypes.common import is_float_dtype -from 
pandas.core.dtypes.missing import remove_na_arraylike from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D from pandas.tseries.offsets import BDay @@ -37,13 +37,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -106,7 +109,8 @@ def alt(x): # self._check_stat_op('skew', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel4d @@ -117,11 +121,9 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): f = getattr(obj, name) if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index e64bf2217e717..04e702644913f 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -4,6 +4,7 @@ from datetime import datetime, timedelta from functools import partial from textwrap import dedent +from operator import methodcaller import pytz import pytest @@ -3377,8 +3378,45 @@ def test_aggregate_normal(self): assert_frame_equal(expected, dt_result) """ - def test_aggregate_with_nat(self): + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def 
test_resample_entirly_nat_window(self, method, unit): + s = pd.Series([0] * 2 + [np.nan] * 2, + index=pd.date_range('2017', periods=4)) + # 0 / 1 by default + result = methodcaller(method)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(s.resample("2d")) + expected = pd.Series([0.0, np.nan], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('func, fill_value', [ + ('min', np.nan), + ('max', np.nan), + ('sum', 0), + ('prod', 1), + ('count', 0), + ]) + def test_aggregate_with_nat(self, func, fill_value): # check TimeGrouper's aggregation is identical as normal groupby + # if NaT is included, 'var', 'std', 'mean', 'first','last' + # and 'nth' doesn't work yet n = 20 data = np.random.randn(n, 4).astype('int64') @@ -3392,42 +3430,42 @@ def test_aggregate_with_nat(self): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'sum', 'prod']: - normal_result = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() - for func in ['count']: - normal_result = getattr(normal_grouped, func)() - pad = DataFrame([[0, 0, 0, 0]], index=[3], - 
columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) + pad = DataFrame([[fill_value] * 4], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + assert dt_result.index.name == 'key' - for func in ['size']: - normal_result = getattr(normal_grouped, func)() - pad = Series([0], index=[3]) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - # GH 9925 - assert dt_result.index.name == 'key' + def test_aggregate_with_nat_size(self): + # GH 9925 + n = 20 + data = np.random.randn(n, 4).astype('int64') + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - # if NaT is included, 'var', 'std', 'mean', 'first','last' - # and 'nth' doesn't work yet + normal_result = normal_grouped.size() + dt_result = dt_grouped.size() + + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_series_equal(expected, dt_result) + assert dt_result.index.name == 'key' def test_repr(self): # GH18203 @@ -3436,3 +3474,34 @@ def 
test_repr(self): "closed='left', label='left', how='mean', " "convention='e', base=0)") assert result == expected + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_upsample_sum(self, method, unit): + s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H")) + resampled = s.resample("30T") + index = pd.to_datetime(['2017-01-01T00:00:00', + '2017-01-01T00:30:00', + '2017-01-01T01:00:00']) + + # 0 / 1 by default + result = methodcaller(method)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(resampled) + expected = pd.Series([1, np.nan, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count>1 + result = methodcaller(method, min_count=2)(resampled) + expected = pd.Series([np.nan, np.nan, np.nan], index=index) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 35ae4ad4d5db4..e65de10c51300 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -475,6 +475,28 @@ def tests_empty_df_rolling(self, roller): result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series([np.nan] * 4, + 
index=pd.DatetimeIndex(['2017-01-01', '2017-01-04', + '2017-01-06', '2017-01-07'])) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + def test_multi_index_names(self): # GH 16789, 16825 @@ -548,6 +570,19 @@ def test_empty_df_expanding(self, expander): index=pd.DatetimeIndex([])).expanding(expander).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + class TestEWM(Base): @@ -864,7 +899,8 @@ def test_centered_axis_validation(self): .rolling(window=3, center=True, axis=2).mean()) def test_rolling_sum(self): - self._check_moment_func(mom.rolling_sum, np.sum, name='sum') + self._check_moment_func(mom.rolling_sum, np.nansum, name='sum', + zero_min_periods_equal=False) def test_rolling_count(self): counter = lambda x: np.isfinite(x).astype(float).sum() @@ -1349,14 +1385,18 @@ def test_fperr_robustness(self): def _check_moment_func(self, f, static_comp, name=None, window=50, has_min_periods=True, has_center=True, has_time_rule=True, preserve_nan=True, - fill_value=None, test_stable=False, **kwargs): + fill_value=None, test_stable=False, + zero_min_periods_equal=True, + **kwargs): with warnings.catch_warnings(record=True): self._check_ndarray(f, static_comp, window=window, has_min_periods=has_min_periods, preserve_nan=preserve_nan, has_center=has_center, fill_value=fill_value, - test_stable=test_stable, **kwargs) + test_stable=test_stable, + zero_min_periods_equal=zero_min_periods_equal, + **kwargs) with warnings.catch_warnings(record=True): self._check_structures(f, static_comp, @@ -1375,7 +1415,8 @@ def 
_check_moment_func(self, f, static_comp, name=None, window=50, def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True, preserve_nan=True, has_center=True, fill_value=None, - test_stable=False, test_window=True, **kwargs): + test_stable=False, test_window=True, + zero_min_periods_equal=True, **kwargs): def get_result(arr, window, min_periods=None, center=False): return f(arr, window, min_periods=min_periods, center=center, ** kwargs) @@ -1408,10 +1449,11 @@ def get_result(arr, window, min_periods=None, center=False): assert isna(result[3]) assert notna(result[4]) - # min_periods=0 - result0 = get_result(arr, 20, min_periods=0) - result1 = get_result(arr, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(arr, 20, min_periods=0) + result1 = get_result(arr, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) else: result = get_result(arr, 50) tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index dec67bbea854f..b6fc9c78d6476 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2861,3 +2861,31 @@ def setTZ(tz): yield finally: setTZ(orig_tz) + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + skipna_wrapper : function + """ + if skipna_alternative: + def skipna_wrapper(x): + return skipna_alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper
https://api.github.com/repos/pandas-dev/pandas/pulls/18986
2017-12-29T14:29:30Z
2017-12-30T01:22:59Z
2017-12-30T01:22:59Z
2017-12-30T01:42:09Z
revert geopandas xfail
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 6407bee49ad15..0f0abd8cd3400 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -92,7 +92,6 @@ def test_pandas_datareader(): pandas_datareader.get_data_google('AAPL') -@pytest.mark.xfail(reason="install not working, gh-18780") def test_geopandas(): geopandas = import_module('geopandas') # noqa
closes #18780
https://api.github.com/repos/pandas-dev/pandas/pulls/18984
2017-12-29T14:22:17Z
2017-12-29T15:12:42Z
2017-12-29T15:12:41Z
2017-12-29T15:12:42Z
DOC: 0.22.0 release docs
diff --git a/doc/source/release.rst b/doc/source/release.rst index 0298eda2c78ab..aea6280a490d6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,27 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.22.0 +------------- + +**Release date:** December 29, 2017 + +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note. + +The only changes are: + +- The sum of an empty or all-*NA* ``Series`` is now ``0`` +- The product of an empty or all-*NA* ``Series`` is now ``1`` +- We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. + +See the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` overview for further explanation +of all the places in the library this affects. + pandas 0.21.1 ------------- diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 8617aa6c03e1f..da4acd99e3873 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -1,7 +1,7 @@ .. _whatsnew_0220: -v0.22.0 -------- +v0.22.0 (December 29, 2017) +--------------------------- This is a major release from 0.21.1 and includes a single, API-breaking change. We recommend that all users upgrade to this version after carefully reading the
[ci skip]
https://api.github.com/repos/pandas-dev/pandas/pulls/18983
2017-12-29T13:19:20Z
2017-12-29T14:27:47Z
2017-12-29T14:27:47Z
2017-12-30T03:36:09Z
ENH: is_scalar returns True for DateOffset objects
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 24f3e4433411e..0061a636cafb6 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -144,6 +144,7 @@ Other Enhancements - :class:`Interval` and :class:`IntervalIndex` have gained a ``length`` attribute (:issue:`18789`) - ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method. Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`). +- :func:`~pandas.api.types.is_scalar` now returns ``True`` for ``DateOffset`` objects (:issue:`18943`). .. _whatsnew_0230.api_breaking: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 5a62203f79642..3898f7499e85e 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -112,6 +112,7 @@ cpdef bint isscalar(object val): - Period - instances of decimal.Decimal - Interval + - DateOffset """ @@ -126,7 +127,8 @@ cpdef bint isscalar(object val): or PyTime_Check(val) or util.is_period_object(val) or is_decimal(val) - or is_interval(val)) + or is_interval(val) + or is_offset(val)) def item_from_zerodim(object val): diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 5ed8828a0f122..b74b3a79fd69a 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -45,6 +45,8 @@ cpdef bint is_period(object val): """ Return a boolean if this is a Period object """ return util.is_period_object(val) +cdef inline bint is_offset(object val): + return getattr(val, '_typ', '_typ') == 'dateoffset' _TYPE_MAP = { 'categorical': 'categorical', diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index e8bdd2a551a34..219d1b2852938 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -18,7 +18,8 @@ from pandas._libs import tslib, lib, missing as libmissing from pandas import (Series, Index, DataFrame, Timedelta, DatetimeIndex, 
TimedeltaIndex, Timestamp, - Panel, Period, Categorical, isna) + Panel, Period, Categorical, isna, Interval, + DateOffset) from pandas.compat import u, PY2, PY3, StringIO, lrange from pandas.core.dtypes import inference from pandas.core.dtypes.common import ( @@ -1151,6 +1152,8 @@ def test_isscalar_pandas_scalars(self): assert is_scalar(Timestamp('2014-01-01')) assert is_scalar(Timedelta(hours=1)) assert is_scalar(Period('2014-01-01')) + assert is_scalar(Interval(left=0, right=1)) + assert is_scalar(DateOffset(days=1)) def test_lisscalar_pandas_containers(self): assert not is_scalar(Series())
- [x] closes #18943 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I've implemented `is_offset` identically to `is_period_object` and added it to `isscalar` and `is_scalar` is now returning true for DateOffsets. But not sure how to go about this exactly: > `is_offset` should also be imported / tested in `pandas/core/dtypes/common.py` The other "is" functions that are explicitly imported from inference `is_string_like` and `is_list_like` are used for testing but don't look like they themselves are being tested so I'm not sure what kind of test is needed. The rest of the "is" functions are imported with a wildcard but pylint is telling me they are not used (should I go through and make the required imports explicit?).
https://api.github.com/repos/pandas-dev/pandas/pulls/18982
2017-12-29T05:50:01Z
2017-12-29T14:24:39Z
2017-12-29T14:24:39Z
2017-12-29T14:28:41Z
Add spec for new Interval / IntervalIndex methods: .overlaps(), .covers()
diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py index c8b30e19daa02..71c7265b89a1d 100644 --- a/pandas/tests/indexes/interval/test_interval_new.py +++ b/pandas/tests/indexes/interval/test_interval_new.py @@ -13,13 +13,6 @@ class TestIntervalIndex(Base): - def _compare_tuple_of_numpy_array(self, result, expected): - lidx, ridx = result - lidx_expected, ridx_expected = expected - - tm.assert_numpy_array_equal(lidx, lidx_expected) - tm.assert_numpy_array_equal(ridx, ridx_expected) - @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) @pytest.mark.parametrize("side", ['right', 'left', 'both', 'neither']) def test_get_loc_interval(self, idx_side, side): diff --git a/pandas/tests/indexes/interval/test_interval_overlaps_covers.py b/pandas/tests/indexes/interval/test_interval_overlaps_covers.py new file mode 100644 index 0000000000000..ef03e9be78abc --- /dev/null +++ b/pandas/tests/indexes/interval/test_interval_overlaps_covers.py @@ -0,0 +1,63 @@ +from __future__ import division + +import pytest +import numpy as np + +from pandas import IntervalIndex +from pandas.tests.indexes.common import Base +import pandas.util.testing as tm + +pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316") + + +class TestIntervalIndex(Base): + + def _compare_tuple_of_numpy_array(self, result, expected): + lidx, ridx = result + lidx_expected, ridx_expected = expected + + tm.assert_numpy_array_equal(lidx, lidx_expected) + tm.assert_numpy_array_equal(ridx, ridx_expected) + + @pytest.mark.parametrize("oth_side", ['right', 'left', 'both']) + def test_intervalIndex_covers_intervalIndex(self, idx): + + # class IntervalIndex: + # def covers(self, other: IntervalIndex) -> Tuple[IntegerArray1D, + # IntegerArray1D] + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed="right") + other = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed=oth_side) + + result = 
idx.covers(idx) + expected = { + "right": (np.array([0, 1, 2, 2]), np.array([0, 1, 1, 2])), + "left": (np.array([2]), np.array([1])), + "both": (np.array([0, 1, 2, 2]), np.array([0, 1, 1, 2])) + } + + self._compare_tuple_of_numpy_array(result, expected[oth_side]) + + @pytest.mark.parametrize("oth_side", ['right', 'left', 'both']) + def test_intervalIndex_overlaps_intervalIndex(self): + + # class IntervalIndex: + # def overlaps(self, other: IntervalIndex) -> Tuple[IntegerArray1D, + # IntegerArray1D] + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed="right") + other = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed=oth_side) + + result = idx.overlaps(idx) + expected = { + "right": (np.array([0, 1, 2, 2]), np.array([0, 1, 1, 2])), + "left": (np.array([0, 0, 1, 1, 2, 2]), + np.array([0, 2, 1, 2, 1, 2])), + "both": (np.array([0, 0, 1, 1, 2, 2]), + np.array([0, 2, 1, 2, 1, 2])) + } + self._compare_tuple_of_numpy_array(result, expected[oth_side]) diff --git a/pandas/tests/scalar/test_interval_new.py b/pandas/tests/scalar/test_interval_new.py new file mode 100644 index 0000000000000..55180e246c40e --- /dev/null +++ b/pandas/tests/scalar/test_interval_new.py @@ -0,0 +1,251 @@ +from __future__ import division + +import pytest +import numpy as np + +from pandas import Interval, IntervalIndex +from pandas.tests.indexes.common import Base +import pandas.util.testing as tm + +pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316") + + +class TestIntervalIndex(Base): + + @pytest.mark.parametrize("ivl_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("oth_side", ['right', 'left', 'both', 'neither']) + def test_interval_covers_interval(self, ivl_side, oth_side): + + # class Interval: + # def covers(self, other: Interval) -> bool + + assert Interval(1, 3).covers(Interval(1.5, 2.5)) + assert Interval(1, 3).covers(Interval(1, 2)) + assert Interval(1, 3).covers(Interval(2, 3)) + assert not Interval(1, 
3).covers(Interval(0.5, 2.5)) + assert not Interval(1, 3).covers(Interval(1.5, 3.5)) + + ivl = Interval(1, 3, closed=ivl_side) + other = Interval(1, 3, closed=oth_side) + + should_cover = { + 'right': { + 'right': True, 'left': False, 'both': False, 'neither': True}, + 'left': { + 'right': False, 'left': True, 'both': False, 'neither': True}, + 'both': { + 'right': True, 'left': True, 'both': True, 'neither': True}, + 'neither': { + 'right': False, 'left': False, 'both': False, 'neither': True} + } + + result = ivl.covers(other) + expected = should_cover[ivl_side][oth_side] + assert result == expected + + @pytest.mark.parametrize("ivl_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("oth_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("ivl_range", [(1, 3), (-1, 1), (3, 5)]) + def test_interval_overlaps_interval(self, ivl_side, oth_side, ivl_range): + + # class Interval: + # def overlaps(self, other: Interval) -> bool + + assert Interval(1, 3).overlaps(Interval(1.5, 2.5)) + assert Interval(1, 3).overlaps(Interval(1, 2)) + assert Interval(1, 3).overlaps(Interval(2, 3)) + assert Interval(1, 3).overlaps(Interval(0.5, 2.5)) + assert Interval(1, 3).overlaps(Interval(1.5, 3.5)) + + assert not Interval(1, 3).overlaps(Interval(-1, 1)) + assert not Interval(1, 3).overlaps(Interval(3, 5)) + + ivl = Interval(*ivl_range, closed=ivl_side) + other = Interval(1, 3, closed=oth_side) + + should_overlap = { + # idx_side: + # ivl_side: {ivl_range: expected_result} + 'right': { + 'right': {(-1, 1): False, (1, 3): True, (3, 5): False}, + 'left': {(-1, 1): False, (1, 3): True, (3, 5): True}, + 'both': {(-1, 1): False, (1, 3): True, (3, 5): True}, + 'neither': {(-1, 1): False, (1, 3): True, (3, 5): False}, + }, + 'left': { + 'right': {(-1, 1): True, (1, 3): True, (3, 5): False}, + 'left': {(-1, 1): False, (1, 3): True, (3, 5): False}, + 'both': {(-1, 1): True, (1, 3): True, (3, 5): False}, + 'neither': {(-1, 1): False, (1, 3): True, (3, 5): 
False}, + }, + 'both': { + 'right': {(-1, 1): True, (1, 3): True, (3, 5): False}, + 'left': {(-1, 1): False, (1, 3): True, (3, 5): True}, + 'both': {(-1, 1): True, (1, 3): True, (3, 5): True}, + 'neither': {(-1, 1): False, (1, 3): True, (3, 5): False}, + }, + 'neither': { + 'right': {(-1, 1): False, (1, 3): True, (3, 5): False}, + 'left': {(-1, 1): False, (1, 3): True, (3, 5): False}, + 'both': {(-1, 1): False, (1, 3): True, (3, 5): False}, + 'neither': {(-1, 1): False, (1, 3): True, (3, 5): False}, + } + } + + result = ivl.overlaps(other) + expected = should_overlap[oth_side][ivl_side][ivl_range] + other_result = other.overlaps(ivl) + + assert result == expected == other_result + + @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("ivl_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("ivl_range", [(1, 3), (0, 3), (0, 2), (2, 4)]) + def test_interval_covers_intervalIndex(self, idx_side, ivl_side, + ivl_range): + + # class Interval: + # def covers(self, other: IntervalIndex) -> IntegerArray1D + + # class IntervalIndex: + # def covers(self, other: Interval) -> IntegerArray1D + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed=idx_side) + ivl = Interval(*ivl_range, closed=ivl_side) + + should_cover = { + # idx_side: + # ivl_side: {ivl_range: expected_result} + 'right': { + 'right': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'left': { + (1, 3): [], (0, 3): [0], (0, 2): [0], (2, 4): [1]}, + 'both': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'neither': { + (1, 3): [], (0, 3): [0], (0, 2): [0], (2, 4): [1]} + }, + 'left': { + 'right': { + (1, 3): [1], (0, 3): [1, 2], (0, 2): [], (2, 4): []}, + 'left': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'both': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'neither': { + (1, 3): [1], (0, 3): [1, 2], (0, 2): [], (2, 4): []} + }, + 
'both': { + 'right': { + (1, 3): [1], (0, 3): [1, 2], (0, 2): [], (2, 4): []}, + 'left': { + (1, 3): [], (0, 3): [0], (0, 2): [0], (2, 4): [1]}, + 'both': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'neither': { + (1, 3): [], (0, 3): [], (0, 2): [], (2, 4): []} + }, + 'neither': { + 'right': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'left': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'both': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]}, + 'neither': { + (1, 3): [1, 2], (0, 3): [0, 1, 2], (0, 2): [0], + (2, 4): [1]} + } + } + + result = ivl.covers(idx) + expected = np.array(should_cover[idx_side][ivl_side][ivl_range]) + other_result = idx.covers(ivl) + + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(other_result, expected) + + @pytest.mark.parametrize("idx_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("ivl_side", ['right', 'left', 'both', 'neither']) + @pytest.mark.parametrize("ivl_range", [(1, 3), (1, 2), (0, 2), (3, 4)]) + def test_interval_overlaps_intervalIndex(self, idx_side, ivl_side, + ivl_range): + + # class Interval: + # def overlaps(self, other: IntervalIndex) -> IntegerArray1D + + # class IntervalIndex: + # def overlaps(self, other: Interval) -> IntegerArray1D + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 3)], + closed=idx_side) + ivl = Interval(*ivl_range, closed=ivl_side) + + should_overlap = { + # idx_side: + # ivl_side: {ivl_range: expected_result} + 'right': { + 'right': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + 'left': { + (1, 3): [0, 1, 2], (1, 2): [0, 2], (0, 2): [0, 2], + (3, 4): [1, 2]}, + 'both': { + (1, 3): [0, 1, 2], (1, 2): [0, 2], (0, 2): [0, 2], + (3, 4): [1, 2]}, + 'neither': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + }, + 'left': { + 'right': { + (1, 3): [1, 2], (1, 2): [1, 2], (0, 2): [0, 1, 2], + (3, 4): []}, + 'left': { + (1, 
3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + 'both': { + (1, 3): [1, 2], (1, 2): [1, 2], (0, 2): [0, 1, 2], + (3, 4): []}, + 'neither': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + }, + 'both': { + 'right': { + (1, 3): [1, 2], (1, 2): [1, 2], (0, 2): [0, 1, 2], + (3, 4): []}, + 'left': { + (1, 3): [0, 1, 2], (1, 2): [0, 2], (0, 2): [0, 2], + (3, 4): [1, 2]}, + 'both': { + (1, 3): [0, 1, 2], (1, 2): [0, 1, 2], (0, 2): [0, 1, 2], + (3, 4): [1, 2]}, + 'neither': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + }, + 'neither': { + 'right': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + 'left': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + 'both': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + 'neither': { + (1, 3): [1, 2], (1, 2): [2], (0, 2): [0, 2], (3, 4): []}, + } + } + + result = ivl.overlaps(idx) + expected = np.array(should_overlap[idx_side][ivl_side][ivl_range]) + other_result = idx.overlaps(ivl) + + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(other_result, expected)
- [x] part 2/2 for #16316 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Working on parameterizing the tests. Will ping when ready for review.
https://api.github.com/repos/pandas-dev/pandas/pulls/18975
2017-12-28T19:18:22Z
2018-09-25T15:40:25Z
null
2018-09-25T15:40:26Z
Cleanup Series arithmetic tests
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3c567e52cccb5..85dd17c2d1b44 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -26,7 +26,6 @@ def freq(request): class TestTimedeltaIndexArithmetic(object): - _holder = TimedeltaIndex @pytest.mark.xfail(reason='GH#18824 ufunc add cannot use operands...') def test_tdi_with_offset_array(self): @@ -46,39 +45,59 @@ def test_tdi_with_offset_array(self): with pytest.raises(TypeError): tdi + anchored - # TODO: Split by ops, better name - def test_numeric_compat(self): - idx = self._holder(np.arange(5, dtype='int64')) - didx = self._holder(np.arange(5, dtype='int64') ** 2) + def test_tdi_mul_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = idx * 1 tm.assert_index_equal(result, idx) result = 1 * idx tm.assert_index_equal(result, idx) + def test_tdi_div_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) result = idx / 1 tm.assert_index_equal(result, idx) + def test_tdi_floordiv_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) result = idx // 1 tm.assert_index_equal(result, idx) + def test_tdi_mul_intarray_0dim(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = idx * np.array(5, dtype='int64') - tm.assert_index_equal(result, - self._holder(np.arange(5, dtype='int64') * 5)) + expected = TimedeltaIndex(np.arange(5, dtype='int64') * 5) + tm.assert_index_equal(result, expected) + + def test_tdi_mul_intarray_1dim(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) result = idx * np.arange(5, dtype='int64') - tm.assert_index_equal(result, didx) + expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2) + tm.assert_index_equal(result, expected) + + # TODO: These ops should return Series, NOT indexes + def test_tdi_mul_intSeries(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + expected = 
TimedeltaIndex(np.arange(5, dtype='int64') ** 2) result = idx * Series(np.arange(5, dtype='int64')) - tm.assert_index_equal(result, didx) + tm.assert_index_equal(result, expected) result = idx * Series(np.arange(5, dtype='float64') + 0.1) - tm.assert_index_equal(result, self._holder(np.arange( - 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1))) + expected = TimedeltaIndex([0.0, 1.1, 4.2, 9.3, 16.4]) + tm.assert_index_equal(result, expected) + + def test_tdi_mul_invalid(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) # invalid pytest.raises(TypeError, lambda: idx * idx) - pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3))) + + # Cases with wrong length + pytest.raises(ValueError, lambda: idx * TimedeltaIndex(np.arange(3))) pytest.raises(ValueError, lambda: idx * np.array([1, 2])) def test_ufunc_coercions(self): @@ -274,6 +293,7 @@ def test_subtraction_ops(self): expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo') tm.assert_index_equal(result, expected) + # TODO: Belongs in DatetimeIndex tests def test_subtraction_ops_with_tz(self): # check that dt/dti subtraction ops with tz are validated diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 433e3cf440cbd..1423be04d0a26 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -666,9 +666,46 @@ def test_div(self): result = pd.Series(zero_array) / pd.Series(data) assert_series_equal(result, expected) + @pytest.mark.parametrize('dtype', [None, object]) + @pytest.mark.parametrize('data', [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], + ['x', 'y', 1]]) + def test_series_radd_invalid(self, data, dtype): + s = Series(data, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + s + + def test_series_radd_str(self): + ser = pd.Series(['x', np.nan, 'x']) + assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax'])) + 
assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_radd_more(self, dtype): + res = 1 + pd.Series([1, 2, 3], dtype=dtype) + exp = pd.Series([2, 3, 4], dtype=dtype) + assert_series_equal(res, exp) + res = pd.Series([1, 2, 3], dtype=dtype) + 1 + assert_series_equal(res, exp) + + res = np.nan + pd.Series([1, 2, 3], dtype=dtype) + exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) + assert_series_equal(res, exp) + res = pd.Series([1, 2, 3], dtype=dtype) + np.nan + assert_series_equal(res, exp) + + ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), + pd.Timedelta('3 days')], dtype=dtype) + exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), + pd.Timedelta('6 days')]) + assert_series_equal(pd.Timedelta('3 days') + ser, exp) + assert_series_equal(ser + pd.Timedelta('3 days'), exp) + class TestTimedeltaSeriesArithmetic(object): - def test_timedelta_series_ops(self): + def test_timedelta_series_ops_with_timestamp(self): # GH11925 s = Series(timedelta_range('1 day', periods=3)) ts = Timestamp('2012-01-01') @@ -898,6 +935,9 @@ def test_timedelta64_ops_nat(self): assert_series_equal(-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta) + with pytest.raises(TypeError): + timedelta_series - Series([NaT], dtype='datetime64[ns]') + # addition assert_series_equal(nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta) @@ -1263,6 +1303,75 @@ def test_datetime64_ops_nat(self): with pytest.raises(TypeError): nat_series_dtype_timestamp / 1 + def test_ops_nat_mixed_datetime64_timedelta64(self): + # GH 11349 + datetime_series = Series([NaT, Timestamp('19900315')]) + nat_series_dtype_timedelta = Series([NaT, NaT], + dtype='timedelta64[ns]') + nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') + single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') + single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') 
+ + # subtraction + assert_series_equal(datetime_series - single_nat_dtype_datetime, + nat_series_dtype_timedelta) + + assert_series_equal(datetime_series - single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(-single_nat_dtype_timedelta + datetime_series, + nat_series_dtype_timestamp) + + # without a Series wrapping the NaT, it is ambiguous + # whether it is a datetime64 or timedelta64 + # defaults to interpreting it as timedelta64 + assert_series_equal(nat_series_dtype_timestamp - + single_nat_dtype_datetime, + nat_series_dtype_timedelta) + + assert_series_equal(nat_series_dtype_timestamp - + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(-single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + + # addition + assert_series_equal(nat_series_dtype_timestamp + + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + + assert_series_equal(nat_series_dtype_timestamp + + single_nat_dtype_timedelta, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + + assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_datetime, + nat_series_dtype_timestamp) + assert_series_equal(single_nat_dtype_datetime + + nat_series_dtype_timedelta, + nat_series_dtype_timestamp) + + def test_add_sub_integerlike_invalid(self): + # addition/subtraction of integers and zero-dim integer arrays with + # Series[datetime64] is invalid + obj_series = tm.makeObjectSeries() + obj_series.name = 'objects' + + with pytest.raises(Exception): + obj_series + 1 + with pytest.raises(Exception): + obj_series + np.array(1, dtype=np.int64) + with pytest.raises(Exception): + obj_series - 1 + with pytest.raises(Exception): + obj_series - np.array(1, dtype=np.int64) + class TestSeriesOperators(TestData): def 
test_op_method(self): @@ -1375,15 +1484,6 @@ def test_operators_empty_int_corner(self): s2 = Series({'x': 0.}) assert_series_equal(s1 * s2, Series([np.nan], index=['x'])) - def test_invalid_ops(self): - # invalid ops - pytest.raises(Exception, self.objSeries.__add__, 1) - pytest.raises(Exception, self.objSeries.__add__, - np.array(1, dtype=np.int64)) - pytest.raises(Exception, self.objSeries.__sub__, 1) - pytest.raises(Exception, self.objSeries.__sub__, - np.array(1, dtype=np.int64)) - def test_timedelta64_conversions(self): startdate = Series(date_range('2013-01-01', '2013-01-03')) enddate = Series(date_range('2013-03-01', '2013-03-03')) @@ -1453,64 +1553,6 @@ def timedelta64(*args): "s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs)) - def test_ops_nat_mixed_datetime64_timedelta64(self): - # GH 11349 - timedelta_series = Series([NaT, Timedelta('1s')]) - datetime_series = Series([NaT, Timestamp('19900315')]) - nat_series_dtype_timedelta = Series([NaT, NaT], - dtype='timedelta64[ns]') - nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') - single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') - single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') - - # subtraction - assert_series_equal(datetime_series - single_nat_dtype_datetime, - nat_series_dtype_timedelta) - - assert_series_equal(datetime_series - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(-single_nat_dtype_timedelta + datetime_series, - nat_series_dtype_timestamp) - - # without a Series wrapping the NaT, it is ambiguous - # whether it is a datetime64 or timedelta64 - # defaults to interpreting it as timedelta64 - assert_series_equal(nat_series_dtype_timestamp - - single_nat_dtype_datetime, - nat_series_dtype_timedelta) - - assert_series_equal(nat_series_dtype_timestamp - - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(-single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - 
nat_series_dtype_timestamp) - - with pytest.raises(TypeError): - timedelta_series - single_nat_dtype_datetime - - # addition - assert_series_equal(nat_series_dtype_timestamp + - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - nat_series_dtype_timestamp) - - assert_series_equal(nat_series_dtype_timestamp + - single_nat_dtype_timedelta, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timestamp, - nat_series_dtype_timestamp) - - assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_datetime, - nat_series_dtype_timestamp) - assert_series_equal(single_nat_dtype_datetime + - nat_series_dtype_timedelta, - nat_series_dtype_timestamp) - def test_ops_datetimelike_align(self): # GH 7500 # datetimelike ops need to align @@ -1719,35 +1761,6 @@ def _check_op(arr, op): _check_op(arr, operator.truediv) _check_op(arr, operator.floordiv) - def test_arith_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - - exp = pd.Series([3.0, 4.0, np.nan, np.nan], - index=list('ABCD'), name='x') - assert_series_equal(s1 + s2, exp) - assert_series_equal(s2 + s1, exp) - - exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() + s2.to_frame(), exp) - assert_frame_equal(s2.to_frame() + s1.to_frame(), exp) - - # different length - s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - - exp = pd.Series([3, 4, 5, np.nan], - index=list('ABCD'), name='x') - assert_series_equal(s3 + s4, exp) - assert_series_equal(s4 + s3, exp) - - exp = pd.DataFrame({'x': [3, 4, 5, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) - def 
test_bool_ops_df_compat(self): # GH 1134 s1 = pd.Series([True, False, True], index=list('ABC'), name='x') @@ -1806,105 +1819,6 @@ def test_bool_ops_df_compat(self): assert_frame_equal(s3.to_frame() | s4.to_frame(), exp) assert_frame_equal(s4.to_frame() | s3.to_frame(), exp) - def test_series_frame_radd_bug(self): - # GH 353 - vals = Series(tm.rands_array(5, 10)) - result = 'foo_' + vals - expected = vals.map(lambda x: 'foo_' + x) - assert_series_equal(result, expected) - - frame = DataFrame({'vals': vals}) - result = 'foo_' + frame - expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)}) - assert_frame_equal(result, expected) - - # really raise this time - with pytest.raises(TypeError): - datetime.now() + self.ts - - with pytest.raises(TypeError): - self.ts + datetime.now() - - def test_series_radd_more(self): - data = [[1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), - pd.NaT], - ['x', 'y', 1]] - - for d in data: - for dtype in [None, object]: - s = Series(d, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + s - - for dtype in [None, object]: - res = 1 + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([2, 3, 4], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + 1 - assert_series_equal(res, exp) - - res = np.nan + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + np.nan - assert_series_equal(res, exp) - - s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), - pd.Timedelta('3 days')], dtype=dtype) - exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), - pd.Timedelta('6 days')]) - assert_series_equal(pd.Timedelta('3 days') + s, exp) - assert_series_equal(s + pd.Timedelta('3 days'), exp) - - s = pd.Series(['x', np.nan, 'x']) - assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax'])) - assert_series_equal(s + 'a', pd.Series(['xa', 
np.nan, 'xa'])) - - def test_frame_radd_more(self): - data = [[1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), - pd.NaT], - ['x', 'y', 1]] - - for d in data: - for dtype in [None, object]: - s = DataFrame(d, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + s - - for dtype in [None, object]: - res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([2, 3, 4], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1 - assert_frame_equal(res, exp) - - res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan - assert_frame_equal(res, exp) - - df = pd.DataFrame(['x', np.nan, 'x']) - assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax'])) - assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa'])) - - def test_operators_frame(self): - # rpow does not work with DataFrame - df = DataFrame({'A': self.ts}) - - assert_series_equal(self.ts + self.ts, self.ts + df['A'], - check_names=False) - assert_series_equal(self.ts ** self.ts, self.ts ** df['A'], - check_names=False) - assert_series_equal(self.ts < self.ts, self.ts < df['A'], - check_names=False) - assert_series_equal(self.ts / self.ts, self.ts / df['A'], - check_names=False) - def test_operators_combine(self): def _check_fill(meth, op, a, b, fill_value=0): exp_index = a.index.union(b.index) @@ -2073,3 +1987,96 @@ def test_idxminmax_with_inf(self): assert np.isnan(s.idxmin(skipna=False)) assert s.idxmax() == 0 np.isnan(s.idxmax(skipna=False)) + + +class TestSeriesWithFrameArithmetic(TestData): + def test_operators_frame(self): + # rpow does not work with DataFrame + df = DataFrame({'A': self.ts}) + + assert_series_equal(self.ts + self.ts, self.ts + df['A'], + check_names=False) + assert_series_equal(self.ts ** self.ts, self.ts ** df['A'], + check_names=False) + 
assert_series_equal(self.ts < self.ts, self.ts < df['A'], + check_names=False) + assert_series_equal(self.ts / self.ts, self.ts / df['A'], + check_names=False) + + def test_series_frame_radd_bug(self): + # GH 353 + vals = Series(tm.rands_array(5, 10)) + result = 'foo_' + vals + expected = vals.map(lambda x: 'foo_' + x) + assert_series_equal(result, expected) + + frame = DataFrame({'vals': vals}) + result = 'foo_' + frame + expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)}) + assert_frame_equal(result, expected) + + # really raise this time + with pytest.raises(TypeError): + datetime.now() + self.ts + + with pytest.raises(TypeError): + self.ts + datetime.now() + + def test_arith_ops_df_compat(self): + # GH 1134 + s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') + + exp = pd.Series([3.0, 4.0, np.nan, np.nan], + index=list('ABCD'), name='x') + assert_series_equal(s1 + s2, exp) + assert_series_equal(s2 + s1, exp) + + exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() + s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() + s1.to_frame(), exp) + + # different length + s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') + + exp = pd.Series([3, 4, 5, np.nan], + index=list('ABCD'), name='x') + assert_series_equal(s3 + s4, exp) + assert_series_equal(s4 + s3, exp) + + exp = pd.DataFrame({'x': [3, 4, 5, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) + + def test_frame_radd_more(self): + data = [[1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), + pd.NaT], + ['x', 'y', 1]] + + for d in data: + for dtype in [None, object]: + s = DataFrame(d, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + s + + for dtype in [None, object]: + res = 1 + 
pd.DataFrame([1, 2, 3], dtype=dtype) + exp = pd.DataFrame([2, 3, 4], dtype=dtype) + assert_frame_equal(res, exp) + res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1 + assert_frame_equal(res, exp) + + res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype) + exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) + assert_frame_equal(res, exp) + res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan + assert_frame_equal(res, exp) + + df = pd.DataFrame(['x', np.nan, 'x']) + assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax'])) + assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
Also a few for TimedeltaIndex. One of the TimedeltaIndex tests specifically tests a behavior I think is wrong, #18963. Actually _fixing_ that bug is separate. Actual content of tests is unchanged.
https://api.github.com/repos/pandas-dev/pandas/pulls/18974
2017-12-28T18:54:37Z
2017-12-29T06:43:15Z
null
2018-02-11T22:00:21Z
Spellcheck of docs, a few minor changes
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index e591825cec748..be749dfc1f594 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -24,9 +24,9 @@ See the :ref:`Indexing and Selecting Data <indexing>` for general indexing docum Whether a copy or a reference is returned for a setting operation, may depend on the context. This is sometimes called ``chained assignment`` and should be avoided. See :ref:`Returning a View versus Copy - <indexing.view_versus_copy>` + <indexing.view_versus_copy>`. -See the :ref:`cookbook<cookbook.selection>` for some advanced strategies +See the :ref:`cookbook<cookbook.selection>` for some advanced strategies. .. _advanced.hierarchical: @@ -46,7 +46,7 @@ described above and in prior sections. Later, when discussing :ref:`group by non-trivial applications to illustrate how it aids in structuring data for analysis. -See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies +See the :ref:`cookbook<cookbook.multi_index>` for some advanced strategies. Creating a MultiIndex (hierarchical index) object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -59,7 +59,7 @@ can think of ``MultiIndex`` as an array of tuples where each tuple is unique. A ``MultiIndex.from_tuples``), or a crossed set of iterables (using ``MultiIndex.from_product``). The ``Index`` constructor will attempt to return a ``MultiIndex`` when it is passed a list of tuples. The following examples -demo different ways to initialize MultiIndexes. +demonstrate different ways to initialize MultiIndexes. .. ipython:: python @@ -196,7 +196,8 @@ highly performant. If you want to see the actual used levels. # for a specific level df[['foo','qux']].columns.get_level_values(0) -To reconstruct the ``MultiIndex`` with only the used levels +To reconstruct the ``MultiIndex`` with only the used levels, the +``remove_unused_levels`` method may be used. .. 
versionadded:: 0.20.0 @@ -216,7 +217,7 @@ tuples: s + s[:-2] s + s[::2] -``reindex`` can be called with another ``MultiIndex`` or even a list or array +``reindex`` can be called with another ``MultiIndex``, or even a list or array of tuples: .. ipython:: python @@ -230,7 +231,7 @@ Advanced indexing with hierarchical index ----------------------------------------- Syntactically integrating ``MultiIndex`` in advanced indexing with ``.loc`` is a -bit challenging, but we've made every effort to do so. for example the +bit challenging, but we've made every effort to do so. For example the following works as you would expect: .. ipython:: python @@ -286,7 +287,7 @@ As usual, **both sides** of the slicers are included as this is label indexing. df.loc[(slice('A1','A3'),.....), :] -   rather than this: +   You should **not** do this:   .. code-block:: python @@ -315,7 +316,7 @@ Basic multi-index slicing using slices, lists, and labels. dfmi.loc[(slice('A1','A3'), slice(None), ['C1', 'C3']), :] -You can use a ``pd.IndexSlice`` to have a more natural syntax using ``:`` rather than using ``slice(None)`` +You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax using ``:``, rather than using ``slice(None)``. .. ipython:: python @@ -344,7 +345,7 @@ slicers on a single axis. dfmi.loc(axis=0)[:, :, ['C1', 'C3']] -Furthermore you can *set* the values using these methods +Furthermore you can *set* the values using the following methods. .. ipython:: python @@ -379,7 +380,7 @@ selecting data at a particular level of a MultiIndex easier. df.loc[(slice(None),'one'),:] You can also select on the columns with :meth:`~pandas.MultiIndex.xs`, by -providing the axis argument +providing the axis argument. .. ipython:: python @@ -391,7 +392,7 @@ providing the axis argument # using the slicers df.loc[:,(slice(None),'one')] -:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys +:meth:`~pandas.MultiIndex.xs` also allows selection with multiple keys. .. 
ipython:: python @@ -403,13 +404,13 @@ providing the axis argument df.loc[:,('bar','one')] You can pass ``drop_level=False`` to :meth:`~pandas.MultiIndex.xs` to retain -the level that was selected +the level that was selected. .. ipython:: python df.xs('one', level='second', axis=1, drop_level=False) -versus the result with ``drop_level=True`` (the default value) +Compare the above with the result using ``drop_level=True`` (the default value). .. ipython:: python @@ -470,7 +471,7 @@ allowing you to permute the hierarchical index levels in one step: Sorting a :class:`~pandas.MultiIndex` ------------------------------------- -For MultiIndex-ed objects to be indexed & sliced effectively, they need +For MultiIndex-ed objects to be indexed and sliced effectively, they need to be sorted. As with any index, you can use ``sort_index``. .. ipython:: python @@ -623,7 +624,8 @@ Index Types ----------- We have discussed ``MultiIndex`` in the previous sections pretty extensively. ``DatetimeIndex`` and ``PeriodIndex`` -are shown :ref:`here <timeseries.overview>`. ``TimedeltaIndex`` are :ref:`here <timedeltas.timedeltas>`. +are shown :ref:`here <timeseries.overview>`, and information about +`TimedeltaIndex`` is found :ref:`here <timedeltas.timedeltas>`. In the following sub-sections we will highlight some other index types. @@ -647,7 +649,7 @@ and allows efficient indexing and storage of an index with a large number of dup df.dtypes df.B.cat.categories -Setting the index, will create a ``CategoricalIndex`` +Setting the index will create a ``CategoricalIndex``. .. ipython:: python @@ -655,36 +657,38 @@ Setting the index, will create a ``CategoricalIndex`` df2.index Indexing with ``__getitem__/.iloc/.loc`` works similarly to an ``Index`` with duplicates. -The indexers MUST be in the category or the operation will raise. +The indexers **must** be in the category or the operation will raise a ``KeyError``. .. 
ipython:: python df2.loc['a'] -These PRESERVE the ``CategoricalIndex`` +The ``CategoricalIndex`` is **preserved** after indexing: .. ipython:: python df2.loc['a'].index -Sorting will order by the order of the categories +Sorting the index will sort by the order of the categories (Recall that we +created the index with with ``CategoricalDtype(list('cab'))``, so the sorted +order is ``cab``.). .. ipython:: python df2.sort_index() -Groupby operations on the index will preserve the index nature as well +Groupby operations on the index will preserve the index nature as well. .. ipython:: python df2.groupby(level=0).sum() df2.groupby(level=0).sum().index -Reindexing operations, will return a resulting index based on the type of the passed -indexer, meaning that passing a list will return a plain-old-``Index``; indexing with +Reindexing operations will return a resulting index based on the type of the passed +indexer. Passing a list will return a plain-old ``Index``; indexing with a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories -of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with -values NOT in the categories, similarly to how you can reindex ANY pandas index. +of the **passed** ``Categorical`` dtype. This allows one to arbitrarily index these even with +values **not** in the categories, similarly to how you can reindex **any** pandas index. .. ipython :: python @@ -720,7 +724,8 @@ Int64Index and RangeIndex Indexing on an integer-based Index with floats has been clarified in 0.18.0, for a summary of the changes, see :ref:`here <whatsnew_0180.float_indexers>`. -``Int64Index`` is a fundamental basic index in *pandas*. This is an Immutable array implementing an ordered, sliceable set. +``Int64Index`` is a fundamental basic index in pandas. +This is an Immutable array implementing an ordered, sliceable set. 
Prior to 0.18.0, the ``Int64Index`` would provide the default index for all ``NDFrame`` objects. ``RangeIndex`` is a sub-class of ``Int64Index`` added in version 0.18.0, now providing the default index for all ``NDFrame`` objects. @@ -742,7 +747,7 @@ same. sf = pd.Series(range(5), index=indexf) sf -Scalar selection for ``[],.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``) +Scalar selection for ``[],.loc`` will always be label based. An integer will match an equal float index (e.g. ``3`` is equivalent to ``3.0``). .. ipython:: python @@ -751,15 +756,17 @@ Scalar selection for ``[],.loc`` will always be label based. An integer will mat sf.loc[3] sf.loc[3.0] -The only positional indexing is via ``iloc`` +The only positional indexing is via ``iloc``. .. ipython:: python sf.iloc[3] -A scalar index that is not found will raise ``KeyError`` +A scalar index that is not found will raise a ``KeyError``. -Slicing is ALWAYS on the values of the index, for ``[],ix,loc`` and ALWAYS positional with ``iloc`` +Slicing is primarily on the values of the index when using ``[],ix,loc``, and +**always** positional when using ``iloc``. The exception is when the slice is +boolean, in which case it will always be positional. .. ipython:: python @@ -767,14 +774,14 @@ Slicing is ALWAYS on the values of the index, for ``[],ix,loc`` and ALWAYS posit sf.loc[2:4] sf.iloc[2:4] -In float indexes, slicing using floats is allowed +In float indexes, slicing using floats is allowed. .. ipython:: python sf[2.1:4.6] sf.loc[2.1:4.6] -In non-float indexes, slicing using floats will raise a ``TypeError`` +In non-float indexes, slicing using floats will raise a ``TypeError``. .. code-block:: ipython @@ -786,7 +793,7 @@ In non-float indexes, slicing using floats will raise a ``TypeError`` .. 
warning:: - Using a scalar float indexer for ``.iloc`` has been removed in 0.18.0, so the following will raise a ``TypeError`` + Using a scalar float indexer for ``.iloc`` has been removed in 0.18.0, so the following will raise a ``TypeError``: .. code-block:: ipython @@ -816,13 +823,13 @@ Selection operations then will always work on a value basis, for all selection o dfir.loc[0:1001,'A'] dfir.loc[1000.4] -You could then easily pick out the first 1 second (1000 ms) of data then. +You could retrieve the first 1 second (1000 ms) of data as such: .. ipython:: python dfir[0:1000] -Of course if you need integer based selection, then use ``iloc`` +If you need integer based selection, you should use ``iloc``: .. ipython:: python @@ -975,6 +982,7 @@ consider the following Series: s Suppose we wished to slice from ``c`` to ``e``, using integers this would be +accomplished as such: .. ipython:: python diff --git a/doc/source/basics.rst b/doc/source/basics.rst index f9995472866ed..da82f56d315e6 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -436,7 +436,7 @@ General DataFrame Combine ~~~~~~~~~~~~~~~~~~~~~~~~~ The :meth:`~DataFrame.combine_first` method above calls the more general -DataFrame method :meth:`~DataFrame.combine`. This method takes another DataFrame +:meth:`DataFrame.combine`. This method takes another DataFrame and a combiner function, aligns the input DataFrame and then passes the combiner function pairs of Series (i.e., columns whose names are the same). @@ -540,8 +540,8 @@ will exclude NAs on Series input by default: np.mean(df['one']) np.mean(df['one'].values) -``Series`` also has a method :meth:`~Series.nunique` which will return the -number of unique non-NA values: +:meth:`Series.nunique` will return the number of unique non-NA values in a +Series: .. ipython:: python @@ -852,7 +852,8 @@ Aggregation API The aggregation API allows one to express possibly multiple aggregation operations in a single concise way. 
This API is similar across pandas objects, see :ref:`groupby API <groupby.aggregate>`, the :ref:`window functions API <stats.aggregate>`, and the :ref:`resample API <timeseries.aggregate>`. -The entry point for aggregation is the method :meth:`~DataFrame.aggregate`, or the alias :meth:`~DataFrame.agg`. +The entry point for aggregation is :meth:`DataFrame.aggregate`, or the alias +:meth:`DataFrame.agg`. We will use a similar starting frame from above: @@ -1913,8 +1914,8 @@ dtype of the column will be chosen to accommodate all of the data types # string data forces an ``object`` dtype pd.Series([1, 2, 3, 6., 'foo']) -The method :meth:`~DataFrame.get_dtype_counts` will return the number of columns of -each type in a ``DataFrame``: +The number of columns of each type in a ``DataFrame`` can be found by calling +:meth:`~DataFrame.get_dtype_counts`. .. ipython:: python diff --git a/doc/source/computation.rst b/doc/source/computation.rst index a6bc9431d3bcc..0994d35999191 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -26,9 +26,10 @@ Statistical Functions Percent Change ~~~~~~~~~~~~~~ -``Series``, ``DataFrame``, and ``Panel`` all have a method ``pct_change`` to compute the -percent change over a given number of periods (using ``fill_method`` to fill -NA/null values *before* computing the percent change). +``Series``, ``DataFrame``, and ``Panel`` all have a method +:meth:`~DataFrame.pct_change` to compute the percent change over a given number +of periods (using ``fill_method`` to fill NA/null values *before* computing +the percent change). .. ipython:: python @@ -47,8 +48,8 @@ NA/null values *before* computing the percent change). Covariance ~~~~~~~~~~ -The ``Series`` object has a method ``cov`` to compute covariance between series -(excluding NA/null values). +:meth:`Series.cov` can be used to compute covariance between series +(excluding missing values). .. 
ipython:: python @@ -56,8 +57,9 @@ The ``Series`` object has a method ``cov`` to compute covariance between series s2 = pd.Series(np.random.randn(1000)) s1.cov(s2) -Analogously, ``DataFrame`` has a method ``cov`` to compute pairwise covariances -among the series in the DataFrame, also excluding NA/null values. +Analogously, :meth:`DataFrame.cov` to compute +pairwise covariances among the series in the DataFrame, also excluding +NA/null values. .. _computation.covariance.caveats: @@ -97,7 +99,9 @@ in order to have a valid result. Correlation ~~~~~~~~~~~ -Several methods for computing correlations are provided: +Correlation may be computed using the :meth:`~DataFrame.corr` method. +Using the ``method`` parameter, several methods for computing correlations are +provided: .. csv-table:: :header: "Method name", "Description" @@ -110,6 +114,11 @@ Several methods for computing correlations are provided: .. \rho = \cov(x, y) / \sigma_x \sigma_y All of these are currently computed using pairwise complete observations. +Wikipedia has articles covering the above correlation coefficients: + +* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ +* `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ +* `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ .. note:: @@ -145,9 +154,9 @@ Like ``cov``, ``corr`` also supports the optional ``min_periods`` keyword: frame.corr(min_periods=12) -A related method ``corrwith`` is implemented on DataFrame to compute the -correlation between like-labeled Series contained in different DataFrame -objects. +A related method :meth:`~DataFrame.corrwith` is implemented on DataFrame to +compute the correlation between like-labeled Series contained in different +DataFrame objects. .. ipython:: python @@ -163,8 +172,8 @@ objects. 
Data ranking ~~~~~~~~~~~~ -The ``rank`` method produces a data ranking with ties being assigned the mean -of the ranks (by default) for the group: +The :meth:`~Series.rank` method produces a data ranking with ties being +assigned the mean of the ranks (by default) for the group: .. ipython:: python @@ -172,8 +181,9 @@ of the ranks (by default) for the group: s['d'] = s['b'] # so there's a tie s.rank() -``rank`` is also a DataFrame method and can rank either the rows (``axis=0``) -or the columns (``axis=1``). ``NaN`` values are excluded from the ranking. +:meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows +(``axis=0``) or the columns (``axis=1``). ``NaN`` values are excluded from the +ranking. .. ipython:: python @@ -205,7 +215,7 @@ Window Functions Prior to version 0.18.0, ``pd.rolling_*``, ``pd.expanding_*``, and ``pd.ewm*`` were module level functions and are now deprecated. These are replaced by using the :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.EWM`. objects and a corresponding method call. - The deprecation warning will show the new syntax, see an example :ref:`here <whatsnew_0180.window_deprecations>` + The deprecation warning will show the new syntax, see an example :ref:`here <whatsnew_0180.window_deprecations>`. For working with data, a number of windows functions are provided for computing common *window* or *rolling* statistics. Among these are count, sum, @@ -219,7 +229,7 @@ see the :ref:`groupby docs <groupby.transform.window_resample>`. .. note:: - The API for window statistics is quite similar to the way one works with ``GroupBy`` objects, see the documentation :ref:`here <groupby>` + The API for window statistics is quite similar to the way one works with ``GroupBy`` objects, see the documentation :ref:`here <groupby>`. 
We work with ``rolling``, ``expanding`` and ``exponentially weighted`` data through the corresponding objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.EWM`. @@ -289,7 +299,7 @@ sugar for applying the moving window operator to all of the DataFrame's columns: Method Summary ~~~~~~~~~~~~~~ -We provide a number of the common statistical functions: +We provide a number of common statistical functions: .. currentmodule:: pandas.core.window @@ -564,7 +574,7 @@ Computing rolling pairwise covariances and correlations .. warning:: Prior to version 0.20.0 if ``pairwise=True`` was passed, a ``Panel`` would be returned. - This will now return a 2-level MultiIndexed DataFrame, see the whatsnew :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>` + This will now return a 2-level MultiIndexed DataFrame, see the whatsnew :ref:`here <whatsnew_0200.api_breaking.rolling_pairwise>`. In financial data analysis and other fields it's common to compute covariance and correlation matrices for a collection of time series. Often one is also @@ -623,7 +633,8 @@ perform multiple computations on the data. These operations are similar to the : r = dfa.rolling(window=60,min_periods=1) r -We can aggregate by passing a function to the entire DataFrame, or select a Series (or multiple Series) via standard getitem. +We can aggregate by passing a function to the entire DataFrame, or select a +Series (or multiple Series) via standard ``__getitem__``. .. ipython:: python @@ -741,14 +752,14 @@ all accept are: - ``min_periods``: threshold of non-null data points to require. Defaults to minimum needed to compute statistic. No ``NaNs`` will be output once ``min_periods`` non-null data points have been seen. -- ``center``: boolean, whether to set the labels at the center (default is False) +- ``center``: boolean, whether to set the labels at the center (default is False). .. _stats.moments.expanding.note: .. 
note:: The output of the ``.rolling`` and ``.expanding`` methods do not return a ``NaN`` if there are at least ``min_periods`` non-null values in the current - window. For example, + window. For example: .. ipython:: python @@ -818,7 +829,8 @@ In general, a weighted moving average is calculated as y_t = \frac{\sum_{i=0}^t w_i x_{t-i}}{\sum_{i=0}^t w_i}, -where :math:`x_t` is the input and :math:`y_t` is the result. +where :math:`x_t` is the input, :math:`y_t` is the result and the :math:`w_i` +are the weights. The EW functions support two variants of exponential weights. The default, ``adjust=True``, uses the weights :math:`w_i = (1 - \alpha)^i` @@ -931,7 +943,7 @@ average of ``3, NaN, 5`` would be calculated as .. math:: - \frac{(1-\alpha)^2 \cdot 3 + 1 \cdot 5}{(1-\alpha)^2 + 1} + \frac{(1-\alpha)^2 \cdot 3 + 1 \cdot 5}{(1-\alpha)^2 + 1}. Whereas if ``ignore_na=True``, the weighted average would be calculated as @@ -953,4 +965,4 @@ are scaled by debiasing factors (For :math:`w_i = 1`, this reduces to the usual :math:`N / (N - 1)` factor, with :math:`N = t + 1`.) See `Weighted Sample Variance <http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`__ -for further details. +on Wikipedia for further details. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 3c2fd4d959d63..b9223c6ad9f7a 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -18,16 +18,14 @@ Indexing and Selecting Data The axis labeling information in pandas objects serves many purposes: - Identifies data (i.e. provides *metadata*) using known indicators, - important for analysis, visualization, and interactive console display - - Enables automatic and explicit data alignment - - Allows intuitive getting and setting of subsets of the data set + important for analysis, visualization, and interactive console display. + - Enables automatic and explicit data alignment. + - Allows intuitive getting and setting of subsets of the data set. 
In this section, we will focus on the final point: namely, how to slice, dice, and generally get and set subsets of pandas objects. The primary focus will be on Series and DataFrame as they have received more development attention in -this area. Expect more work to be invested in higher-dimensional data -structures (including ``Panel``) in the future, especially in label-based -advanced indexing. +this area. .. note:: @@ -43,9 +41,9 @@ advanced indexing. .. warning:: Whether a copy or a reference is returned for a setting operation, may - depend on the context. This is sometimes called ``chained assignment`` and - should be avoided. See :ref:`Returning a View versus Copy - <indexing.view_versus_copy>` + depend on the context. This is sometimes called ``chained assignment`` and + should be avoided. See :ref:`Returning a View versus Copy + <indexing.view_versus_copy>`. .. warning:: @@ -53,7 +51,7 @@ advanced indexing. See the :ref:`MultiIndex / Advanced Indexing <advanced>` for ``MultiIndex`` and more advanced indexing documentation. -See the :ref:`cookbook<cookbook.selection>` for some advanced strategies +See the :ref:`cookbook<cookbook.selection>` for some advanced strategies. .. _indexing.choice: @@ -66,21 +64,21 @@ of multi-axis indexing. - ``.loc`` is primarily label based, but may also be used with a boolean array. ``.loc`` will raise ``KeyError`` when the items are not found. Allowed inputs are: - - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a + - A single label, e.g. ``5`` or ``'a'`` (Note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the - index) - - A list or array of labels ``['a', 'b', 'c']`` - - A slice object with labels ``'a':'f'`` (note that contrary to usual python + index.). + - A list or array of labels ``['a', 'b', 'c']``. 
+ - A slice object with labels ``'a':'f'`` (Note that contrary to usual python slices, **both** the start and the stop are included, when present in the - index! - also see :ref:`Slicing with labels - <indexing.slicing_with_labels>`) + index! See :ref:`Slicing with labels + <indexing.slicing_with_labels>`.). - A boolean array - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and - that returns valid output for indexing (one of the above) + that returns valid output for indexing (one of the above). .. versionadded:: 0.18.1 - See more at :ref:`Selection by Label <indexing.label>` + See more at :ref:`Selection by Label <indexing.label>`. - ``.iloc`` is primarily integer position based (from ``0`` to ``length-1`` of the axis), but may also be used with a boolean @@ -89,27 +87,26 @@ of multi-axis indexing. out-of-bounds indexing. (this conforms with python/numpy *slice* semantics). Allowed inputs are: - - An integer e.g. ``5`` - - A list or array of integers ``[4, 3, 0]`` - - A slice object with ints ``1:7`` - - A boolean array + - An integer e.g. ``5``. + - A list or array of integers ``[4, 3, 0]``. + - A slice object with ints ``1:7``. + - A boolean array. - A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and - that returns valid output for indexing (one of the above) + that returns valid output for indexing (one of the above). .. versionadded:: 0.18.1 - See more at :ref:`Selection by Position <indexing.integer>` - - See more at :ref:`Advanced Indexing <advanced>` and :ref:`Advanced + See more at :ref:`Selection by Position <indexing.integer>`, + :ref:`Advanced Indexing <advanced>` and :ref:`Advanced Hierarchical <advanced.advanced_hierarchical>`. - ``.loc``, ``.iloc``, and also ``[]`` indexing can accept a ``callable`` as indexer. See more at :ref:`Selection By Callable <indexing.callable>`. 
Getting values from an object with multi-axes selection uses the following -notation (using ``.loc`` as an example, but applies to ``.iloc`` as +notation (using ``.loc`` as an example, but the following applies to ``.iloc`` as well). Any of the axes accessors may be the null slice ``:``. Axes left out of -the specification are assumed to be ``:``. (e.g. ``p.loc['a']`` is equiv to -``p.loc['a', :, :]``) +the specification are assumed to be ``:``, e.g. ``p.loc['a']`` is equivalent to +``p.loc['a', :, :]``. .. csv-table:: :header: "Object Type", "Indexers" @@ -128,7 +125,8 @@ Basics As mentioned when introducing the data structures in the :ref:`last section <basics>`, the primary function of indexing with ``[]`` (a.k.a. ``__getitem__`` for those familiar with implementing class behavior in Python) is selecting out -lower-dimensional slices. Thus, +lower-dimensional slices. The following table shows return type values when +indexing pandas objects with ``[]``: .. csv-table:: :header: "Object Type", "Selection", "Return Value Type" @@ -188,7 +186,7 @@ columns. df.loc[:,['B', 'A']] = df[['A', 'B']] df[['A', 'B']] - The correct way is to use raw values + The correct way to swap column values is by using raw values: .. ipython:: python @@ -310,7 +308,7 @@ Selection By Label Whether a copy or a reference is returned for a setting operation, may depend on the context. This is sometimes called ``chained assignment`` and should be avoided. - See :ref:`Returning a View versus Copy <indexing.view_versus_copy>` + See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`. .. warning:: @@ -336,23 +334,23 @@ Selection By Label .. warning:: Starting in 0.21.0, pandas will show a ``FutureWarning`` if indexing with a list with missing labels. In the future - this will raise a ``KeyError``. See :ref:`list-like Using loc with missing keys in a list is Deprecated <indexing.deprecate_loc_reindex_listlike>` + this will raise a ``KeyError``. 
See :ref:`list-like Using loc with missing keys in a list is Deprecated <indexing.deprecate_loc_reindex_listlike>`. pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol. -All of the labels for which you ask, must be in the index or a ``KeyError`` will be raised! +Every label asked for must be in the index, or a ``KeyError`` will be raised. When slicing, both the start bound **AND** the stop bound are *included*, if present in the index. Integers are valid labels, but they refer to the label **and not the position**. The ``.loc`` attribute is the primary access method. The following are valid inputs: -- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the index) -- A list or array of labels ``['a', 'b', 'c']`` -- A slice object with labels ``'a':'f'`` (note that contrary to usual python +- A single label, e.g. ``5`` or ``'a'`` (Note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the index.). +- A list or array of labels ``['a', 'b', 'c']``. +- A slice object with labels ``'a':'f'`` (Note that contrary to usual python slices, **both** the start and the stop are included, when present in the - index! - also See :ref:`Slicing with labels - <indexing.slicing_with_labels>`) -- A boolean array -- A ``callable``, see :ref:`Selection By Callable <indexing.callable>` + index! See :ref:`Slicing with labels + <indexing.slicing_with_labels>`.). +- A boolean array. +- A ``callable``, see :ref:`Selection By Callable <indexing.callable>`. .. ipython:: python @@ -368,7 +366,7 @@ Note that setting works as well: s1.loc['c':] = 0 s1 -With a DataFrame +With a DataFrame: .. ipython:: python @@ -378,26 +376,26 @@ With a DataFrame df1 df1.loc[['a', 'b', 'd'], :] -Accessing via label slices +Accessing via label slices: .. 
ipython:: python df1.loc['d':, 'A':'C'] -For getting a cross section using a label (equiv to ``df.xs('a')``) +For getting a cross section using a label (equivalent to ``df.xs('a')``): .. ipython:: python df1.loc['a'] -For getting values with a boolean array +For getting values with a boolean array: .. ipython:: python df1.loc['a'] > 0 df1.loc[:, df1.loc['a'] > 0] -For getting a value explicitly (equiv to deprecated ``df.get_value('a','A')``) +For getting a value explicitly (equivalent to deprecated ``df.get_value('a','A')``): .. ipython:: python @@ -441,17 +439,17 @@ Selection By Position Whether a copy or a reference is returned for a setting operation, may depend on the context. This is sometimes called ``chained assignment`` and should be avoided. - See :ref:`Returning a View versus Copy <indexing.view_versus_copy>` + See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`. Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely python and numpy slicing. These are ``0-based`` indexing. When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``. The ``.iloc`` attribute is the primary access method. The following are valid inputs: -- An integer e.g. ``5`` -- A list or array of integers ``[4, 3, 0]`` -- A slice object with ints ``1:7`` -- A boolean array -- A ``callable``, see :ref:`Selection By Callable <indexing.callable>` +- An integer e.g. ``5``. +- A list or array of integers ``[4, 3, 0]``. +- A slice object with ints ``1:7``. +- A boolean array. +- A ``callable``, see :ref:`Selection By Callable <indexing.callable>`. .. ipython:: python @@ -467,7 +465,7 @@ Note that setting works as well: s1.iloc[:3] = 0 s1 -With a DataFrame +With a DataFrame: .. 
ipython:: python @@ -476,14 +474,14 @@ With a DataFrame columns=list(range(0,8,2))) df1 -Select via integer slicing +Select via integer slicing: .. ipython:: python df1.iloc[:3] df1.iloc[1:5, 2:4] -Select via integer list +Select via integer list: .. ipython:: python @@ -502,7 +500,7 @@ Select via integer list # this is also equivalent to ``df1.iat[1,1]`` df1.iloc[1, 1] -For getting a cross section using an integer position (equiv to ``df.xs(1)``) +For getting a cross section using an integer position (equiv to ``df.xs(1)``): .. ipython:: python @@ -523,7 +521,7 @@ Out of range slice indexes are handled gracefully just as in Python/Numpy. s.iloc[8:10] Note that using slices that go out of bounds can result in -an empty axis (e.g. an empty DataFrame being returned) +an empty axis (e.g. an empty DataFrame being returned). .. ipython:: python @@ -535,7 +533,7 @@ an empty axis (e.g. an empty DataFrame being returned) A single indexer that is out of bounds will raise an ``IndexError``. A list of indexers where any element is out of bounds will raise an -``IndexError`` +``IndexError``. .. code-block:: python @@ -601,7 +599,7 @@ bit of user confusion over the years. The recommended methods of indexing are: -- ``.loc`` if you want to *label* index +- ``.loc`` if you want to *label* index. - ``.iloc`` if you want to *positionally* index. .. ipython:: python @@ -612,7 +610,7 @@ The recommended methods of indexing are: dfd -Previous Behavior, where you wish to get the 0th and the 2nd elements from the index in the 'A' column. +Previous behavior, where you wish to get the 0th and the 2nd elements from the index in the 'A' column. .. code-block:: ipython @@ -635,7 +633,7 @@ This can also be expressed using ``.iloc``, by explicitly getting locations on t dfd.iloc[[0, 2], dfd.columns.get_loc('A')] -For getting *multiple* indexers, using ``.get_indexer`` +For getting *multiple* indexers, using ``.get_indexer``: .. 
ipython:: python @@ -824,7 +822,7 @@ Setting With Enlargement The ``.loc/[]`` operations can perform enlargement when setting a non-existent key for that axis. -In the ``Series`` case this is effectively an appending operation +In the ``Series`` case this is effectively an appending operation. .. ipython:: python @@ -833,7 +831,7 @@ In the ``Series`` case this is effectively an appending operation se[5] = 5. se -A ``DataFrame`` can be enlarged on either axis via ``.loc`` +A ``DataFrame`` can be enlarged on either axis via ``.loc``. .. ipython:: python @@ -889,7 +887,11 @@ Boolean indexing .. _indexing.boolean: Another common operation is the use of boolean vectors to filter the data. -The operators are: ``|`` for ``or``, ``&`` for ``and``, and ``~`` for ``not``. These **must** be grouped by using parentheses. +The operators are: ``|`` for ``or``, ``&`` for ``and``, and ``~`` for ``not``. +These **must** be grouped by using parentheses, since by default Python will +evaluate an expression such as ``df.A > 2 & df.B < 3`` as +``df.A > (2 & df.B) < 3``, while the desired evaluation order is +``(df.A > 2) & (df.B < 3)``. Using a boolean vector to index a Series works exactly as in a numpy ndarray: @@ -929,7 +931,7 @@ more complex criteria: # Multiple criteria df2[criterion & (df2['b'] == 'x')] -Note, with the choice methods :ref:`Selection by Label <indexing.label>`, :ref:`Selection by Position <indexing.integer>`, +With the choice methods :ref:`Selection by Label <indexing.label>`, :ref:`Selection by Position <indexing.integer>`, and :ref:`Advanced Indexing <advanced>` you may select along more than one axis using boolean vectors combined with other indexing expressions. .. ipython:: python @@ -941,9 +943,9 @@ and :ref:`Advanced Indexing <advanced>` you may select along more than one axis Indexing with isin ------------------ -Consider the ``isin`` method of Series, which returns a boolean vector that is -true wherever the Series elements exist in the passed list. 
This allows you to -select rows where one or more columns have values you want: +Consider the :meth:`~Series.isin` method of ``Series``, which returns a boolean +vector that is true wherever the ``Series`` elements exist in the passed list. +This allows you to select rows where one or more columns have values you want: .. ipython:: python @@ -973,7 +975,7 @@ in the membership check: s_mi.iloc[s_mi.index.isin([(1, 'a'), (2, 'b'), (0, 'c')])] s_mi.iloc[s_mi.index.isin(['a', 'c', 'e'], level=1)] -DataFrame also has an ``isin`` method. When calling ``isin``, pass a set of +DataFrame also has an :meth:`~DataFrame.isin` method. When calling ``isin``, pass a set of values as either an array or dict. If values is an array, ``isin`` returns a DataFrame of booleans that is the same shape as the original DataFrame, with True wherever the element is in the sequence of values. @@ -1018,13 +1020,13 @@ Selecting values from a Series with a boolean vector generally returns a subset of the data. To guarantee that selection output has the same shape as the original data, you can use the ``where`` method in ``Series`` and ``DataFrame``. -To return only the selected rows +To return only the selected rows: .. ipython:: python s[s > 0] -To return a Series of the same shape as the original +To return a Series of the same shape as the original: .. ipython:: python @@ -1032,7 +1034,7 @@ To return a Series of the same shape as the original Selecting values from a DataFrame with a boolean criterion now also preserves input data shape. ``where`` is used under the hood as the implementation. -Equivalent is ``df.where(df < 0)`` +The code below is equivalent to ``df.where(df < 0)``. .. ipython:: python :suppress: @@ -1087,12 +1089,12 @@ without creating a copy: Furthermore, ``where`` aligns the input boolean condition (ndarray or DataFrame), such that partial selection with setting is possible. 
This is analogous to -partial setting via ``.loc`` (but on the contents rather than the axis labels) +partial setting via ``.loc`` (but on the contents rather than the axis labels). .. ipython:: python df2 = df.copy() - df2[ df2[1:4] > 0 ] = 3 + df2[ df2[1:4] > 0] = 3 df2 Where can also accept ``axis`` and ``level`` parameters to align the input when @@ -1103,7 +1105,7 @@ performing the ``where``. df2 = df.copy() df2.where(df2>0,df2['A'],axis='index') -This is equivalent (but faster than) the following. +This is equivalent to (but faster than) the following. .. ipython:: python @@ -1123,9 +1125,11 @@ as condition and ``other`` argument. 'C': [7, 8, 9]}) df3.where(lambda x: x > 4, lambda x: x + 10) -**mask** -``mask`` is the inverse boolean operation of ``where``. +Mask +~~~~ + +:meth:`~pandas.DataFrame.mask` is the inverse boolean operation of ``where``. .. ipython:: python @@ -1134,8 +1138,8 @@ as condition and ``other`` argument. .. _indexing.query: -The :meth:`~pandas.DataFrame.query` Method (Experimental) ---------------------------------------------------------- +The :meth:`~pandas.DataFrame.query` Method +------------------------------------------ :class:`~pandas.DataFrame` objects have a :meth:`~pandas.DataFrame.query` method that allows selection using an expression. @@ -1263,7 +1267,7 @@ having to specify which frame you're interested in querying :meth:`~pandas.DataFrame.query` Python versus pandas Syntax Comparison ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Full numpy-like syntax +Full numpy-like syntax: .. ipython:: python @@ -1273,19 +1277,19 @@ Full numpy-like syntax df[(df.a < df.b) & (df.b < df.c)] Slightly nicer by removing the parentheses (by binding making comparison -operators bind tighter than ``&``/``|``) +operators bind tighter than ``&`` and ``|``). .. ipython:: python df.query('a < b & b < c') -Use English instead of symbols +Use English instead of symbols: .. 
ipython:: python df.query('a < b and b < c') -Pretty close to how you might write it on paper +Pretty close to how you might write it on paper: .. ipython:: python @@ -1356,7 +1360,7 @@ Special use of the ``==`` operator with ``list`` objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Comparing a ``list`` of values to a column using ``==``/``!=`` works similarly -to ``in``/``not in`` +to ``in``/``not in``. .. ipython:: python @@ -1391,7 +1395,7 @@ You can negate boolean expressions with the word ``not`` or the ``~`` operator. df.query('not bools') df.query('not bools') == df[~df.bools] -Of course, expressions can be arbitrarily complex too +Of course, expressions can be arbitrarily complex too: .. ipython:: python @@ -1420,7 +1424,7 @@ Performance of :meth:`~pandas.DataFrame.query` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``DataFrame.query()`` using ``numexpr`` is slightly faster than Python for -large frames +large frames. .. image:: _static/query-perf.png @@ -1428,7 +1432,7 @@ large frames You will only see the performance benefits of using the ``numexpr`` engine with ``DataFrame.query()`` if your frame has more than approximately 200,000 - rows + rows. .. image:: _static/query-perf-small.png @@ -1482,7 +1486,7 @@ Also, you can pass a list of columns to identify duplications. df2.drop_duplicates(['a', 'b']) To drop duplicates by index value, use ``Index.duplicated`` then perform slicing. -Same options are available in ``keep`` parameter. +The same set of options are available for the ``keep`` parameter. .. ipython:: python @@ -1514,7 +1518,7 @@ The :meth:`~pandas.DataFrame.lookup` Method Sometimes you want to extract a set of values given a sequence of row labels and column labels, and the ``lookup`` method allows for this and returns a -numpy array. For instance, +numpy array. For instance: .. ipython:: python @@ -1599,7 +1603,7 @@ Set operations on Index objects .. 
_indexing.set_ops: -The two main operations are ``union (|)``, ``intersection (&)`` +The two main operations are ``union (|)`` and ``intersection (&)``. These can be directly called as instance methods or used via overloaded operators. Difference is provided via the ``.difference()`` method. @@ -1612,7 +1616,7 @@ operators. Difference is provided via the ``.difference()`` method. a.difference(b) Also available is the ``symmetric_difference (^)`` operation, which returns elements -that appear in either ``idx1`` or ``idx2`` but not both. This is +that appear in either ``idx1`` or ``idx2``, but not in both. This is equivalent to the Index created by ``idx1.difference(idx2).union(idx2.difference(idx1))``, with duplicates dropped. @@ -1662,9 +1666,9 @@ Set an index .. _indexing.set_index: -DataFrame has a ``set_index`` method which takes a column name (for a regular -``Index``) or a list of column names (for a ``MultiIndex``), to create a new, -indexed DataFrame: +DataFrame has a :meth:`~DataFrame.set_index` method which takes a column name +(for a regular ``Index``) or a list of column names (for a ``MultiIndex``). +To create a new, re-indexed DataFrame: .. ipython:: python :suppress: @@ -1703,9 +1707,10 @@ the index in-place (without creating a new object): Reset the index ~~~~~~~~~~~~~~~ -As a convenience, there is a new function on DataFrame called ``reset_index`` -which transfers the index values into the DataFrame's columns and sets a simple -integer index. This is the inverse operation to ``set_index`` +As a convenience, there is a new function on DataFrame called +:meth:`~DataFrame.reset_index` which transfers the index values into the +DataFrame's columns and sets a simple integer index. +This is the inverse operation of :meth:`~DataFrame.set_index`. .. 
ipython:: python @@ -1726,11 +1731,6 @@ You can use the ``level`` keyword to remove only a portion of the index: ``reset_index`` takes an optional parameter ``drop`` which if true simply discards the index, instead of putting index values in the DataFrame's columns. -.. note:: - - The ``reset_index`` method used to be called ``delevel`` which is now - deprecated. - Adding an ad hoc index ~~~~~~~~~~~~~~~~~~~~~~ @@ -1769,7 +1769,7 @@ Compare these two access methods: dfmi.loc[:,('one','second')] These both yield the same results, so which should you use? It is instructive to understand the order -of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``) +of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``). ``dfmi['one']`` selects the first level of the columns and returns a DataFrame that is singly-indexed. Then another python operation ``dfmi_with_one['second']`` selects the series indexed by ``'second'`` happens. @@ -1807,7 +1807,7 @@ But this code is handled differently: See that ``__getitem__`` in there? Outside of simple cases, it's very hard to predict whether it will return a view or a copy (it depends on the memory layout -of the array, about which *pandas* makes no guarantees), and therefore whether +of the array, about which pandas makes no guarantees), and therefore whether the ``__setitem__`` will modify ``dfmi`` or a temporary object that gets thrown out immediately afterward. **That's** what ``SettingWithCopy`` is warning you about! @@ -1882,9 +1882,9 @@ A chained assignment can also crop up in setting in a mixed dtype frame. .. note:: - These setting rules apply to all of ``.loc/.iloc`` + These setting rules apply to all of ``.loc/.iloc``. -This is the correct access method +This is the correct access method: .. 
ipython:: python @@ -1892,7 +1892,7 @@ This is the correct access method dfc.loc[0,'A'] = 11 dfc -This *can* work at times, but is not guaranteed, and so should be avoided +This *can* work at times, but it is not guaranteed to, and therefore should be avoided: .. ipython:: python :okwarning: @@ -1901,7 +1901,7 @@ This *can* work at times, but is not guaranteed, and so should be avoided dfc['A'][0] = 111 dfc -This will **not** work at all, and so should be avoided +This will **not** work at all, and so should be avoided: :: diff --git a/doc/source/options.rst b/doc/source/options.rst index 505a5ade68de0..5641b2628fe40 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -37,7 +37,7 @@ namespace: - :func:`~pandas.option_context` - execute a codeblock with a set of options that revert to prior settings after execution. -**Note:** developers can check out pandas/core/config.py for more info. +**Note:** Developers can check out `pandas/core/config.py <https://github.com/pandas-dev/pandas/blob/master/pandas/core/config.py>`_ for more information. All of the functions above accept a regexp pattern (``re.search`` style) as an argument, and so passing in a substring will work - as long as it is unambiguous: @@ -78,8 +78,9 @@ with no argument ``describe_option`` will print out the descriptions for all ava Getting and Setting Options --------------------------- -As described above, ``get_option()`` and ``set_option()`` are available from the -pandas namespace. To change an option, call ``set_option('option regex', new_value)`` +As described above, :func:`~pandas.get_option` and :func:`~pandas.set_option` +are available from the pandas namespace. To change an option, call +``set_option('option regex', new_value)``. .. ipython:: python @@ -87,7 +88,7 @@ pandas namespace. 
To change an option, call ``set_option('option regex', new_va pd.set_option('mode.sim_interactive', True) pd.get_option('mode.sim_interactive') -**Note:** that the option 'mode.sim_interactive' is mostly used for debugging purposes. +**Note:** The option 'mode.sim_interactive' is mostly used for debugging purposes. All options also have a default value, and you can use ``reset_option`` to do just that: @@ -221,7 +222,7 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa .. ipython:: python - df =pd.DataFrame(np.random.choice([0,1,np.nan], size=(10,10))) + df = pd.DataFrame(np.random.choice([0,1,np.nan], size=(10,10))) df pd.set_option('max_info_rows', 11) df.info() @@ -229,8 +230,8 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa df.info() pd.reset_option('max_info_rows') -``display.precision`` sets the output display precision in terms of decimal places. This is only a -suggestion. +``display.precision`` sets the output display precision in terms of decimal places. +This is only a suggestion. .. ipython:: python @@ -241,7 +242,7 @@ suggestion. df ``display.chop_threshold`` sets at what level pandas rounds to zero when -it displays a Series of DataFrame. Note, this does not effect the +it displays a Series of DataFrame. This setting does not change the precision at which the number is stored. .. ipython:: python @@ -254,7 +255,7 @@ precision at which the number is stored. pd.reset_option('chop_threshold') ``display.colheader_justify`` controls the justification of the headers. -Options are 'right', and 'left'. +The options are 'right', and 'left'. .. 
ipython:: python diff --git a/doc/source/text.rst b/doc/source/text.rst index 2a86d92978043..2b6459b581c1e 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -99,7 +99,7 @@ Elements in the split lists can be accessed using ``get`` or ``[]`` notation: s2.str.split('_').str.get(1) s2.str.split('_').str[1] -Easy to expand this to return a DataFrame using ``expand``. +It is easy to expand this to return a DataFrame using ``expand``. .. ipython:: python @@ -268,7 +268,7 @@ It returns a Series if ``expand=False``. pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=False) Calling on an ``Index`` with a regex with exactly one capture group -returns a ``DataFrame`` with one column if ``expand=True``, +returns a ``DataFrame`` with one column if ``expand=True``. .. ipython:: python @@ -373,7 +373,7 @@ You can check whether elements contain a pattern: pattern = r'[0-9][a-z]' pd.Series(['1', '2', '3a', '3b', '03c']).str.contains(pattern) -or match a pattern: +Or whether elements match a pattern: .. ipython:: python
This PR continues my read-through of the docs, the previous PRs submitted are #18941 and #18948. The following edits have been made: - Missing periods and colons added before introducing code examples. - Increased number of function references (clickable links). - Cleared up a few sentences which I found unclear. Feedback is welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/18973
2017-12-28T16:54:15Z
2017-12-29T21:48:57Z
2017-12-29T21:48:57Z
2017-12-29T21:49:01Z
TST: split out some sparse tests
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 800a20aa94b8f..693a2fe1fd6a6 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -178,15 +178,15 @@ if [ "$PIP_BUILD_TEST" ]; then # build & install testing echo "[building release]" - bash scripts/build_dist_for_release.sh + time bash scripts/build_dist_for_release.sh || exit 1 conda uninstall -y cython - time pip install dist/*tar.gz --quiet || exit 1 + time pip install dist/*tar.gz || exit 1 elif [ "$CONDA_BUILD_TEST" ]; then # build & install testing echo "[building conda recipe]" - conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test + time conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test echo "[installing]" conda install pandas --use-local diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 3a8edf9f066ee..a47f2c0d4ab13 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -402,7 +402,7 @@ class DatetimeTZDtype(ExtensionDtype): num = 101 base = np.dtype('M8[ns]') _metadata = ['unit', 'tz'] - _match = re.compile("(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") + _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache = {} def __new__(cls, unit=None, tz=None): @@ -514,7 +514,7 @@ class PeriodDtype(ExtensionDtype): base = np.dtype('O') num = 102 _metadata = ['freq'] - _match = re.compile("(P|p)eriod\[(?P<freq>.+)\]") + _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache = {} def __new__(cls, freq=None): @@ -632,7 +632,7 @@ class IntervalDtype(ExtensionDtype): base = np.dtype('O') num = 103 _metadata = ['subtype'] - _match = re.compile("(I|i)nterval\[(?P<subtype>.+)\]") + _match = re.compile(r"(I|i)nterval\[(?P<subtype>.+)\]") _cache = {} def __new__(cls, subtype=None): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3243baa0008ae..12a4a7fdaedad 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2635,7 +2635,7 @@ def insert(self, loc, column, value, 
allow_duplicates=False): allow_duplicates=allow_duplicates) def assign(self, **kwargs): - """ + r""" Assign new columns to a DataFrame, returning a new object (a copy) with all the original columns in addition to the new ones. diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 9614641aa1abf..99c7563d5b249 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -306,7 +306,7 @@ def str_endswith(arr, pat, na=np.nan): def str_replace(arr, pat, repl, n=-1, case=None, flags=0): - """ + r""" Replace occurrences of pattern/regex in the Series/Index with some other string. Equivalent to :meth:`str.replace` or :func:`re.sub`. @@ -598,7 +598,7 @@ def _str_extract_frame(arr, pat, flags=0): def str_extract(arr, pat, flags=0, expand=None): - """ + r""" For each subject string in the Series, extract groups from the first match of regular expression pat. @@ -635,7 +635,7 @@ def str_extract(arr, pat, flags=0, expand=None): Non-matches will be NaN. >>> s = Series(['a1', 'b2', 'c3']) - >>> s.str.extract('([ab])(\d)') + >>> s.str.extract(r'([ab])(\d)') 0 1 0 a 1 1 b 2 @@ -643,7 +643,7 @@ def str_extract(arr, pat, flags=0, expand=None): A pattern may contain optional groups. - >>> s.str.extract('([ab])?(\d)') + >>> s.str.extract(r'([ab])?(\d)') 0 1 0 a 1 1 b 2 @@ -651,7 +651,7 @@ def str_extract(arr, pat, flags=0, expand=None): Named groups will become column names in the result. - >>> s.str.extract('(?P<letter>[ab])(?P<digit>\d)') + >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)') letter digit 0 a 1 1 b 2 @@ -660,7 +660,7 @@ def str_extract(arr, pat, flags=0, expand=None): A pattern with one group will return a DataFrame with one column if expand=True. - >>> s.str.extract('[ab](\d)', expand=True) + >>> s.str.extract(r'[ab](\d)', expand=True) 0 0 1 1 2 @@ -668,7 +668,7 @@ def str_extract(arr, pat, flags=0, expand=None): A pattern with one group will return a Series if expand=False. 
- >>> s.str.extract('[ab](\d)', expand=False) + >>> s.str.extract(r'[ab](\d)', expand=False) 0 1 1 2 2 NaN @@ -694,7 +694,7 @@ def str_extract(arr, pat, flags=0, expand=None): def str_extractall(arr, pat, flags=0): - """ + r""" For each subject string in the Series, extract groups from all matches of regular expression pat. When each subject string in the Series has exactly one match, extractall(pat).xs(0, level='match') @@ -728,7 +728,7 @@ def str_extractall(arr, pat, flags=0): Indices with no matches will not appear in the result. >>> s = Series(["a1a2", "b1", "c1"], index=["A", "B", "C"]) - >>> s.str.extractall("[ab](\d)") + >>> s.str.extractall(r"[ab](\d)") 0 match A 0 1 @@ -737,7 +737,7 @@ def str_extractall(arr, pat, flags=0): Capture group names are used for column names of the result. - >>> s.str.extractall("[ab](?P<digit>\d)") + >>> s.str.extractall(r"[ab](?P<digit>\d)") digit match A 0 1 @@ -746,7 +746,7 @@ def str_extractall(arr, pat, flags=0): A pattern with two groups will return a DataFrame with two columns. - >>> s.str.extractall("(?P<letter>[ab])(?P<digit>\d)") + >>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)") letter digit match A 0 a 1 @@ -755,7 +755,7 @@ def str_extractall(arr, pat, flags=0): Optional groups that do not match are NaN in the result. - >>> s.str.extractall("(?P<letter>[ab])?(?P<digit>\d)") + >>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)") letter digit match A 0 a 1 diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 8e9b5497083f6..347ec41baf0e1 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -3,7 +3,7 @@ from pandas.compat import StringIO, PY2 -def read_clipboard(sep='\s+', **kwargs): # pragma: no cover +def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" Read text from clipboard and pass to read_table. 
See read_table for the full argument list @@ -55,10 +55,10 @@ def read_clipboard(sep='\s+', **kwargs): # pragma: no cover counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: - sep = '\t' + sep = r'\t' if sep is None and kwargs.get('delim_whitespace') is None: - sep = '\s+' + sep = r'\s+' return read_table(StringIO(text), sep=sep, **kwargs) @@ -99,7 +99,7 @@ def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover if excel: try: if sep is None: - sep = '\t' + sep = r'\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 95b3a9162db45..a4678e5b40849 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1002,7 +1002,7 @@ def get_col_type(dtype): buf.write('\\end{longtable}\n') def _format_multicolumn(self, row, ilevels): - """ + r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format @@ -1040,7 +1040,7 @@ def append_col(): return row2 def _format_multirow(self, row, ilevels, i, rows): - """ + r""" Check following rows, whether row should be a multirow e.g.: becomes: @@ -1071,7 +1071,7 @@ def _print_cline(self, buf, i, icol): """ for cl in self.clinebuf: if cl[0] == i: - buf.write('\cline{{{cl:d}-{icol:d}}}\n' + buf.write('\\cline{{{cl:d}-{icol:d}}}\n' .format(cl=cl[1], icol=icol)) # remove entries that have been written to buffer self.clinebuf = [x for x in self.clinebuf if x[0] != i] diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index dca26d028d8a4..e053af17667c4 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -51,7 +51,7 @@ # so we need to remove it if we see it. 
_BOM = u('\ufeff') -_parser_params = """Also supports optionally iterating or breaking of the file +_parser_params = r"""Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools @@ -842,19 +842,19 @@ def _clean_options(self, options, engine): " sep=None with delim_whitespace=False" engine = 'python' elif sep is not None and len(sep) > 1: - if engine == 'c' and sep == '\s+': + if engine == 'c' and sep == r'\s+': result['delim_whitespace'] = True del result['delimiter'] elif engine not in ('python', 'python-fwf'): # wait until regex engine integrated fallback_reason = "the 'c' engine does not support"\ " regex separators (separators > 1 char and"\ - " different from '\s+' are"\ + r" different from '\s+' are"\ " interpreted as regex)" engine = 'python' elif delim_whitespace: if 'python' in engine: - result['delimiter'] = '\s+' + result['delimiter'] = r'\s+' elif sep is not None: encodeable = True try: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index d73417f7b0c95..c428000d73593 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1792,7 +1792,7 @@ def create_for_block( # name values_0 try: if version[0] == 0 and version[1] <= 10 and version[2] == 0: - m = re.search("values_block_(\d+)", name) + m = re.search(r"values_block_(\d+)", name) if m: name = "values_%s" % m.groups()[0] except: @@ -4297,7 +4297,7 @@ class AppendableMultiFrameTable(AppendableFrameTable): table_type = u('appendable_multiframe') obj_type = DataFrame ndim = 2 - _re_levels = re.compile("^level_\d+$") + _re_levels = re.compile(r"^level_\d+$") @property def table_type_short(self): diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 0d398ad3135a6..c7bbbf9940ba1 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1306,7 +1306,7 @@ def _create_table_setup(self): column_names_and_types = \ self._get_column_names_and_types(self._sql_type_name) - pat = re.compile('\s+') + pat = 
re.compile(r'\s+') column_names = [col_name for col_name, _, _ in column_names_and_types] if any(map(pat.search, column_names)): warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 8ce3c74fe6a31..6fc5526e63e59 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -599,7 +599,7 @@ def test_monotonic(self, closed): assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing - @pytest.mark.xfail(reason='not a valid repr as we use interval notation') + @pytest.mark.skip(reason='not a valid repr as we use interval notation') def test_repr(self): i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right') expected = ("IntervalIndex(left=[0, 1]," @@ -619,11 +619,11 @@ def test_repr(self): "\n dtype='interval[datetime64[ns]]')") assert repr(i) == expected - @pytest.mark.xfail(reason='not a valid repr as we use interval notation') + @pytest.mark.skip(reason='not a valid repr as we use interval notation') def test_repr_max_seq_item_setting(self): super(TestIntervalIndex, self).test_repr_max_seq_item_setting() - @pytest.mark.xfail(reason='not a valid repr as we use interval notation') + @pytest.mark.skip(reason='not a valid repr as we use interval notation') def test_repr_roundtrip(self): super(TestIntervalIndex, self).test_repr_roundtrip() diff --git a/pandas/tests/sparse/frame/__init__.py b/pandas/tests/sparse/frame/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/sparse/frame/test_analytics.py b/pandas/tests/sparse/frame/test_analytics.py new file mode 100644 index 0000000000000..ccb30502b862e --- /dev/null +++ b/pandas/tests/sparse/frame/test_analytics.py @@ -0,0 +1,40 @@ +import pytest +import numpy as np +from pandas import SparseDataFrame, DataFrame, SparseSeries +from pandas.util import testing as tm + + 
+@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_quantile(): + # GH 17386 + data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]] + q = 0.1 + + sparse_df = SparseDataFrame(data) + result = sparse_df.quantile(q) + + dense_df = DataFrame(data) + dense_expected = dense_df.quantile(q) + sparse_expected = SparseSeries(dense_expected) + + tm.assert_series_equal(result, dense_expected) + tm.assert_sp_series_equal(result, sparse_expected) + + +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_quantile_multi(): + # GH 17386 + data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]] + q = [0.1, 0.5] + + sparse_df = SparseDataFrame(data) + result = sparse_df.quantile(q) + + dense_df = DataFrame(data) + dense_expected = dense_df.quantile(q) + sparse_expected = SparseDataFrame(dense_expected) + + tm.assert_frame_equal(result, dense_expected) + tm.assert_sp_frame_equal(result, sparse_expected) diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/frame/test_frame.py similarity index 94% rename from pandas/tests/sparse/test_frame.py rename to pandas/tests/sparse/frame/test_frame.py index 4b9d6621a20fb..cf002ff046c2e 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1402,108 +1402,6 @@ def test_numpy_func_call(self): for func in funcs: getattr(np, func)(self.frame) - @pytest.mark.parametrize('data', [ - [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]], - [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [nan, nan]], - [ - [1.0, 1.0 + 1.0j], - [2.0 + 2.0j, 2.0], - [3.0, 3.0 + 3.0j], - [4.0 + 4.0j, 4.0], - [nan, nan] - ] - ]) - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_numeric_data(self, data): - # GH 17386 - lower_bound = 1.5 - - sparse = SparseDataFrame(data) - result = sparse.where(sparse > lower_bound) - - dense = DataFrame(data) - dense_expected = dense.where(dense > lower_bound) - sparse_expected 
= SparseDataFrame(dense_expected) - - tm.assert_frame_equal(result, dense_expected) - tm.assert_sp_frame_equal(result, sparse_expected) - - @pytest.mark.parametrize('data', [ - [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]], - [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [nan, nan]], - [ - [1.0, 1.0 + 1.0j], - [2.0 + 2.0j, 2.0], - [3.0, 3.0 + 3.0j], - [4.0 + 4.0j, 4.0], - [nan, nan] - ] - ]) - @pytest.mark.parametrize('other', [ - True, - -100, - 0.1, - 100.0 + 100.0j - ]) - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_numeric_data_and_other(self, data, other): - # GH 17386 - lower_bound = 1.5 - - sparse = SparseDataFrame(data) - result = sparse.where(sparse > lower_bound, other) - - dense = DataFrame(data) - dense_expected = dense.where(dense > lower_bound, other) - sparse_expected = SparseDataFrame(dense_expected, - default_fill_value=other) - - tm.assert_frame_equal(result, dense_expected) - tm.assert_sp_frame_equal(result, sparse_expected) - - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_bool_data(self): - # GH 17386 - data = [[False, False], [True, True], [False, False]] - cond = True - - sparse = SparseDataFrame(data) - result = sparse.where(sparse == cond) - - dense = DataFrame(data) - dense_expected = dense.where(dense == cond) - sparse_expected = SparseDataFrame(dense_expected) - - tm.assert_frame_equal(result, dense_expected) - tm.assert_sp_frame_equal(result, sparse_expected) - - @pytest.mark.parametrize('other', [ - True, - 0, - 0.1, - 100.0 + 100.0j - ]) - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_bool_data_and_other(self, other): - # GH 17386 - data = [[False, False], [True, True], [False, False]] - cond = True - - sparse = SparseDataFrame(data) - result = sparse.where(sparse == cond, other) - - dense = DataFrame(data) - dense_expected = dense.where(dense == cond, other) - sparse_expected 
= SparseDataFrame(dense_expected, - default_fill_value=other) - - tm.assert_frame_equal(result, dense_expected) - tm.assert_sp_frame_equal(result, sparse_expected) - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' '(GH 17386)') def test_quantile(self): diff --git a/pandas/tests/sparse/frame/test_indexing.py b/pandas/tests/sparse/frame/test_indexing.py new file mode 100644 index 0000000000000..1c27d44015c2b --- /dev/null +++ b/pandas/tests/sparse/frame/test_indexing.py @@ -0,0 +1,113 @@ +import pytest +import numpy as np +from pandas import SparseDataFrame, DataFrame +from pandas.util import testing as tm + + +pytestmark = pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)") + + +@pytest.mark.parametrize('data', [ + [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]], + [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]], + [ + [1.0, 1.0 + 1.0j], + [2.0 + 2.0j, 2.0], + [3.0, 3.0 + 3.0j], + [4.0 + 4.0j, 4.0], + [np.nan, np.nan] + ] +]) +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_numeric_data(data): + # GH 17386 + lower_bound = 1.5 + + sparse = SparseDataFrame(data) + result = sparse.where(sparse > lower_bound) + + dense = DataFrame(data) + dense_expected = dense.where(dense > lower_bound) + sparse_expected = SparseDataFrame(dense_expected) + + tm.assert_frame_equal(result, dense_expected) + tm.assert_sp_frame_equal(result, sparse_expected) + + +@pytest.mark.parametrize('data', [ + [[1, 1], [2, 2], [3, 3], [4, 4], [0, 0]], + [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [np.nan, np.nan]], + [ + [1.0, 1.0 + 1.0j], + [2.0 + 2.0j, 2.0], + [3.0, 3.0 + 3.0j], + [4.0 + 4.0j, 4.0], + [np.nan, np.nan] + ] +]) +@pytest.mark.parametrize('other', [ + True, + -100, + 0.1, + 100.0 + 100.0j +]) +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_numeric_data_and_other(data, other): + # GH 17386 + lower_bound = 1.5 + + sparse = SparseDataFrame(data) 
+ result = sparse.where(sparse > lower_bound, other) + + dense = DataFrame(data) + dense_expected = dense.where(dense > lower_bound, other) + sparse_expected = SparseDataFrame(dense_expected, + default_fill_value=other) + + tm.assert_frame_equal(result, dense_expected) + tm.assert_sp_frame_equal(result, sparse_expected) + + +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_bool_data(): + # GH 17386 + data = [[False, False], [True, True], [False, False]] + cond = True + + sparse = SparseDataFrame(data) + result = sparse.where(sparse == cond) + + dense = DataFrame(data) + dense_expected = dense.where(dense == cond) + sparse_expected = SparseDataFrame(dense_expected) + + tm.assert_frame_equal(result, dense_expected) + tm.assert_sp_frame_equal(result, sparse_expected) + + +@pytest.mark.parametrize('other', [ + True, + 0, + 0.1, + 100.0 + 100.0j +]) +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_bool_data_and_other(other): + # GH 17386 + data = [[False, False], [True, True], [False, False]] + cond = True + + sparse = SparseDataFrame(data) + result = sparse.where(sparse == cond, other) + + dense = DataFrame(data) + dense_expected = dense.where(dense == cond, other) + sparse_expected = SparseDataFrame(dense_expected, + default_fill_value=other) + + tm.assert_frame_equal(result, dense_expected) + tm.assert_sp_frame_equal(result, sparse_expected) diff --git a/pandas/tests/sparse/series/__init__.py b/pandas/tests/sparse/series/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/sparse/series/test_indexing.py b/pandas/tests/sparse/series/test_indexing.py new file mode 100644 index 0000000000000..de01b065a9fa0 --- /dev/null +++ b/pandas/tests/sparse/series/test_indexing.py @@ -0,0 +1,113 @@ +import pytest +import numpy as np +from pandas import SparseSeries, Series +from pandas.util import testing as tm + + +pytestmark = 
pytest.mark.skip("Wrong SparseBlock initialization (GH 17386)") + + +@pytest.mark.parametrize('data', [ + [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], + [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan], + [ + 1.0, 1.0 + 1.0j, + 2.0 + 2.0j, 2.0, + 3.0, 3.0 + 3.0j, + 4.0 + 4.0j, 4.0, + np.nan, np.nan + ] +]) +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_numeric_data(data): + # GH 17386 + lower_bound = 1.5 + + sparse = SparseSeries(data) + result = sparse.where(sparse > lower_bound) + + dense = Series(data) + dense_expected = dense.where(dense > lower_bound) + sparse_expected = SparseSeries(dense_expected) + + tm.assert_series_equal(result, dense_expected) + tm.assert_sp_series_equal(result, sparse_expected) + + +@pytest.mark.parametrize('data', [ + [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], + [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, np.nan, np.nan], + [ + 1.0, 1.0 + 1.0j, + 2.0 + 2.0j, 2.0, + 3.0, 3.0 + 3.0j, + 4.0 + 4.0j, 4.0, + np.nan, np.nan + ] +]) +@pytest.mark.parametrize('other', [ + True, + -100, + 0.1, + 100.0 + 100.0j +]) +@pytest.mark.skip(reason='Wrong SparseBlock initialization ' + '(Segfault) ' + '(GH 17386)') +def test_where_with_numeric_data_and_other(data, other): + # GH 17386 + lower_bound = 1.5 + + sparse = SparseSeries(data) + result = sparse.where(sparse > lower_bound, other) + + dense = Series(data) + dense_expected = dense.where(dense > lower_bound, other) + sparse_expected = SparseSeries(dense_expected, fill_value=other) + + tm.assert_series_equal(result, dense_expected) + tm.assert_sp_series_equal(result, sparse_expected) + + +@pytest.mark.xfail(reason='Wrong SparseBlock initialization ' + '(GH 17386)') +def test_where_with_bool_data(): + # GH 17386 + data = [False, False, True, True, False, False] + cond = True + + sparse = SparseSeries(data) + result = sparse.where(sparse == cond) + + dense = Series(data) + dense_expected = dense.where(dense == cond) + sparse_expected = SparseSeries(dense_expected) + + 
tm.assert_series_equal(result, dense_expected) + tm.assert_sp_series_equal(result, sparse_expected) + + +@pytest.mark.parametrize('other', [ + True, + 0, + 0.1, + 100.0 + 100.0j +]) +@pytest.mark.skip(reason='Wrong SparseBlock initialization ' + '(Segfault) ' + '(GH 17386)') +def test_where_with_bool_data_and_other(other): + # GH 17386 + data = [False, False, True, True, False, False] + cond = True + + sparse = SparseSeries(data) + result = sparse.where(sparse == cond, other) + + dense = Series(data) + dense_expected = dense.where(dense == cond, other) + sparse_expected = SparseSeries(dense_expected, fill_value=other) + + tm.assert_series_equal(result, dense_expected) + tm.assert_sp_series_equal(result, sparse_expected) diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/series/test_series.py similarity index 94% rename from pandas/tests/sparse/test_series.py rename to pandas/tests/sparse/series/test_series.py index 438e32b16f676..2ea1e63433520 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -1419,108 +1419,6 @@ def test_deprecated_reindex_axis(self): self.bseries.reindex_axis([0, 1, 2]) assert 'reindex' in str(m[0].message) - @pytest.mark.parametrize('data', [ - [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], - [1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, nan, nan], - [ - 1.0, 1.0 + 1.0j, - 2.0 + 2.0j, 2.0, - 3.0, 3.0 + 3.0j, - 4.0 + 4.0j, 4.0, - nan, nan - ] - ]) - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_numeric_data(self, data): - # GH 17386 - lower_bound = 1.5 - - sparse = SparseSeries(data) - result = sparse.where(sparse > lower_bound) - - dense = Series(data) - dense_expected = dense.where(dense > lower_bound) - sparse_expected = SparseSeries(dense_expected) - - tm.assert_series_equal(result, dense_expected) - tm.assert_sp_series_equal(result, sparse_expected) - - @pytest.mark.parametrize('data', [ - [1, 1, 2, 2, 3, 3, 4, 4, 0, 0], - [1.0, 1.0, 2.0, 
2.0, 3.0, 3.0, 4.0, 4.0, nan, nan], - [ - 1.0, 1.0 + 1.0j, - 2.0 + 2.0j, 2.0, - 3.0, 3.0 + 3.0j, - 4.0 + 4.0j, 4.0, - nan, nan - ] - ]) - @pytest.mark.parametrize('other', [ - True, - -100, - 0.1, - 100.0 + 100.0j - ]) - @pytest.mark.skip(reason='Wrong SparseBlock initialization ' - '(Segfault) ' - '(GH 17386)') - def test_where_with_numeric_data_and_other(self, data, other): - # GH 17386 - lower_bound = 1.5 - - sparse = SparseSeries(data) - result = sparse.where(sparse > lower_bound, other) - - dense = Series(data) - dense_expected = dense.where(dense > lower_bound, other) - sparse_expected = SparseSeries(dense_expected, fill_value=other) - - tm.assert_series_equal(result, dense_expected) - tm.assert_sp_series_equal(result, sparse_expected) - - @pytest.mark.xfail(reason='Wrong SparseBlock initialization ' - '(GH 17386)') - def test_where_with_bool_data(self): - # GH 17386 - data = [False, False, True, True, False, False] - cond = True - - sparse = SparseSeries(data) - result = sparse.where(sparse == cond) - - dense = Series(data) - dense_expected = dense.where(dense == cond) - sparse_expected = SparseSeries(dense_expected) - - tm.assert_series_equal(result, dense_expected) - tm.assert_sp_series_equal(result, sparse_expected) - - @pytest.mark.parametrize('other', [ - True, - 0, - 0.1, - 100.0 + 100.0j - ]) - @pytest.mark.skip(reason='Wrong SparseBlock initialization ' - '(Segfault) ' - '(GH 17386)') - def test_where_with_bool_data_and_other(self, other): - # GH 17386 - data = [False, False, True, True, False, False] - cond = True - - sparse = SparseSeries(data) - result = sparse.where(sparse == cond, other) - - dense = Series(data) - dense_expected = dense.where(dense == cond, other) - sparse_expected = SparseSeries(dense_expected, fill_value=other) - - tm.assert_series_equal(result, dense_expected) - tm.assert_sp_series_equal(result, sparse_expected) - @pytest.mark.parametrize( 'datetime_type', (np.datetime64,
https://api.github.com/repos/pandas-dev/pandas/pulls/18968
2017-12-28T13:17:20Z
2017-12-28T17:22:02Z
2017-12-28T17:22:02Z
2017-12-28T17:22:28Z
Misc. Typo fixes
diff --git a/appveyor.yml b/appveyor.yml index 44af73b498aa8..0aaac322c4ac7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -11,7 +11,7 @@ matrix: environment: global: # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the - # /E:ON and /V:ON options are not enabled in the batch script intepreter + # /E:ON and /V:ON options are not enabled in the batch script interpreter # See: http://stackoverflow.com/a/13751649/163740 CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\ci\\run_with_env.cmd" clone_folder: C:\projects\pandas diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index 391a209cb2a89..21b20cb123ed6 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -4,7 +4,7 @@ try: from pandas.tseries.offsets import Nano, Hour except ImportError: - # For compatability with older versions + # For compatibility with older versions from pandas.core.datetools import * # noqa from .pandas_vb_common import setup # noqa diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 4de87ddcb0683..7b4fec0090701 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -18,8 +18,8 @@ np.float64, np.int16, np.int8, np.uint16, np.uint8] datetime_dtypes = [np.datetime64, np.timedelta64] -# This function just needs to be imported into each benchmark file in order to -# sets up the random seed before each function. +# This function just needs to be imported into each benchmark file in order to +# sets up the random seed before each function. 
# http://asv.readthedocs.io/en/latest/writing_benchmarks.html def setup(*args, **kwargs): np.random.seed(1234) @@ -36,14 +36,14 @@ def remove(self, f): try: os.remove(f) except: - # On Windows, attempting to remove a file that is in use + # On Windows, attempting to remove a file that is in use # causes an exception to be raised pass def teardown(self, *args, **kwargs): self.remove(self.fname) -# Compatability import for lib +# Compatibility import for lib for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']: try: lib = import_module(imp) diff --git a/doc/source/_static/banklist.html b/doc/source/_static/banklist.html index 8ec1561f8c394..cbcce5a2d49ff 100644 --- a/doc/source/_static/banklist.html +++ b/doc/source/_static/banklist.html @@ -7,7 +7,7 @@ <meta charset="UTF-8"> <!-- Unicode character encoding --> <meta http-equiv="X-UA-Compatible" content="IE=edge"> -<!-- Turns off IE Compatiblity Mode --> +<!-- Turns off IE Compatibility Mode --> <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> <!-- Makes it so phones don't auto zoom out. --> <meta name="author" content="DRR"> @@ -4849,7 +4849,7 @@ <h1 class="page_title">Failed Bank List</h1> <ul> <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li> <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li> - <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> + <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> </ul> </div> <div id="responsive_footer-small"> diff --git a/doc/source/api.rst b/doc/source/api.rst index 68721b76eed7e..17f6b8df0170d 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -10,7 +10,7 @@ methods. In general, all classes and functions exposed in the top-level ``pandas.*`` namespace are regarded as public. 
Further some of the subpackages are public, including ``pandas.errors``, -``pandas.plotting``, and ``pandas.testing``. Certain functions in the the +``pandas.plotting``, and ``pandas.testing``. Certain functions in the ``pandas.io`` and ``pandas.tseries`` submodules are public as well (those mentioned in the documentation). Further, the ``pandas.api.types`` subpackage holds some public functions related to data types in pandas. diff --git a/doc/source/basics.rst b/doc/source/basics.rst index da82f56d315e6..74b3dbb83ea91 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -947,7 +947,7 @@ Mixed Dtypes ++++++++++++ When presented with mixed dtypes that cannot aggregate, ``.agg`` will only take the valid -aggregations. This is similiar to how groupby ``.agg`` works. +aggregations. This is similar to how groupby ``.agg`` works. .. ipython:: python diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 0994d35999191..30071c6c5b83c 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -247,7 +247,7 @@ These are created from methods on ``Series`` and ``DataFrame``. r = s.rolling(window=60) r -These object provide tab-completion of the avaible methods and properties. +These object provide tab-completion of the available methods and properties. .. code-block:: ipython diff --git a/doc/source/conf.py b/doc/source/conf.py index bcb83d5699d7e..c188f83f80250 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -380,7 +380,7 @@ 'import pandas as pd', # This ensures correct rendering on system with console encoding != utf8 # (windows). It forces pandas to encode its output reprs using utf8 - # whereever the docs are built. The docs' target is the browser, not + # wherever the docs are built. The docs' target is the browser, not # the console, so this is fine. 
'pd.options.display.encoding="utf8"' ] diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 0e5d701353d78..b25f9779d3636 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -124,16 +124,16 @@ to build the documentation locally before pushing your changes. .. _contributiong.dev_c: -Installing a C Complier +Installing a C Compiler ~~~~~~~~~~~~~~~~~~~~~~~ Pandas uses C extensions (mostly written using Cython) to speed up certain operations. To install pandas from source, you need to compile these C -extensions, which means you need a C complier. This process depends on which +extensions, which means you need a C compiler. This process depends on which platform you're using. Follow the `CPython contributing guidelines <https://docs.python.org/devguide/setup.html#build-dependencies>`_ for getting a -complier installed. You don't need to do any of the ``./configure`` or ``make`` -steps; you only need to install the complier. +compiler installed. You don't need to do any of the ``./configure`` or ``make`` +steps; you only need to install the compiler. For Windows developers, the following links may be helpful. 
@@ -151,7 +151,7 @@ Let us know if you have any difficulties by opening an issue or reaching out on Creating a Python Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Now that you have a C complier, create an isolated pandas development +Now that you have a C compiler, create an isolated pandas development environment: - Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda diff --git a/doc/source/developer.rst b/doc/source/developer.rst index 9c214020ab43d..b8bb2b2fcbe2f 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -40,7 +40,7 @@ where ``KeyValue`` is } So that a ``pandas.DataFrame`` can be faithfully reconstructed, we store a -``pandas`` metadata key in the ``FileMetaData`` with the the value stored as : +``pandas`` metadata key in the ``FileMetaData`` with the value stored as : .. code-block:: text diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index c8018c8e66f72..da9d2123bd1ca 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -470,7 +470,7 @@ derived from existing columns. .head()) In the example above, we inserted a precomputed value. We can also pass in -a function of one argument to be evalutated on the DataFrame being assigned to. +a function of one argument to be evaluated on the DataFrame being assigned to. .. ipython:: python @@ -957,7 +957,7 @@ pandas to focus on these areas exclusively. Oftentimes, one can simply use a MultiIndex ``DataFrame`` for easily working with higher dimensional data. -In additon, the ``xarray`` package was built from the ground up, specifically in order to +In addition, the ``xarray`` package was built from the ground up, specifically in order to support the multi-dimensional analysis that is one of ``Panel`` s main usecases. `Here is a link to the xarray panel-transition documentation <http://xarray.pydata.org/en/stable/pandas.html#panel-transition>`__. 
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index d2ca76713ba3b..362c998493ae8 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -173,7 +173,7 @@ Using ndarray It's calling series... a lot! It's creating a Series from each row, and get-ting from both the index and the series (three times for each row). Function calls are expensive -in python, so maybe we could minimise these by cythonizing the apply part. +in python, so maybe we could minimize these by cythonizing the apply part. .. note:: @@ -578,7 +578,7 @@ on the original ``DataFrame`` or return a copy with the new column. .. warning:: - For backwards compatability, ``inplace`` defaults to ``True`` if not + For backwards compatibility, ``inplace`` defaults to ``True`` if not specified. This will change in a future version of pandas - if your code depends on an inplace assignment you should update to explicitly set ``inplace=True`` diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index b9223c6ad9f7a..355be5039f146 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -651,7 +651,7 @@ Indexing with list with missing labels is Deprecated In prior versions, using ``.loc[list-of-labels]`` would work as long as *at least 1* of the keys was found (otherwise it would raise a ``KeyError``). This behavior is deprecated and will show a warning message pointing to this section. The -recommeded alternative is to use ``.reindex()``. +recommended alternative is to use ``.reindex()``. For example. @@ -724,7 +724,7 @@ Having a duplicated index will raise for a ``.reindex()``: In [17]: s.reindex(labels) ValueError: cannot reindex from a duplicate axis -Generally, you can interesect the desired labels with the current +Generally, you can intersect the desired labels with the current axis, and then reindex. .. 
ipython:: python diff --git a/doc/source/install.rst b/doc/source/install.rst index 6133da220aa8d..c4e331d64e721 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -152,7 +152,7 @@ To install pandas for Python 2 you may need to use the package ``python-pandas`` Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas`` Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` - OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python3-pandas`` + OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python3-pandas`` Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas`` Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas`` diff --git a/doc/source/io.rst b/doc/source/io.rst index a5a0a41147a6b..49d742d9905d7 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1048,7 +1048,7 @@ The ``thousands`` keyword allows integers to be parsed correctly NA Values ''''''''' -To control which values are parsed as missing values (which are signified by ``NaN``), specifiy a +To control which values are parsed as missing values (which are signified by ``NaN``), specify a string in ``na_values``. If you specify a list of strings, then all values in it are considered to be missing values. 
If you specify a number (a ``float``, like ``5.0`` or an ``integer`` like ``5``), the corresponding equivalent values will also imply a missing value (in this case effectively @@ -4153,7 +4153,7 @@ Caveats .. warning:: - ``PyTables`` will show a ``NaturalNameWarning`` if a column name + ``PyTables`` will show a ``NaturalNameWarning`` if a column name cannot be used as an attribute selector. *Natural* identifiers contain only letters, numbers, and underscores, and may not begin with a number. @@ -4478,7 +4478,7 @@ Several caveats. - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. -You can specifiy an ``engine`` to direct the serialization. This can be one of ``pyarrow``, or ``fastparquet``, or ``auto``. +You can specify an ``engine`` to direct the serialization. This can be one of ``pyarrow``, or ``fastparquet``, or ``auto``. If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``, then then ``pyarrow`` is tried, and falling back to ``fastparquet``. diff --git a/doc/source/options.rst b/doc/source/options.rst index 5641b2628fe40..cce16a5396377 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -164,7 +164,7 @@ lines are replaced by an ellipsis. df pd.reset_option('max_rows') -``display.expand_frame_repr`` allows for the the representation of +``display.expand_frame_repr`` allows for the representation of dataframes to stretch across pages, wrapped over the full column vs row-wise. .. ipython:: python diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 73e7704b43be6..4443428ca6c9b 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -109,7 +109,7 @@ Wes McKinney is the Benevolent Dictator for Life (BDFL). 
Development Team ----------------- -The list of the Core Team members and more detailed information can be found on the `people’s page <https://github.com/pandas-dev/pandas-governance/blob/master/people.md>`__ of the governance repo. +The list of the Core Team members and more detailed information can be found on the `people’s page <https://github.com/pandas-dev/pandas-governance/blob/master/people.md>`__ of the governance repo. Institutional Partners diff --git a/doc/source/release.rst b/doc/source/release.rst index aea6280a490d6..12932d9fcee4f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2043,7 +2043,7 @@ Bug Fixes - Fixed missing arg validation in get_options_data (:issue:`6105`) - Bug in assignment with duplicate columns in a frame where the locations are a slice (e.g. next to each other) (:issue:`6120`) -- Bug in propogating _ref_locs during construction of a DataFrame with dups +- Bug in propagating _ref_locs during construction of a DataFrame with dups index/columns (:issue:`6121`) - Bug in ``DataFrame.apply`` when using mixed datelike reductions (:issue:`6125`) - Bug in ``DataFrame.append`` when appending a row with different columns (:issue:`6129`) @@ -2056,7 +2056,7 @@ Bug Fixes - Bug in ``HDFStore`` on appending a dataframe with multi-indexed columns to an existing table (:issue:`6167`) - Consistency with dtypes in setting an empty DataFrame (:issue:`6171`) -- Bug in selecting on a multi-index ``HDFStore`` even in the presence of under +- Bug in selecting on a multi-index ``HDFStore`` even in the presence of under specified column spec (:issue:`6169`) - Bug in ``nanops.var`` with ``ddof=1`` and 1 elements would sometimes return ``inf`` rather than ``nan`` on some platforms (:issue:`6136`) @@ -2437,7 +2437,7 @@ API Changes - The refactoring involving``Series`` deriving from ``NDFrame`` breaks ``rpy2<=2.3.8``. an Issue has been opened against rpy2 and a workaround is detailed in :issue:`5698`. Thanks @JanSchulz. 
- ``Series.argmin`` and ``Series.argmax`` are now aliased to ``Series.idxmin`` and ``Series.idxmax``. - These return the *index* of the min or max element respectively. Prior to 0.13.0 these would return + These return the *index* of the min or max element respectively. Prior to 0.13.0 these would return the position of the min / max element (:issue:`6214`) Internal Refactoring @@ -3097,7 +3097,7 @@ Bug Fixes - Fixed bug where a time-series was being selected in preference to an actual column name in a frame (:issue:`3594`) - Make secondary_y work properly for bar plots (:issue:`3598`) -- Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return +- Fix modulo and integer division on Series,DataFrames to act similarly to ``float`` dtypes to return ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`) - Fix incorrect dtype on groupby with ``as_index=False`` (:issue:`3610`) - Fix ``read_csv/read_excel`` to correctly encode identical na_values, e.g. ``na_values=[-999.0,-999]`` @@ -3400,11 +3400,11 @@ Bug Fixes - Fixed bug in reshape if not passed correct input, now raises TypeError (:issue:`2719`) - Fixed a bug where Series ctor did not respect ordering if OrderedDict passed in (:issue:`3282`) - Fix NameError issue on RESO_US (:issue:`2787`) -- Allow selection in an *unordered* timeseries to work similary +- Allow selection in an *unordered* timeseries to work similarly to an *ordered* timeseries (:issue:`2437`). 
- Fix implemented ``.xs`` when called with ``axes=1`` and a level parameter (:issue:`2903`) - Timestamp now supports the class method fromordinal similar to datetimes (:issue:`3042`) -- Fix issue with indexing a series with a boolean key and specifiying a 1-len list on the rhs (:issue:`2745`) +- Fix issue with indexing a series with a boolean key and specifying a 1-len list on the rhs (:issue:`2745`) or a list on the rhs (:issue:`3235`) - Fixed bug in groupby apply when kernel generate list of arrays having unequal len (:issue:`1738`) - fixed handling of rolling_corr with center=True which could produce corr>1 (:issue:`3155`) @@ -3555,7 +3555,7 @@ Bug Fixes - Upconvert datetime + datetime64 values when concatenating frames (:issue:`2624`) - Raise a more helpful error message in merge operations when one DataFrame has duplicate columns (:issue:`2649`) -- Fix partial date parsing issue occuring only when code is run at EOM +- Fix partial date parsing issue occurring only when code is run at EOM (:issue:`2618`) - Prevent MemoryError when using counting sort in sortlevel with high-cardinality MultiIndex objects (:issue:`2684`) @@ -3973,7 +3973,7 @@ Bug Fixes - Don't lose tzinfo when passing DatetimeIndex as DataFrame column (:issue:`1682`) - Fix tz conversion with time zones that haven't had any DST transitions since first date in the array (:issue:`1673`) -- Fix field access with UTC->local conversion on unsorted arrays (:issue:`1756`) +- Fix field access with UTC->local conversion on unsorted arrays (:issue:`1756`) - Fix isnull handling of array-like (list) inputs (:issue:`1755`) - Fix regression in handling of Series in Series constructor (:issue:`1671`) - Fix comparison of Int64Index with DatetimeIndex (:issue:`1681`) @@ -4525,7 +4525,7 @@ Bug Fixes - Fix na-filling handling in mixed-type DataFrame (:issue:`910`) - Fix to DataFrame.set_value with non-existant row/col (:issue:`911`) - Fix malformed block in groupby when excluding nuisance columns (:issue:`916`) -- 
Fix inconsistant NA handling in dtype=object arrays (:issue:`925`) +- Fix inconsistent NA handling in dtype=object arrays (:issue:`925`) - Fix missing center-of-mass computation in ewmcov (:issue:`862`) - Don't raise exception when opening read-only HDF5 file (:issue:`847`) - Fix possible out-of-bounds memory access in 0-length Series (:issue:`917`) @@ -5395,9 +5395,9 @@ pandas 0.4.3 **Release date:** 10/9/2011 -is is largely a bugfix release from 0.4.2 but also includes a handful of new -d enhanced features. Also, pandas can now be installed and used on Python 3 -hanks Thomas Kluyver!). +This is largely a bugfix release from 0.4.2 but also includes a handful of new +and enhanced features. Also, pandas can now be installed and used on Python 3 +(thanks Thomas Kluyver!). New Features ~~~~~~~~~~~~ @@ -5460,9 +5460,9 @@ pandas 0.4.2 **Release date:** 10/3/2011 -is is a performance optimization release with several bug fixes. The new -t64Index and new merging / joining Cython code and related Python -frastructure are the main new additions +This is a performance optimization release with several bug fixes. 
The new +Int64Index and new merging / joining Cython code and related Python +infrastructure are the main new additions New Features ~~~~~~~~~~~~ @@ -5537,7 +5537,7 @@ pandas 0.4.1 **Release date:** 9/25/2011 -is is primarily a bug fix release but includes some new features and +This is primarily a bug fix release but includes some new features and improvements New Features diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb index 20f7c2a93b9e6..152ca90049bf1 100644 --- a/doc/source/style.ipynb +++ b/doc/source/style.ipynb @@ -318,7 +318,7 @@ "Both `Styler.apply`, and `Styler.applymap` accept a `subset` keyword.\n", "This allows you to apply styles to specific rows or columns, without having to code that logic into your `style` function.\n", "\n", - "The value passed to `subset` behaves simlar to slicing a DataFrame.\n", + "The value passed to `subset` behaves similar to slicing a DataFrame.\n", "\n", "- A scalar is treated as a column label\n", "- A list (or series or numpy array)\n", diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 26e701d008b3f..201af3c7d5355 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1580,7 +1580,7 @@ We can instead only resample those groups where we have points as follows: Aggregation ~~~~~~~~~~~ -Similar to the :ref:`aggregating API <basics.aggregate>`, :ref:`groupby API <groupby.aggregate>`, and the :ref:`window functions API <stats.aggregate>`, +Similar to the :ref:`aggregating API <basics.aggregate>`, :ref:`groupby API <groupby.aggregate>`, and the :ref:`window functions API <stats.aggregate>`, a ``Resampler`` can be selectively resampled. Resampling a ``DataFrame``, the default will be to act on all columns with the same function. @@ -2108,7 +2108,7 @@ tz-aware data to another time zone: It is incorrect to pass a timezone directly into the ``datetime.datetime`` constructor (e.g., ``datetime.datetime(2011, 1, 1, tz=timezone('US/Eastern'))``. 
Instead, the datetime - needs to be localized using the the localize method on the timezone. + needs to be localized using the localize method on the timezone. Under the hood, all timestamps are stored in UTC. Scalar values from a ``DatetimeIndex`` with a time zone will have their fields (day, hour, minute) diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index ea720a9ae4ed0..2c1d54c27caab 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -140,7 +140,7 @@ You can also create these other plots using the methods ``DataFrame.plot.<kind>` df.plot.area df.plot.barh df.plot.density df.plot.hist df.plot.line df.plot.scatter df.plot.bar df.plot.box df.plot.hexbin df.plot.kde df.plot.pie -In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() <visualization.hist>`, +In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() <visualization.hist>`, and :ref:`DataFrame.boxplot() <visualization.box>` methods, which use a separate interface. Finally, there are several :ref:`plotting functions <visualization.tools>` in ``pandas.plotting`` @@ -716,7 +716,7 @@ You can use the ``labels`` and ``colors`` keywords to specify the labels and col .. warning:: - Most pandas plots use the the ``label`` and ``color`` arguments (note the lack of "s" on those). + Most pandas plots use the ``label`` and ``color`` arguments (note the lack of "s" on those). To be consistent with :func:`matplotlib.pyplot.pie` you must use ``labels`` and ``colors``. If you want to hide wedge labels, specify ``labels=None``. @@ -1187,7 +1187,7 @@ time-series data. For limited cases where pandas cannot infer the frequency information (e.g., in an externally created ``twinx``), you can choose to suppress this behavior for alignment purposes. -Here is the default behavior, notice how the x-axis tick labelling is performed: +Here is the default behavior, notice how the x-axis tick labeling is performed: .. 
ipython:: python diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index f0db1d82252c1..a0c4a3e0073f9 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -369,7 +369,7 @@ Updated PyTables Support df1 df1.get_dtype_counts() -- performance improvments on table writing +- performance improvements on table writing - support for arbitrarily indexed dimensions - ``SparseSeries`` now has a ``density`` property (:issue:`2384`) - enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument diff --git a/doc/source/whatsnew/v0.10.1.txt b/doc/source/whatsnew/v0.10.1.txt index d5880e44e46c6..2d5843101dec2 100644 --- a/doc/source/whatsnew/v0.10.1.txt +++ b/doc/source/whatsnew/v0.10.1.txt @@ -153,7 +153,7 @@ combined result, by using ``where`` on a selector table. table - You can pass ``chunksize=an integer`` to ``append``, to change the writing - chunksize (default is 50000). This will signficantly lower your memory usage + chunksize (default is 50000). This will significantly lower your memory usage on writing. - You can pass ``expectedrows=an integer`` to the first ``append``, to set the diff --git a/doc/source/whatsnew/v0.11.0.txt b/doc/source/whatsnew/v0.11.0.txt index ea149595e681f..b90a597815ec5 100644 --- a/doc/source/whatsnew/v0.11.0.txt +++ b/doc/source/whatsnew/v0.11.0.txt @@ -88,7 +88,7 @@ Numeric dtypes will propagate and can coexist in DataFrames. If a dtype is passe Dtype Conversion ~~~~~~~~~~~~~~~~ -This is lower-common-denomicator upcasting, meaning you get the dtype which can accomodate all of the types +This is lower-common-denominator upcasting, meaning you get the dtype which can accommodate all of the types .. 
ipython:: python @@ -193,7 +193,7 @@ Furthermore ``datetime64[ns]`` columns are created by default, when passed datet df.loc[df.index[2:4], ['A','timestamp']] = np.nan df -Astype conversion on ``datetime64[ns]`` to ``object``, implicity converts ``NaT`` to ``np.nan`` +Astype conversion on ``datetime64[ns]`` to ``object``, implicitly converts ``NaT`` to ``np.nan`` .. ipython:: python diff --git a/doc/source/whatsnew/v0.12.0.txt b/doc/source/whatsnew/v0.12.0.txt index 27aa47a6bb097..ad33c49792d9f 100644 --- a/doc/source/whatsnew/v0.12.0.txt +++ b/doc/source/whatsnew/v0.12.0.txt @@ -38,7 +38,7 @@ API changes * ``to_clipboard`` - - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return + - Fix modulo and integer division on Series,DataFrames to act similarly to ``float`` dtypes to return ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`). This correct a numpy bug that treats ``integer`` and ``float`` dtypes differently. @@ -154,7 +154,7 @@ API changes - The behavior of ``datetime64`` dtypes has changed with respect to certain so-called reduction operations (:issue:`3726`). 
The following operations now - raise a ``TypeError`` when perfomed on a ``Series`` and return an *empty* + raise a ``TypeError`` when performed on a ``Series`` and return an *empty* ``Series`` when performed on a ``DataFrame`` similar to performing these operations on, for example, a ``DataFrame`` of ``slice`` objects: @@ -206,11 +206,11 @@ I/O Enhancements :ref:`See the installation docs<install.optional_dependencies>` - Added module for reading and writing Stata files: ``pandas.io.stata`` (:issue:`1512`) - accessable via ``read_stata`` top-level function for reading, + accessible via ``read_stata`` top-level function for reading, and ``to_stata`` DataFrame method for writing, :ref:`See the docs<io.stata>` - Added module for reading and writing json format files: ``pandas.io.json`` - accessable via ``read_json`` top-level function for reading, + accessible via ``read_json`` top-level function for reading, and ``to_json`` DataFrame method for writing, :ref:`See the docs<io.json>` various issues (:issue:`1226`, :issue:`3804`, :issue:`3876`, :issue:`3867`, :issue:`1305`) @@ -220,7 +220,7 @@ I/O Enhancements list of the rows from which to read the index. - The option, ``tupleize_cols`` can now be specified in both ``to_csv`` and - ``read_csv``, to provide compatiblity for the pre 0.12 behavior of + ``read_csv``, to provide compatibility for the pre 0.12 behavior of writing and reading ``MultIndex`` columns via a list of tuples. The default in 0.12 is to write lists of tuples and *not* interpret list of tuples as a ``MultiIndex`` column. diff --git a/doc/source/whatsnew/v0.14.0.txt b/doc/source/whatsnew/v0.14.0.txt index f1feab4b909dc..be962ceb181ff 100644 --- a/doc/source/whatsnew/v0.14.0.txt +++ b/doc/source/whatsnew/v0.14.0.txt @@ -83,7 +83,7 @@ API changes been removed, instead a header with the column names is returned (:issue:`6062`). - ``Series`` and ``Index`` now internall share more common operations, e.g. 
``factorize(),nunique(),value_counts()`` are now supported on ``Index`` types as well. The ``Series.weekday`` property from is removed - from Series for API consistency. Using a ``DatetimeIndex/PeriodIndex`` method on a Series will now raise a ``TypeError``. + from Series for API consistency. Using a ``DatetimeIndex/PeriodIndex`` method on a Series will now raise a ``TypeError``. (:issue:`4551`, :issue:`4056`, :issue:`5519`, :issue:`6380`, :issue:`7206`). - Add ``is_month_start``, ``is_month_end``, ``is_quarter_start``, ``is_quarter_end``, ``is_year_start``, ``is_year_end`` accessors for ``DateTimeIndex`` / ``Timestamp`` which return a boolean array of whether the timestamp(s) are at the start/end of the month/quarter/year defined by the frequency of the ``DateTimeIndex`` / ``Timestamp`` (:issue:`4565`, :issue:`6998`) @@ -284,7 +284,7 @@ Display Changes `large_repr` set to 'info' (:issue:`7105`) - The `verbose` keyword in ``DataFrame.info()``, which controls whether to shorten the ``info`` representation, is now ``None`` by default. This will follow the global setting in - ``display.max_info_columns``. The global setting can be overriden with ``verbose=True`` or + ``display.max_info_columns``. The global setting can be overridden with ``verbose=True`` or ``verbose=False``. - Fixed a bug with the `info` repr not honoring the `display.max_info_columns` setting (:issue:`6939`) - Offset/freq info now in Timestamp __repr__ (:issue:`4553`) @@ -446,7 +446,7 @@ Some other enhancements to the sql functions include: - support for writing the index. This can be controlled with the ``index`` keyword (default is True). - specify the column label to use when writing the index with ``index_label``. -- specify string columns to parse as datetimes withh the ``parse_dates`` +- specify string columns to parse as datetimes with the ``parse_dates`` keyword in :func:`~pandas.read_sql_query` and :func:`~pandas.read_sql_table`. .. 
warning:: @@ -596,15 +596,15 @@ Plotting - `align`: Specify the bar alignment. Default is `center` (different from matplotlib). In previous versions, pandas passes `align='edge'` to matplotlib and adjust the location to `center` by itself, and it results `align` keyword is not applied as expected. (:issue:`4525`) - `position`: Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1(right/top-end). Default is 0.5 (center). (:issue:`6604`) - Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coodinates as line plot. However, bar plot may differs unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In this cases, please modify your script to meet with new coordinates. + Because of the default `align` value changes, coordinates of bar plots are now located on integer values (0.0, 1.0, 2.0 ...). This is intended to make bar plot be located on the same coordinates as line plot. However, bar plot may differ unexpectedly when you manually adjust the bar location or drawing area, such as using `set_xlim`, `set_ylim`, etc. In these cases, please modify your script to meet with new coordinates. - The :func:`parallel_coordinates` function now takes argument ``color`` - instead of ``colors``. A ``FutureWarning`` is raised to alert that + instead of ``colors``. A ``FutureWarning`` is raised to alert that the old ``colors`` argument will not be supported in a future release. (:issue:`6956`) - The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is - raised if the old ``data`` argument is used by name.
(:issue:`6956`) - :meth:`DataFrame.boxplot` now supports ``layout`` keyword (:issue:`6769`) - :meth:`DataFrame.boxplot` has a new keyword argument, `return_type`. It accepts ``'dict'``, @@ -645,17 +645,17 @@ Deprecations - The :func:`pivot_table`/:meth:`DataFrame.pivot_table` and :func:`crosstab` functions now take arguments ``index`` and ``columns`` instead of ``rows`` and ``cols``. A - ``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments + ``FutureWarning`` is raised to alert that the old ``rows`` and ``cols`` arguments will not be supported in a future release (:issue:`5505`) - The :meth:`DataFrame.drop_duplicates` and :meth:`DataFrame.duplicated` methods now take argument ``subset`` instead of ``cols`` to better align with - :meth:`DataFrame.dropna`. A ``FutureWarning`` is raised to alert that the old + :meth:`DataFrame.dropna`. A ``FutureWarning`` is raised to alert that the old ``cols`` arguments will not be supported in a future release (:issue:`6680`) - The :meth:`DataFrame.to_csv` and :meth:`DataFrame.to_excel` functions now takes argument ``columns`` instead of ``cols``. A - ``FutureWarning`` is raised to alert that the old ``cols`` arguments + ``FutureWarning`` is raised to alert that the old ``cols`` arguments will not be supported in a future release (:issue:`6645`) - Indexers will warn ``FutureWarning`` when used with a scalar indexer and @@ -698,12 +698,12 @@ Deprecations ALWAYS return a view. (:issue:`6894`) - The :func:`parallel_coordinates` function now takes argument ``color`` - instead of ``colors``. A ``FutureWarning`` is raised to alert that + instead of ``colors``. A ``FutureWarning`` is raised to alert that the old ``colors`` argument will not be supported in a future release. (:issue:`6956`) - The :func:`parallel_coordinates` and :func:`andrews_curves` functions now take positional argument ``frame`` instead of ``data``. A ``FutureWarning`` is - raised if the old ``data`` argument is used by name. 
(:issue:`6956`) + raised if the old ``data`` argument is used by name. (:issue:`6956`) - The support for the 'mysql' flavor when using DBAPI connection objects has been deprecated. MySQL will be further supported with SQLAlchemy engines (:issue:`6900`). @@ -899,7 +899,7 @@ Bug Fixes - Raise when trying to align on different levels of a multi-index assignment (:issue:`3738`) - Bug in setting complex dtypes via boolean indexing (:issue:`6345`) - Bug in TimeGrouper/resample when presented with a non-monotonic DatetimeIndex that would return invalid results. (:issue:`4161`) -- Bug in index name propogation in TimeGrouper/resample (:issue:`4161`) +- Bug in index name propagation in TimeGrouper/resample (:issue:`4161`) - TimeGrouper has a more compatible API to the rest of the groupers (e.g. ``groups`` was missing) (:issue:`3881`) - Bug in multiple grouping with a TimeGrouper depending on target column order (:issue:`6764`) - Bug in ``pd.eval`` when parsing strings with possible tokens like ``'&'`` @@ -976,7 +976,7 @@ Bug Fixes clean`` (:issue:`6768`) - Bug with numpy < 1.7.2 when reading long strings from ``HDFStore`` (:issue:`6166`) - Bug in ``DataFrame._reduce`` where non bool-like (0/1) integers were being - coverted into bools. (:issue:`6806`) + converted into bools. (:issue:`6806`) - Regression from 0.13 with ``fillna`` and a Series on datetime-like (:issue:`6344`) - Bug in adding ``np.timedelta64`` to ``DatetimeIndex`` with timezone outputs incorrect results (:issue:`6818`) - Bug in ``DataFrame.replace()`` where changing a dtype through replacement diff --git a/doc/source/whatsnew/v0.14.1.txt b/doc/source/whatsnew/v0.14.1.txt index 239d6c9c6e0d4..d8a6dc1793612 100644 --- a/doc/source/whatsnew/v0.14.1.txt +++ b/doc/source/whatsnew/v0.14.1.txt @@ -75,7 +75,7 @@ API changes Note that for the other offsets the default behaviour did not change. 
-- Add back ``#N/A N/A`` as a default NA value in text parsing, (regresion from 0.12) (:issue:`5521`) +- Add back ``#N/A N/A`` as a default NA value in text parsing, (regression from 0.12) (:issue:`5521`) - Raise a ``TypeError`` on inplace-setting with a ``.where`` and a non ``np.nan`` value as this is inconsistent with a set-item expression like ``df[mask] = None`` (:issue:`7656`) @@ -88,7 +88,7 @@ Enhancements - Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`). - Add :meth:`~pandas.DataFrame.select_dtypes` method to allow selection of columns based on dtype (:issue:`7316`). See :ref:`the docs <basics.selectdtypes>`. -- All ``offsets`` suppports the ``normalize`` keyword to specify whether +- All ``offsets`` supports the ``normalize`` keyword to specify whether ``offsets.apply``, ``rollforward`` and ``rollback`` resets the time (hour, minute, etc) or not (default ``False``, preserves time) (:issue:`7156`): diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index e44bc6e9e91e0..ef17904d5ab1a 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -22,7 +22,7 @@ users upgrade to this version. 
- ``read_csv`` will now by default ignore blank lines when parsing, see :ref:`here <whatsnew_0150.blanklines>` - API change in using Indexes in set operations, see :ref:`here <whatsnew_0150.index_set_ops>` - Enhancements in the handling of timezones, see :ref:`here <whatsnew_0150.tz>` - - A lot of improvements to the rolling and expanding moment funtions, see :ref:`here <whatsnew_0150.roll>` + - A lot of improvements to the rolling and expanding moment functions, see :ref:`here <whatsnew_0150.roll>` - Internal refactoring of the ``Index`` class to no longer sub-class ``ndarray``, see :ref:`Internal Refactoring <whatsnew_0150.refactoring>` - dropping support for ``PyTables`` less than version 3.0.0, and ``numexpr`` less than version 2.1 (:issue:`7990`) - Split indexing documentation into :ref:`Indexing and Selecting Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>` @@ -326,7 +326,7 @@ Timezone handling improvements - ``Timestamp.tz_localize`` and ``Timestamp.tz_convert`` now raise ``TypeError`` in error cases, rather than ``Exception`` (:issue:`8025`) -- a timeseries/index localized to UTC when inserted into a Series/DataFrame will preserve the UTC timezone (rather than being a naive ``datetime64[ns]``) as ``object`` dtype (:issue:`8411`) +- a timeseries/index localized to UTC when inserted into a Series/DataFrame will preserve the UTC timezone (rather than being a naive ``datetime64[ns]``) as ``object`` dtype (:issue:`8411`) - ``Timestamp.__repr__`` displays ``dateutil.tz.tzoffset`` info (:issue:`7907`) @@ -837,7 +837,7 @@ Other notable API changes: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead - See the the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy + See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy - ``merge``, ``DataFrame.merge``, and ``ordered_merge`` now return the same type as the ``left`` argument (:issue:`7737`). @@ -878,7 +878,7 @@ a transparent change with only very limited API implications (:issue:`5080`, :is - you may need to unpickle pandas version < 0.15.0 pickles using ``pd.read_pickle`` rather than ``pickle.load``. See :ref:`pickle docs <io.pickle>` - when plotting with a ``PeriodIndex``, the matplotlib internal axes will now be arrays of ``Period`` rather than a ``PeriodIndex`` (this is similar to how a ``DatetimeIndex`` passes arrays of ``datetimes`` now) -- MultiIndexes will now raise similary to other pandas objects w.r.t. truth testing, see :ref:`here <gotchas.truth>` (:issue:`7897`). +- MultiIndexes will now raise similarly to other pandas objects w.r.t. truth testing, see :ref:`here <gotchas.truth>` (:issue:`7897`). - When plotting a DatetimeIndex directly with matplotlib's `plot` function, the axis labels will no longer be formatted as dates but as integers (the internal representation of a ``datetime64``). 
**UPDATE** This is fixed @@ -1118,7 +1118,7 @@ Bug Fixes - Bug in multi-index slicing with various edge cases (:issue:`8132`) - Regression in multi-index indexing with a non-scalar type object (:issue:`7914`) - Bug in ``Timestamp`` comparisons with ``==`` and ``int64`` dtype (:issue:`8058`) -- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is reffered internally (:issue:`7748`) +- Bug in pickles contains ``DateOffset`` may raise ``AttributeError`` when ``normalize`` attribute is referred internally (:issue:`7748`) - Bug in ``Panel`` when using ``major_xs`` and ``copy=False`` is passed (deprecation warning fails because of missing ``warnings``) (:issue:`8152`). - Bug in pickle deserialization that failed for pre-0.14.1 containers with dup items trying to avoid ambiguity when matching block and manager items, when there's only one block there's no ambiguity (:issue:`7794`) diff --git a/doc/source/whatsnew/v0.15.1.txt b/doc/source/whatsnew/v0.15.1.txt index cd9298c74539a..f84f25d3e906c 100644 --- a/doc/source/whatsnew/v0.15.1.txt +++ b/doc/source/whatsnew/v0.15.1.txt @@ -274,7 +274,7 @@ Enhancements Bug Fixes ~~~~~~~~~ -- Bug in unpickling of a ``CustomBusinessDay`` object (:issue:`8591`) +- Bug in unpickling of a ``CustomBusinessDay`` object (:issue:`8591`) - Bug in coercing ``Categorical`` to a records array, e.g. 
``df.to_records()`` (:issue:`8626`) - Bug in ``Categorical`` not created properly with ``Series.to_frame()`` (:issue:`8626`) - Bug in coercing in astype of a ``Categorical`` of a passed ``pd.Categorical`` (this now raises ``TypeError`` correctly), (:issue:`8626`) diff --git a/doc/source/whatsnew/v0.15.2.txt b/doc/source/whatsnew/v0.15.2.txt index b908b60334f4c..f1dfab0f57ed3 100644 --- a/doc/source/whatsnew/v0.15.2.txt +++ b/doc/source/whatsnew/v0.15.2.txt @@ -215,7 +215,7 @@ Bug Fixes - ``io.data.Options`` now raises ``RemoteDataError`` when no expiry dates are available from Yahoo and when it receives no data from Yahoo (:issue:`8761`), (:issue:`8783`). - Fix: The font size was only set on x axis if vertical or the y axis if horizontal. (:issue:`8765`) - Fixed division by 0 when reading big csv files in python 3 (:issue:`8621`) -- Bug in outputing a Multindex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) +- Bug in outputting a MultiIndex with ``to_html,index=False`` which would add an extra column (:issue:`8452`) - Imported categorical variables from Stata files retain the ordinal information in the underlying data (:issue:`8836`). - Defined ``.size`` attribute across ``NDFrame`` objects to provide compat with numpy >= 1.9.1; buggy with ``np.array_split`` (:issue:`8846`) - Skip testing of histogram plots for matplotlib <= 1.2 (:issue:`8648`). diff --git a/doc/source/whatsnew/v0.16.0.txt b/doc/source/whatsnew/v0.16.0.txt index 8238cc32d7bb0..48af06d124f2e 100644 --- a/doc/source/whatsnew/v0.16.0.txt +++ b/doc/source/whatsnew/v0.16.0.txt @@ -56,7 +56,7 @@ and the entire DataFrame (with all original and new columns) is returned. iris.assign(sepal_ratio=iris['SepalWidth'] / iris['SepalLength']).head() Above was an example of inserting a precomputed value. We can also pass in -a function to be evalutated. +a function to be evaluated. ..
ipython :: python @@ -595,7 +595,7 @@ Bug Fixes - Bug in ``unstack`` with ``TimedeltaIndex`` or ``DatetimeIndex`` and nulls (:issue:`9491`). - Bug in ``rank`` where comparing floats with tolerance will cause inconsistent behaviour (:issue:`8365`). - Fixed character encoding bug in ``read_stata`` and ``StataReader`` when loading data from a URL (:issue:`9231`). -- Bug in adding ``offsets.Nano`` to other offets raises ``TypeError`` (:issue:`9284`) +- Bug in adding ``offsets.Nano`` to other offsets raises ``TypeError`` (:issue:`9284`) - Bug in ``DatetimeIndex`` iteration, related to (:issue:`8890`), fixed in (:issue:`9100`) - Bugs in ``resample`` around DST transitions. This required fixing offset classes so they behave correctly on DST transitions. (:issue:`5172`, :issue:`8744`, :issue:`8653`, :issue:`9173`, :issue:`9468`). - Bug in binary operator method (eg ``.mul()``) alignment with integer levels (:issue:`9463`). @@ -611,7 +611,7 @@ Bug Fixes - Accessing ``Series.str`` methods on with non-string values now raises ``TypeError`` instead of producing incorrect results (:issue:`9184`) - Bug in ``DatetimeIndex.__contains__`` when index has duplicates and is not monotonic increasing (:issue:`9512`) - Fixed division by zero error for ``Series.kurt()`` when all values are equal (:issue:`9197`) -- Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format wass applied. This prevented other row or column formatting being applied. (:issue:`9167`) +- Fixed issue in the ``xlsxwriter`` engine where it added a default 'General' format to cells if no other format was applied. This prevented other row or column formatting being applied. (:issue:`9167`) - Fixes issue with ``index_col=False`` when ``usecols`` is also specified in ``read_csv``. (:issue:`9082`) - Bug where ``wide_to_long`` would modify the input stubnames list (:issue:`9204`) - Bug in ``to_sql`` not storing float64 values using double precision. 
(:issue:`9009`) diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt index a3bbaf73c01ca..239b2ba96404c 100644 --- a/doc/source/whatsnew/v0.17.0.txt +++ b/doc/source/whatsnew/v0.17.0.txt @@ -1157,7 +1157,7 @@ Bug Fixes - Bug in ``.var()`` causing roundoff errors for highly similar values (:issue:`10242`) - Bug in ``DataFrame.plot(subplots=True)`` with duplicated columns outputs incorrect result (:issue:`10962`) - Bug in ``Index`` arithmetic may result in incorrect class (:issue:`10638`) -- Bug in ``date_range`` results in empty if freq is negative annualy, quarterly and monthly (:issue:`11018`) +- Bug in ``date_range`` results in empty if freq is negative annually, quarterly and monthly (:issue:`11018`) - Bug in ``DatetimeIndex`` cannot infer negative freq (:issue:`11018`) - Remove use of some deprecated numpy comparison operations, mainly in tests. (:issue:`10569`) - Bug in ``Index`` dtype may not applied properly (:issue:`11017`) diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index 1ad7279ea79f7..d5ed0503d9ee3 100644 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -157,11 +157,11 @@ Bug Fixes - ``Series.sort_index()`` now correctly handles the ``inplace`` option (:issue:`11402`) - Incorrectly distributed .c file in the build on ``PyPi`` when reading a csv of floats and passing ``na_values=<a scalar>`` would show an exception (:issue:`11374`) - Bug in ``.to_latex()`` output broken when the index has a name (:issue:`10660`) -- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`) +- Bug in ``HDFStore.append`` with strings whose encoded length exceeded the max unencoded length (:issue:`11234`) - Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`) - Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`) - Bug in using ``DataFrame.ix`` with a multi-index indexer 
(:issue:`11372`) -- Bug in ``date_range`` with ambigous endpoints (:issue:`11626`) +- Bug in ``date_range`` with ambiguous endpoints (:issue:`11626`) - Prevent adding new attributes to the accessors ``.str``, ``.dt`` and ``.cat``. Retrieving such a value was not possible, so error out on setting it. (:issue:`10673`) - Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`) diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt index 4b27cf706f9b2..bfd314639aa60 100644 --- a/doc/source/whatsnew/v0.18.0.txt +++ b/doc/source/whatsnew/v0.18.0.txt @@ -217,7 +217,7 @@ It returns a ``DataFrame`` with one column if ``expand=True``. pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=True) Calling on an ``Index`` with a regex with exactly one capture group -returns an ``Index`` if ``expand=False``. +returns an ``Index`` if ``expand=False``. .. ipython:: python @@ -944,7 +944,7 @@ assignment should be done inplace or return a copy. .. warning:: - For backwards compatability, ``inplace`` defaults to ``True`` if not specified. + For backwards compatibility, ``inplace`` defaults to ``True`` if not specified. This will change in a future version of pandas. If your code depends on an inplace assignment you should update to explicitly set ``inplace=True`` @@ -1039,7 +1039,7 @@ Deprecations 2 0.5 dtype: float64 -- The the ``freq`` and ``how`` arguments to the ``.rolling``, ``.expanding``, and ``.ewm`` (new) functions are deprecated, and will be removed in a future version. You can simply resample the input prior to creating a window function. (:issue:`11603`). +- The ``freq`` and ``how`` arguments to the ``.rolling``, ``.expanding``, and ``.ewm`` (new) functions are deprecated, and will be removed in a future version. You can simply resample the input prior to creating a window function. (:issue:`11603`). 
For example, instead of ``s.rolling(window=5,freq='D').max()`` to get the max value on a rolling 5 Day window, one could use ``s.resample('D').mean().rolling(window=5).max()``, which first resamples the data to daily data, then provides a rolling 5 day window. diff --git a/doc/source/whatsnew/v0.18.1.txt b/doc/source/whatsnew/v0.18.1.txt index ca386da03295d..de9a5d5d8afae 100644 --- a/doc/source/whatsnew/v0.18.1.txt +++ b/doc/source/whatsnew/v0.18.1.txt @@ -226,7 +226,7 @@ Other Enhancements ^^^^^^^^^^^^^^^^^^ - ``pd.read_csv()`` now supports ``delim_whitespace=True`` for the Python engine (:issue:`12958`) -``pd.read_csv()`` now supports opening ZIP files that contains a single CSV, via extension inference or explict ``compression='zip'`` (:issue:`12175`) +``pd.read_csv()`` now supports opening ZIP files that contain a single CSV, via extension inference or explicit ``compression='zip'`` (:issue:`12175`) - ``pd.read_csv()`` now supports opening files using xz compression, via extension inference or explicit ``compression='xz'`` is specified; ``xz`` compressions is also supported by ``DataFrame.to_csv`` in the same way (:issue:`11852`) - ``pd.read_msgpack()`` now always gives writeable ndarrays even when compression is used (:issue:`12359`).
- ``pd.read_msgpack()`` now supports serializing and de-serializing categoricals with msgpack (:issue:`12573`) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 6093e53029cb6..302105c1e653c 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -1413,7 +1413,7 @@ Performance Improvements - Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`) - Improved performance of by lazily creating indexing hashtables on larger Indexes (:issue:`14266`) - Improved performance of ``groupby.groups`` (:issue:`14293`) -- Unecessary materializing of a MultiIndex when introspecting for memory usage (:issue:`14308`) +- Unnecessary materializing of a MultiIndex when introspecting for memory usage (:issue:`14308`) .. _whatsnew_0190.bug_fixes: @@ -1454,7 +1454,7 @@ Bug Fixes - Bug in ``.tz_localize`` with ``dateutil.tz.tzlocal`` may return incorrect result (:issue:`13583`) - Bug in ``DatetimeTZDtype`` dtype with ``dateutil.tz.tzlocal`` cannot be regarded as valid dtype (:issue:`13583`) - Bug in ``pd.read_hdf()`` where attempting to load an HDF file with a single dataset, that had one or more categorical columns, failed unless the key argument was set to the name of the dataset. 
(:issue:`13231`) -- Bug in ``.rolling()`` that allowed a negative integer window in contruction of the ``Rolling()`` object, but would later fail on aggregation (:issue:`13383`) +- Bug in ``.rolling()`` that allowed a negative integer window in construction of the ``Rolling()`` object, but would later fail on aggregation (:issue:`13383`) - Bug in ``Series`` indexing with tuple-valued data and a numeric index (:issue:`13509`) - Bug in printing ``pd.DataFrame`` where unusual elements with the ``object`` dtype were causing segfaults (:issue:`13717`) - Bug in ranking ``Series`` which could result in segfaults (:issue:`13445`) diff --git a/doc/source/whatsnew/v0.19.2.txt b/doc/source/whatsnew/v0.19.2.txt index 722e494c9e614..171d97b76de75 100644 --- a/doc/source/whatsnew/v0.19.2.txt +++ b/doc/source/whatsnew/v0.19.2.txt @@ -26,7 +26,7 @@ Enhancements The ``pd.merge_asof()``, added in 0.19.0, gained some improvements: - ``pd.merge_asof()`` gained ``left_index``/``right_index`` and ``left_by``/``right_by`` arguments (:issue:`14253`) -- ``pd.merge_asof()`` can take multiple columns in ``by`` parameter and has specialized dtypes for better performace (:issue:`13936`) +- ``pd.merge_asof()`` can take multiple columns in ``by`` parameter and has specialized dtypes for better performance (:issue:`13936`) .. _whatsnew_0192.performance: @@ -62,7 +62,7 @@ Bug Fixes - Bug in ``pd.to_numeric`` where a 0 was not unsigned on a ``downcast='unsigned'`` argument (:issue:`14401`) - Bug in plotting regular and irregular timeseries using shared axes (``sharex=True`` or ``ax.twinx()``) (:issue:`13341`, :issue:`14322`). 
-- Bug in not propogating exceptions in parsing invalid datetimes, noted in python 3.6 (:issue:`14561`) +- Bug in not propagating exceptions in parsing invalid datetimes, noted in python 3.6 (:issue:`14561`) - Bug in resampling a ``DatetimeIndex`` in local TZ, covering a DST change, which would raise ``AmbiguousTimeError`` (:issue:`14682`) - Bug in indexing that transformed ``RecursionError`` into ``KeyError`` or ``IndexingError`` (:issue:`14554`) - Bug in ``HDFStore`` when writing a ``MultiIndex`` when using ``data_columns=True`` (:issue:`14435`) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index fc869956c820e..d04a34f7a44d6 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -97,7 +97,7 @@ The API also supports a ``.transform()`` function for broadcasting results. df.transform(['abs', lambda x: x - x.min()]) When presented with mixed dtypes that cannot be aggregated, ``.agg()`` will only take the valid -aggregations. This is similiar to how groupby ``.agg()`` works. (:issue:`15015`) +aggregations. This is similar to how groupby ``.agg()`` works. (:issue:`15015`) .. ipython:: python @@ -1504,7 +1504,7 @@ Other Deprecations - ``TimedeltaIndex.searchsorted()``, ``DatetimeIndex.searchsorted()``, and ``PeriodIndex.searchsorted()`` have deprecated the ``key`` parameter in favor of ``value`` (:issue:`12662`) - ``DataFrame.astype()`` has deprecated the ``raise_on_error`` parameter in favor of ``errors`` (:issue:`14878`) - ``Series.sortlevel`` and ``DataFrame.sortlevel`` have been deprecated in favor of ``Series.sort_index`` and ``DataFrame.sort_index`` (:issue:`15099`) -- importing ``concat`` from ``pandas.tools.merge`` has been deprecated in favor of imports from the ``pandas`` namespace. This should only affect explict imports (:issue:`15358`) +- importing ``concat`` from ``pandas.tools.merge`` has been deprecated in favor of imports from the ``pandas`` namespace. 
This should only affect explicit imports (:issue:`15358`) - ``Series/DataFrame/Panel.consolidate()`` been deprecated as a public method. (:issue:`15483`) - The ``as_indexer`` keyword of ``Series.str.match()`` has been deprecated (ignored keyword) (:issue:`15257`). - The following top-level pandas functions have been deprecated and will be removed in a future version (:issue:`13790`, :issue:`15940`) diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index a7dde5d6ee410..67c52dac6128d 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -168,4 +168,4 @@ Categorical String ^^^^^^ -- :meth:`Series.str.split()` will now propogate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) +- :meth:`Series.str.split()` will now propagate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 0f6660d2f4125..d6766afdf9d4a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -127,7 +127,7 @@ Other Enhancements - Better support for :func:`Dataframe.style.to_excel` output with the ``xlsxwriter`` engine. (:issue:`16149`) - :func:`pandas.tseries.frequencies.to_offset` now accepts leading '+' signs e.g. '+1h'. 
(:issue:`18171`) - :func:`MultiIndex.unique` now supports the ``level=`` argument, to get unique values from a specific index level (:issue:`17896`) -- :class:`pandas.io.formats.style.Styler` now has method ``hide_index()`` to determine whether the index will be rendered in ouptut (:issue:`14194`) +- :class:`pandas.io.formats.style.Styler` now has method ``hide_index()`` to determine whether the index will be rendered in output (:issue:`14194`) - :class:`pandas.io.formats.style.Styler` now has method ``hide_columns()`` to determine whether columns will be hidden in output (:issue:`14194`) - Improved wording of ``ValueError`` raised in :func:`to_datetime` when ``unit=`` is passed with a non-convertible value (:issue:`14350`) - :func:`Series.fillna` now accepts a Series or a dict as a ``value`` for a categorical dtype (:issue:`17033`) @@ -172,7 +172,7 @@ Build Changes ^^^^^^^^^^^^^ - Building pandas for development now requires ``cython >= 0.24`` (:issue:`18613`) -- Building from source now explicity requires ``setuptools`` in ``setup.py`` (:issue:`18113`) +- Building from source now explicitly requires ``setuptools`` in ``setup.py`` (:issue:`18113`) - Updated conda recipe to be in compliance with conda-build 3.0+ (:issue:`18002`) .. _whatsnew_0230.api: @@ -231,7 +231,7 @@ Removal of prior version deprecations/changes - Warnings against the obsolete usage ``Categorical(codes, categories)``, which were emitted for instance when the first two arguments to ``Categorical()`` had different dtypes, and recommended the use of ``Categorical.from_codes``, have now been removed (:issue:`8074`) - The ``levels`` and ``labels`` attributes of a ``MultiIndex`` can no longer be set directly (:issue:`4039`). -- ``pd.tseries.util.pivot_annual`` has been removed (deprecated since v0.19). Use ``pivot_table`` instead (:issue:`18370`) +- ``pd.tseries.util.pivot_annual`` has been removed (deprecated since v0.19). 
Use ``pivot_table`` instead (:issue:`18370`) - ``pd.tseries.util.isleapyear`` has been removed (deprecated since v0.19). Use ``.is_leap_year`` property in Datetime-likes instead (:issue:`18370`) - ``pd.ordered_merge`` has been removed (deprecated since v0.19). Use ``pd.merge_ordered`` instead (:issue:`18459`) - The ``SparseList`` class has been removed (:issue:`14007`) @@ -257,7 +257,7 @@ Performance Improvements - :class`DateOffset` arithmetic performance is improved (:issue:`18218`) - Converting a ``Series`` of ``Timedelta`` objects to days, seconds, etc... sped up through vectorization of underlying methods (:issue:`18092`) - Improved performance of ``.map()`` with a ``Series/dict`` input (:issue:`15081`) -- The overriden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`) +- The overridden ``Timedelta`` properties of days, seconds and microseconds have been removed, leveraging their built-in Python versions instead (:issue:`18242`) - ``Series`` construction will reduce the number of copies made of the input data in certain cases (:issue:`17449`) - Improved performance of :func:`Series.dt.date` and :func:`DatetimeIndex.date` (:issue:`18058`) - Improved performance of :func:`Series.dt.time` and :func:`DatetimeIndex.time` (:issue:`18461`) diff --git a/doc/source/whatsnew/v0.8.1.txt b/doc/source/whatsnew/v0.8.1.txt index 8227bc6bc9c66..add96bec9d1dd 100644 --- a/doc/source/whatsnew/v0.8.1.txt +++ b/doc/source/whatsnew/v0.8.1.txt @@ -32,5 +32,5 @@ Performance improvements strings with ``DatetimeIndex`` or ``to_datetime`` (:issue:`1571`) - Improve the performance of GroupBy on single-key aggregations and use with Categorical types - - Significant datetime parsing performance improvments + - Significant datetime parsing performance improvements diff --git a/doc/source/whatsnew/v0.9.1.txt b/doc/source/whatsnew/v0.9.1.txt index 4faf38219ebee..e2d6d7a275086 100644 --- 
a/doc/source/whatsnew/v0.9.1.txt +++ b/doc/source/whatsnew/v0.9.1.txt @@ -80,7 +80,7 @@ New features df.where(df>0,-df) Furthermore, `where` now aligns the input boolean condition (ndarray or DataFrame), such that partial selection - with setting is possible. This is analagous to partial setting via `.ix` (but on the contents rather than the axis labels) + with setting is possible. This is analogous to partial setting via `.ix` (but on the contents rather than the axis labels) .. ipython:: python diff --git a/doc/sphinxext/README.rst b/doc/sphinxext/README.rst index e39cf8daac036..2be5372bc0216 100644 --- a/doc/sphinxext/README.rst +++ b/doc/sphinxext/README.rst @@ -14,4 +14,4 @@ pandas documentation. These copies originate from other projects: These copies are maintained at the respective projects, so fixes should, to the extent possible, be pushed upstream instead of only adapting our - local copy to avoid divergence between the the local and upstream version. + local copy to avoid divergence between the local and upstream version. diff --git a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py index dfb489e49394d..c5ec26aefd442 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py @@ -83,7 +83,7 @@ def get_tokens_unprocessed(self, text): curcode += line[continue_prompt.end():] elif output_prompt is not None: # Use the 'error' token for output. We should probably make - # our own token, but error is typicaly in a bright color like + # our own token, but error is typically in a bright color like # red, so it works fine for our output prompts. 
insertions.append((len(curcode), [(0, Generic.Error, output_prompt.group())])) diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index 4f7b32840680d..5616d732eb1c6 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -93,7 +93,7 @@ Authors ------- -- John D Hunter: orignal author. +- John D Hunter: original author. - Fernando Perez: refactoring, documentation, cleanups, port to 0.11. - VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. - Skipper Seabold, refactoring, cleanups, pure python addition @@ -154,7 +154,7 @@ def block_parser(part, rgxin, rgxout, fmtin, fmtout): """ part is a string of ipython text, comprised of at most one - input, one ouput, comments, and blank lines. The block parser + input, one output, comments, and blank lines. The block parser parses the text into a list of:: blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] 
@@ -268,7 +268,7 @@ def write(self,data): return super(DecodingStringIO, self).write(data) except : pass - # default to brute utf8 if no encoding succeded + # default to brute utf8 if no encoding succeeded return super(DecodingStringIO, self).write(data.decode('utf8', 'replace')) diff --git a/doc/sphinxext/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py index 9017480c9ab76..127ed49c106ad 100755 --- a/doc/sphinxext/numpydoc/docscrape_sphinx.py +++ b/doc/sphinxext/numpydoc/docscrape_sphinx.py @@ -115,7 +115,7 @@ def _str_member_list(self, name): or inspect.isgetsetdescriptor(param_obj)): param_obj = None - # pandas HACK - do not exclude attributes wich are None + # pandas HACK - do not exclude attributes which are None # if param_obj and (pydoc.getdoc(param_obj) or not desc): # # Referenced object has a docstring # autosum += [" %s%s" % (prefix, param)] diff --git a/pandas/__init__.py b/pandas/__init__.py index 861c8e7d622fc..93c5b6484b840 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -106,8 +106,8 @@ - Easy handling of missing data in floating point as well as non-floating point data. - Size mutability: columns can be inserted and deleted from DataFrame and - higher dimensional objects. - - Automatic and explicit data alignment: objects can be explicitly aligned + higher dimensional objects + - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in computations. 
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 3710ddc33c7c5..7b61cd22f45d1 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -61,7 +61,7 @@ cdef inline are_diff(object left, object right): class Infinity(object): - """ provide a positive Infinity comparision method for ranking """ + """ provide a positive Infinity comparison method for ranking """ __lt__ = lambda self, other: False __le__ = lambda self, other: isinstance(other, Infinity) @@ -73,7 +73,7 @@ class Infinity(object): class NegInfinity(object): - """ provide a negative Infinity comparision method for ranking """ + """ provide a negative Infinity comparison method for ranking """ __lt__ = lambda self, other: (not isinstance(other, NegInfinity) and not missing.checknull(other)) diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index aa7aa4b528194..c6f182ac5003f 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -80,7 +80,7 @@ def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'): lens[i] = l cdata = data - # keep the refernce alive thru the end of the + # keep the references alive thru the end of the # function datas.append(data) vecs[i] = cdata diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 4bbe8c654ea0e..72c2834b0bd57 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -148,7 +148,7 @@ cdef class Int64Factorizer: def unique_label_indices(ndarray[int64_t, ndim=1] labels): """ indices of the first occurrences of the unique labels - *excluding* -1. equivelent to: + *excluding* -1. equivalent to: np.unique(labels, return_index=True)[1] """ cdef: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 3898f7499e85e..788d3c4ac80ad 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -929,7 +929,7 @@ def is_lexsorted(list list_of_arrays): # TODO: could do even better if we know something about the data. 
eg, index has -# 1-min data, binner has 5-min data, then bins are just strides in index. This +# 1-min data, binner has 5-min data, then bins are just strides in index. This # is a general, O(max(len(values), len(binner))) method. @cython.boundscheck(False) @cython.wraparound(False) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 1f7c359b519a5..cf63b5083885e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -424,7 +424,7 @@ cdef class TextReader: if escapechar is not None: if len(escapechar) != 1: - raise ValueError('Only length-1 escapes supported') + raise ValueError('Only length-1 escapes supported') self.parser.escapechar = ord(escapechar) self._set_quoting(quotechar, quoting) @@ -523,7 +523,7 @@ cdef class TextReader: else: if isinstance(header, list): if len(header) > 1: - # need to artifically skip the final line + # need to artificially skip the final line # which is still a header line header = list(header) header.append(header[-1] + 1) diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index fd76f3328c05b..89753ccf7d773 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -327,7 +327,7 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, * this style of access anyway. * * Returns -1 on error, 0 on success, and 1 (with no error set) - * if obj doesn't have the neeeded date or datetime attributes. + * if obj doesn't have the needed date or datetime attributes. */ int convert_pydatetime_to_datetimestruct(PyObject *obj, pandas_datetimestruct *out) { diff --git a/pandas/_libs/src/numpy.pxd b/pandas/_libs/src/numpy.pxd index 6fa2bc6af9d1f..8ce398ce218a8 100644 --- a/pandas/_libs/src/numpy.pxd +++ b/pandas/_libs/src/numpy.pxd @@ -196,7 +196,7 @@ cdef extern from "numpy/arrayobject.h": # -- the details of this may change. 
def __getbuffer__(ndarray self, Py_buffer* info, int flags): # This implementation of getbuffer is geared towards Cython - # requirements, and does not yet fullfill the PEP. + # requirements, and does not yet fulfill the PEP. # In particular strided access is always provided regardless # of flags diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 19f810eb54ea7..01fc46481d5b4 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -247,7 +247,7 @@ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, /////////////////////////////////////////////// -// frequency specifc conversion routines +// frequency specific conversion routines // each function must take an integer fromDate and // a char relation ('S' or 'E' for 'START' or 'END') /////////////////////////////////////////////////////////////////////// diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 159645b4007e1..0470fef450dde 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -140,7 +140,7 @@ typedef int64_t JSLONG; #endif #if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) -#error "Endianess not supported" +#error "Endianness not supported" #endif enum JSTYPES { @@ -245,7 +245,7 @@ typedef struct __JSONObjectEncoder { int encodeHTMLChars; /* - Set to an error message if error occured */ + Set to an error message if error occurred */ const char *errorMsg; JSOBJ errorObj; diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c index a0c2146c30eed..da19afab030b1 100644 --- a/pandas/_libs/src/ujson/python/ujson.c +++ b/pandas/_libs/src/ujson/python/ujson.c @@ -58,12 +58,12 @@ PyObject *JSONFileToObj(PyObject *self, PyObject *args, PyObject *kwargs); static PyMethodDef ujsonMethods[] = { {"encode", (PyCFunction)objToJSON, METH_VARARGS | METH_KEYWORDS, - "Converts arbitrary object recursivly 
into JSON. " ENCODER_HELP_TEXT}, + "Converts arbitrary object recursively into JSON. " ENCODER_HELP_TEXT}, {"decode", (PyCFunction)JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True " "to use high precision float decoder."}, {"dumps", (PyCFunction)objToJSON, METH_VARARGS | METH_KEYWORDS, - "Converts arbitrary object recursivly into JSON. " ENCODER_HELP_TEXT}, + "Converts arbitrary object recursively into JSON. " ENCODER_HELP_TEXT}, {"loads", (PyCFunction)JSONToObj, METH_VARARGS | METH_KEYWORDS, "Converts JSON as string to dict object structure. Use precise_float=True " "to use high precision float decoder."}, diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index d3278e42e413f..585c904a601ed 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -104,7 +104,7 @@ cpdef bint _is_normalized(dt): def apply_index_wraps(func): # Note: normally we would use `@functools.wraps(func)`, but this does - # not play nicely wtih cython class methods + # not play nicely with cython class methods def wrapper(self, other): result = func(self, other) if self.normalize: @@ -316,7 +316,7 @@ class EndMixin(object): class _BaseOffset(object): """ - Base class for DateOffset methods that are not overriden by subclasses + Base class for DateOffset methods that are not overridden by subclasses and will (after pickle errors are resolved) go into a cdef class. """ _typ = "dateoffset" @@ -783,7 +783,7 @@ cpdef int get_day_of_month(datetime other, day_opt) except? 
-1: other : datetime or Timestamp day_opt : 'start', 'end' 'start': returns 1 - 'end': returns last day of the month + 'end': returns last day of the month Returns ------- @@ -924,7 +924,7 @@ cpdef int roll_yearday(datetime other, int n, int month, month : reference month giving the first month of the year day_opt : 'start', 'end' 'start': returns 1 - 'end': returns last day of the month + 'end': returns last day of the month Returns ------- diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 65594de586bac..2921291973373 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -557,7 +557,7 @@ class TimeRE(dict): """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This - prevents the possibility of a match occuring for a value that also + prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 1792f852c9e1e..c7744bf9db58e 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -373,7 +373,7 @@ class Timestamp(_Timestamp): """Pandas replacement for datetime.datetime Timestamp is the pandas equivalent of python's Datetime - and is interchangable with it in most cases. It's the type used + and is interchangeable with it in most cases. It's the type used for the entries that make up a DatetimeIndex, and other timeseries oriented data structures in pandas. 
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 53ead5e8f74a3..73e01fbf17205 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -17,7 +17,7 @@ def _dir_deletions(self): return self._accessors | self._deprecations def _dir_additions(self): - """ add addtional __dir__ for this object """ + """ add additional __dir__ for this object """ rv = set() for accessor in self._accessors: try: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 167f215b6c0ac..571db40537cfc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -68,7 +68,7 @@ def _ensure_data(values, dtype=None): return _ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 - # until our algos suppport uint8 directly (see TODO) + # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return _ensure_int64(values), 'int64', 'int64' @@ -120,7 +120,7 @@ def _ensure_data(values, dtype=None): dtype = 'category' # we are actually coercing to int64 - # until our algos suppport int* directly (not all do) + # until our algos support int* directly (not all do) values = _ensure_int64(values) return values, dtype, 'int64' diff --git a/pandas/core/base.py b/pandas/core/base.py index 72acd0052202b..e90794c6c2e1a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -685,7 +685,7 @@ def _gotitem(self, key, ndim, subset=None): class IndexOpsMixin(object): - """ common ops mixin to support a unified inteface / docs for Series / + """ common ops mixin to support a unified interface / docs for Series / Index """ diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index baf15b3ca5bc4..d47cb0762447b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -648,7 +648,7 @@ def 
_codes_for_groupby(self, sort): Parameters ---------- sort : boolean - The value of the sort paramter groupby was called with. + The value of the sort parameter groupby was called with. Returns ------- @@ -770,7 +770,7 @@ def set_categories(self, new_categories, ordered=None, rename=False, If not given, do not change the ordered information. rename : boolean (default: False) Whether or not the new_categories should be considered as a rename - of the old categories or as reordered categories. + of the old categories or as reordered categories. inplace : boolean (default: False) Whether or not to reorder the categories inplace or return a copy of this categorical with reordered categories. @@ -1139,7 +1139,7 @@ def shift(self, periods): shifted : Categorical """ # since categoricals always have ndim == 1, an axis parameter - # doesnt make any sense here. + # doesn't make any sense here. codes = self.codes if codes.ndim > 1: raise NotImplementedError("Categorical with ndim > 1.") diff --git a/pandas/core/common.py b/pandas/core/common.py index 775ecc32b0f3c..e606be3cc2a23 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -305,7 +305,7 @@ def split_ranges(mask): ranges = [(0, len(mask))] for pos, val in enumerate(mask): - if not val: # this pos should be ommited, split off the prefix range + if not val: # this pos should be omitted, split off the prefix range r = ranges.pop() if pos > r[0]: # yield non-zero range yield (r[0], pos) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index c74da6379e32f..1dc19d33f3365 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -71,7 +71,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: - # check for dtype compatiblity + # check for dtype compatibility dtypes = set() for o in [a, b]: if hasattr(o, 'get_dtype_counts'): @@ 
-224,7 +224,7 @@ def where(cond, a, b, use_numexpr=True): def set_test_mode(v=True): """ - Keeps track of whether numexpr was used. Stores an additional ``True`` + Keeps track of whether numexpr was used. Stores an additional ``True`` for every successful use of evaluate with numexpr since the last ``get_test_result`` """ diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 4b3c608a88be8..26eefa75b2675 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -439,7 +439,7 @@ def visit_Attribute(self, node, **kwargs): return self.term_type(getattr(resolved, attr), self.env) except AttributeError: - # something like datetime.datetime where scope is overriden + # something like datetime.datetime where scope is overridden if isinstance(value, ast.Name) and value.id == attr: return resolved diff --git a/pandas/core/config.py b/pandas/core/config.py index e71c3b6f58562..d10e2d19be665 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -23,7 +23,7 @@ - all options in a certain sub - namespace can be reset at once. - the user can set / get / reset or ask for the description of an option. - a developer can register and mark an option as deprecated. -- you can register a callback to be invoked when the the option value +- you can register a callback to be invoked when the option value is set or reset. Changing the stored value is considered misuse, but is not verboten. @@ -33,8 +33,8 @@ - Data is stored using nested dictionaries, and should be accessed through the provided API. -- "Registered options" and "Deprecated options" have metadata associcated - with them, which are stored in auxilary dictionaries keyed on the +- "Registered options" and "Deprecated options" have metadata associated + with them, which are stored in auxiliary dictionaries keyed on the fully-qualified key, e.g. "x.y.z.option". - the config_init module is imported by the package's __init__.py file. 
@@ -209,7 +209,7 @@ def __dir__(self): # in the docstring. For dev convenience we'd like to generate the docstrings # dynamically instead of maintaining them by hand. To this, we use the # class below which wraps functions inside a callable, and converts -# __doc__ into a propery function. The doctsrings below are templates +# __doc__ into a property function. The doctsrings below are templates # using the py2.6+ advanced formatting syntax to plug in a concise list # of options, and option descriptions. @@ -691,7 +691,7 @@ def pp(name, ks): @contextmanager def config_prefix(prefix): - """contextmanager for multiple invocations of API with a common prefix + """contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set )__option diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 87c6fb69f33bf..5fcb5f09dfae7 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -520,7 +520,7 @@ def maybe_infer_dtype_type(element): def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False): - """ provide explict type promotion and coercion + """ provide explicit type promotion and coercion Parameters ---------- diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index a47f2c0d4ab13..d1637873eb6e1 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -597,8 +597,8 @@ def is_dtype(cls, dtype): """ if isinstance(dtype, compat.string_types): - # PeriodDtype can be instanciated from freq string like "U", - # but dosn't regard freq str like "U" as dtype. + # PeriodDtype can be instantiated from freq string like "U", + # but doesn't regard freq str like "U" as dtype. 
if dtype.startswith('period[') or dtype.startswith('Period['): try: if cls._parse_dtype_strict(dtype) is not None: diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index ce57b544d9d66..d208c72ffee19 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -362,7 +362,7 @@ def _infer_fill_value(val): def _maybe_fill(arr, fill_value=np.nan): """ - if we have a compatiable fill_value and arr dtype, then fill + if we have a compatible fill_value and arr dtype, then fill """ if _isna_compat(arr, fill_value): arr.fill(fill_value) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 12a4a7fdaedad..d85f3bf552617 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -591,7 +591,7 @@ def _repr_fits_horizontal_(self, ignore_width=False): max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end - # check whether repr fits horizontal by actualy checking + # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() @@ -1578,7 +1578,7 @@ def to_stata(self, fname, convert_dates=None, write_index=True, String path of file-like object convert_dates : dict Dictionary mapping columns containing datetime types to stata - internal format to use when wirting the dates. Options are 'tc', + internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. 
Raises NotImplementedError if @@ -1606,7 +1606,7 @@ def to_stata(self, fname, convert_dates=None, write_index=True, * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError - * Columns listed in convert_dates are noth either datetime64[ns] + * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters @@ -5736,7 +5736,7 @@ def idxmax(self, axis=0, skipna=True): return Series(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num): - """ let's be explict about this """ + """ let's be explicit about this """ if axis_num == 0: return self.columns elif axis_num == 1: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c5359ba2c5ea1..004cfce6769c8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2341,7 +2341,7 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): if value is None: return - # see if the copy is not actually refererd; if so, then disolve + # see if the copy is not actually referred; if so, then dissolve # the copy weakref try: gc.collect(2) @@ -3109,7 +3109,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, %(optional_axis)s method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional method to use for filling holes in reindexed DataFrame. - Please note: this is only applicable to DataFrames/Series with a + Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. 
* default: don't fill gaps diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 06b7dbb4ecf7b..285a347153a82 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2956,7 +2956,7 @@ def is_in_axis(key): return True - # if the the grouper is obj[name] + # if the grouper is obj[name] def is_in_obj(gpr): try: return id(gpr) == id(obj[gpr.name]) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 04b8ade7e5253..99ee28c84365f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -319,7 +319,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return IntervalIndex.from_intervals(subarr, name=name, copy=copy) elif inferred == 'boolean': - # don't support boolean explicity ATM + # don't support boolean explicitly ATM pass elif inferred != 'string': if inferred.startswith('datetime'): @@ -887,7 +887,7 @@ def _format_data(self, name=None): # are we a truncated display is_truncated = n > max_seq_items - # adj can optionaly handle unicode eastern asian width + # adj can optionally handle unicode eastern asian width adj = _get_adjustment() def _extend_line(s, line, value, display_width, next_line_prefix): @@ -1788,7 +1788,7 @@ def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class """ - # must be overrided in specific classes + # must be overridden in specific classes return _concat._concat_index_asobject(to_concat, name) _index_shared_docs['take'] = """ @@ -3276,7 +3276,7 @@ def _get_leaf_sorter(labels): sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) return sorter - # find indexers of begining of each set of + # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: @@ -3573,7 +3573,7 @@ def _searchsorted_monotonic(self, label, side='left'): def _get_loc_only_exact_matches(self, key): """ - This is overriden on subclasses (namely, 
IntervalIndex) to control + This is overridden on subclasses (namely, IntervalIndex) to control get_slice_bound. """ return self.get_loc(key) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 241907a54f393..ac7cb30fa823d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -522,7 +522,7 @@ def reindex(self, target, method=None, level=None, limit=None, # we always want to return an Index type here # to be consistent with .reindex for other index types (e.g. they don't # coerce based on the actual values, only on the dtype) - # unless we had an inital Categorical to begin with + # unless we had an initial Categorical to begin with # in which case we are going to conform to the passed Categorical new_target = np.asarray(new_target) if is_categorical_dtype(target): @@ -746,7 +746,7 @@ def _evaluate_compare(self, other): if isinstance(other, ABCCategorical): if not self.values.is_dtype_equal(other): - raise TypeError("categorical index comparisions must " + raise TypeError("categorical index comparisons must " "have the same categories and ordered " "attributes") diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 40c07376d2522..3fca40562899a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -132,7 +132,7 @@ def ceil(self, freq): class DatetimeIndexOpsMixin(object): - """ common ops mixin to support a unified inteface datetimelike Index """ + """ common ops mixin to support a unified interface datetimelike Index """ def equals(self, other): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index af901440d8abd..b17682b6c3448 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -355,7 +355,7 @@ def __new__(cls, data=None, raise ValueError("Must provide freq argument if no data is " "supplied") - # if dtype has an embeded tz, capture it + # if dtype has 
an embedded tz, capture it if dtype is not None: try: dtype = DatetimeTZDtype.construct_from_string(dtype) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 2a132f683c519..def9b151f5c91 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -683,7 +683,7 @@ def inferred_type(self): @Appender(Index.memory_usage.__doc__) def memory_usage(self, deep=False): - # we don't use an explict engine + # we don't use an explicit engine # so return the bytes here return (self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f4c4f91d2cc57..7107378671ba5 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -799,8 +799,8 @@ def _hashed_indexing_key(self, key): *this is internal for use for the cython routines* - Paramters - --------- + Parameters + ---------- key : string or tuple Returns diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index de6713249a7c7..fa6614d27cd19 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1308,7 +1308,7 @@ class _IXIndexer(_NDFrameIndexer): ``.ix`` is the most general indexer and will support any of the inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating point label schemes. ``.ix`` is exceptionally useful when dealing - with mixed positional and label based hierachical indexes. + with mixed positional and label based hierarchical indexes. However, when an axis is integer based, ONLY label based access and not positional access is supported. 
Thus, in such cases, it's @@ -1441,8 +1441,8 @@ def _has_valid_type(self, key, axis): ax = self.obj._get_axis(axis) # valid for a label where all labels are in the index - # slice of lables (where start-end in labels) - # slice of integers (only if in the lables) + # slice of labels (where start-end in labels) + # slice of integers (only if in the labels) # boolean if isinstance(key, slice): @@ -1929,7 +1929,7 @@ def _has_valid_setitem_indexer(self, indexer): self._has_valid_positional_setitem_indexer(indexer) def _convert_key(self, key, is_setter=False): - """ require integer args (and convert to label arguments) """ + """ require integer args (and convert to label arguments) """ for a, i in zip(self.obj.axes, key): if not is_integer(i): raise ValueError("iAt based indexing can only have integer " @@ -2118,7 +2118,7 @@ def maybe_convert_ix(*args): def is_nested_tuple(tup, labels): - # check for a compatiable nested tuple and multiindexes among the axes + # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 3a64a0ef84e3d..ba90503e3bf40 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -170,7 +170,7 @@ def formatting_values(self): def get_values(self, dtype=None): """ return an internal format, currently just the ndarray - this is often overriden to handle to_dense like operations + this is often overridden to handle to_dense like operations """ if is_object_dtype(dtype): return self.values.astype(object) @@ -954,7 +954,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, new_values = new_values.T # If the default repeat behavior in np.putmask would go in the - # wrong direction, then explictly repeat and reshape new instead + # wrong direction, then explicitly repeat and reshape new instead if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim and axis == 1: new = np.repeat( @@ -1455,7 
+1455,7 @@ def where(self, other, cond, align=True, errors='raise', cond = cond.values # If the default broadcasting would go in the wrong direction, then - # explictly reshape other instead + # explicitly reshape other instead if getattr(other, 'ndim', 0) >= 1: if values.ndim - 1 == other.ndim and axis == 1: other = other.reshape(tuple(other.shape + (1, ))) @@ -1493,7 +1493,7 @@ def func(cond, values, other): except TypeError: # we cannot coerce, return a compat dtype - # we are explicity ignoring errors + # we are explicitly ignoring errors block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, align=align, errors=errors, @@ -4939,7 +4939,7 @@ def _maybe_compare(a, b, op): is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) - # numpy deprecation warning to have i8 vs integer comparisions + # numpy deprecation warning to have i8 vs integer comparisons if is_datetimelike_v_numeric(a, b): result = False diff --git a/pandas/core/missing.py b/pandas/core/missing.py index c3e72d6c31bf5..74fa21fa4b53d 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -127,7 +127,7 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None, if not valid.any(): # have to call np.asarray(xvalues) since xvalues could be an Index - # which cant be mutated + # which can't be mutated result = np.empty_like(np.asarray(xvalues), dtype=np.float64) result.fill(np.nan) return result diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 3a7a5e44d5a88..faac8ab312d6b 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1357,7 +1357,7 @@ def f(self, other): return self._combine_series_infer(other, func, try_cast=False) else: - # straight boolean comparisions we want to allow all columns + # straight boolean comparisons we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
res = self._combine_const(other, func, errors='ignore', diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 6d85e5bf7c7f9..7ec177b03aeb1 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -860,7 +860,7 @@ def xs(self, key, axis=1): xs is only for getting, not setting values. MultiIndex Slicers is a generic way to get/set values on any level or - levels and is a superset of xs functionality, see + levels and is a superset of xs functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>` """ diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 9bd5abb2cd476..aaadf6d3ca32f 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -276,7 +276,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, ndims.add(obj.ndim) # get the sample - # want the higest ndim that we have, and must be non-empty + # want the highest ndim that we have, and must be non-empty # unless all objs are empty sample = None if len(ndims) > 1: diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index c2804c8f8e63e..b648c426a877f 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -186,7 +186,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you - can strip the hypen by specifying `sep='-'` + can strip the hyphen by specifying `sep='-'` .. 
versionadded:: 0.20.0 diff --git a/pandas/core/series.py b/pandas/core/series.py index 360095c386e8b..5d8092fd30496 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -345,7 +345,7 @@ def _set_axis(self, axis, labels, fastpath=False): (DatetimeIndex, PeriodIndex, TimedeltaIndex)): try: labels = DatetimeIndex(labels) - # need to set here becuase we changed the index + # need to set here because we changed the index if fastpath: self._data.set_axis(axis, labels) except (libts.OutOfBoundsDatetime, ValueError): @@ -487,7 +487,7 @@ def nonzero(self): Return the indices of the elements that are non-zero This method is equivalent to calling `numpy.nonzero` on the - series data. For compatability with NumPy, the return value is + series data. For compatibility with NumPy, the return value is the same (a tuple with an array of indices for each dimension), but it will always be a one-item tuple because series only have one dimension. @@ -2388,7 +2388,7 @@ def aggregate(self, func, axis=0, *args, **kwargs): # expression, e.g.: lambda x: x-x.quantile(0.25) # this will fail, so we can try a vectorized evaluation - # we cannot FIRST try the vectorized evaluation, becuase + # we cannot FIRST try the vectorized evaluation, because # then .agg and .apply would have different semantics if the # operation is actually defined on the Series, e.g. 
str try: diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 0424ac8703e25..9b2650359bf68 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -525,7 +525,7 @@ def __setitem__(self, key, value): # if is_integer(key): # self.values[key] = value # else: - # raise Exception("SparseArray does not support seting non-scalars + # raise Exception("SparseArray does not support setting non-scalars # via setitem") raise TypeError( "SparseArray does not support item assignment via setitem") @@ -538,7 +538,7 @@ def __setslice__(self, i, j, value): slobj = slice(i, j) # noqa # if not is_scalar(value): - # raise Exception("SparseArray does not support seting non-scalars + # raise Exception("SparseArray does not support setting non-scalars # via slices") # x = self.values diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 99c7563d5b249..fab4e77ce4467 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1414,7 +1414,7 @@ def _wrap_result(self, result, use_codes=True, elif expand is True and not isinstance(self._orig, Index): # required when expand=True is explicitly specified - # not needed when infered + # not needed when inferred def cons_row(x): if is_list_like(x): @@ -1424,7 +1424,7 @@ def cons_row(x): result = [cons_row(x) for x in result] if result: - # propogate nan values to match longest sequence (GH 18450) + # propagate nan values to match longest sequence (GH 18450) max_len = max(len(x) for x in result) result = [x * max_len if x[0] is np.nan else x for x in result] diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 6b8edbb146e4b..1de43116d0b49 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -197,7 +197,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, In case when it is not possible to return designated types (e.g. 
when any element of input is before Timestamp.min or after Timestamp.max) - return will have datetime.datetime type (or correspoding array/Series). + return will have datetime.datetime type (or corresponding + array/Series). Examples -------- @@ -497,7 +498,7 @@ def _convert_listlike(arg, box, format, name=None, tz=tz): def _assemble_from_unit_mappings(arg, errors): """ - assemble the unit specifed fields from the arg (DataFrame) + assemble the unit specified fields from the arg (DataFrame) Return a Series for actual parsing Parameters diff --git a/pandas/core/window.py b/pandas/core/window.py index 5ad8d20cc03e2..76ba76b7a9da9 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -253,8 +253,8 @@ def _wrap_results(self, results, blocks, obj): """ wrap the results - Paramters - --------- + Parameters + ---------- results : list of ndarrays blocks : list of blocks obj : conformed data (may be resampled) @@ -403,7 +403,7 @@ class Window(_Window): 3 NaN 4 NaN - Same as above, but explicity set the min_periods + Same as above, but explicitly set the min_periods >>> df.rolling(2, min_periods=1).sum() B diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 42b3bdd4991a9..b3d1ce31d66ae 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -38,7 +38,7 @@ class ParserError(ValueError): class DtypeWarning(Warning): """ - Warning that is raised for a dtype incompatiblity. This + Warning that is raised for a dtype incompatibility. This can happen whenever `pd.read_csv` encounters non- uniform dtypes in a column(s) of a given CSV file. """ @@ -56,7 +56,7 @@ class ParserWarning(Warning): Warning that is raised in `pd.read_csv` whenever it is necessary to change parsers (generally from 'c' to 'python') contrary to the one specified by the user due to lack of support or functionality for - parsing particular attributes of a CSV file with the requsted engine. + parsing particular attributes of a CSV file with the requested engine. 
""" diff --git a/pandas/io/common.py b/pandas/io/common.py index 534c1e0671150..da60698fe529f 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -312,7 +312,7 @@ def _get_handle(path_or_buf, mode, encoding=None, compression=None, f : file-like A file-like object handles : list of file-like objects - A list of file-like object that were openned in this function. + A list of file-like object that were opened in this function. """ try: from s3fs import S3File @@ -533,7 +533,7 @@ def _check_as_is(x): # Fetch UTF-8 output from the queue ... data = self.queue.getvalue() data = data.decode("utf-8") - # ... and reencode it into the target encoding + # ... and re-encode it into the target encoding data = self.encoder.encode(data) # write to the target stream self.stream.write(data) @@ -553,7 +553,7 @@ def _check_as_is(x): # Fetch UTF-8 output from the queue ... data = self.queue.getvalue() data = data.decode("utf-8") - # ... and reencode it into the target encoding + # ... and re-encode it into the target encoding data = self.encoder.encode(data) # write to the target stream self.stream.write(data) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 4f0655cff9b57..92b29c8da7e3f 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -561,7 +561,7 @@ def _parse_cell(cell_contents, cell_typ): cell_contents = bool(cell_contents) elif convert_float and cell_typ == XL_CELL_NUMBER: # GH5394 - Excel 'numbers' are always floats - # it's a minimal perf hit and less suprising + # it's a minimal perf hit and less surprising val = int(cell_contents) if val == cell_contents: cell_contents = val @@ -881,12 +881,12 @@ def engine(self): def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None): """ - Write given formated cells into Excel an excel sheet + Write given formatted cells into Excel an excel sheet Parameters ---------- cells : generator - cell of formated data to save to Excel sheet + cell of formatted data to save to Excel 
sheet sheet_name : string, default None Name of Excel sheet, if None, then use self.cur_sheet startrow: upper left cell row to dump data frame diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index bdff59939a4de..36eac8dd57fbd 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -14,7 +14,7 @@ def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. - slighly modified from the way IPython handles the same issue. + slightly modified from the way IPython handles the same issue. """ global _initial_defencoding diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index a36e82edf6e57..aff3e35861434 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -492,7 +492,7 @@ def _format_regular_rows(self): # output index and index_label? if self.index: - # chek aliases + # check aliases # if list only take first as this is not a MultiIndex if (self.index_label and isinstance(self.index_label, (list, tuple, np.ndarray, diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3af9e78a5aac4..2c3d92cea0ad8 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -420,7 +420,7 @@ def render(self, **kwargs): the rendered HTML in the notebook. Pandas uses the following keys in render. Arguments passed - in ``**kwargs`` take precedence, so think carefuly if you want + in ``**kwargs`` take precedence, so think carefully if you want to override them: * head @@ -1201,7 +1201,7 @@ def _is_visible(idx_row, idx_col, lengths): def _get_level_lengths(index, hidden_elements=None): """ - Given an index, find the level lenght for each element. + Given an index, find the level length for each element. Optional argument is a list of index positions which should not be visible. 
@@ -1229,7 +1229,7 @@ def _get_level_lengths(index, hidden_elements=None): lengths[(i, last_label)] = 1 elif (row != sentinel): # even if its hidden, keep track of it in case - # length >1 and later elemens are visible + # length >1 and later elements are visible last_label = j lengths[(i, last_label)] = 0 elif(j not in hidden_elements): diff --git a/pandas/io/html.py b/pandas/io/html.py index 67a48198adc27..e7794864ccb3e 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -263,7 +263,7 @@ def _parse_tables(self, doc, match, attrs): attrs : dict A dictionary of table attributes that can be used to disambiguate - mutliple tables on a page. + multiple tables on a page. Raises ------ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index bb435c625ff35..72ec5c59c90af 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -162,7 +162,7 @@ class JSONTableWriter(FrameWriter): def __init__(self, obj, orient, date_format, double_precision, ensure_ascii, date_unit, index, default_handler=None): """ - Adds a `schema` attribut with the Table Schema, resets + Adds a `schema` attribute with the Table Schema, resets the index (can't do in caller, because the schema inference needs to know what the index is, forces orient to records, and forces date_format to 'iso'. @@ -534,7 +534,7 @@ def _get_object_parser(self, json): def close(self): """ - If we opened a stream earlier, in _get_data_from_filepath, we should + If we opened a stream earlier, in _get_data_from_filepath, we should close it. If an open stream or file was passed, we leave it open. """ if self.should_close: diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx index 05dfaad8b2058..04bb330e595dd 100644 --- a/pandas/io/msgpack/_unpacker.pyx +++ b/pandas/io/msgpack/_unpacker.pyx @@ -202,7 +202,7 @@ cdef class Unpacker(object): :param int max_buffer_size: Limits size of data waiting unpacked. 0 means system's INT_MAX (default). 
Raises `BufferFull` exception when it - is insufficient. You shoud set this parameter when unpacking + is insufficient. You should set this parameter when unpacking data from untrasted source. :param int max_str_len: diff --git a/pandas/io/packers.py b/pandas/io/packers.py index ef65a3275060b..9289853a1bbfd 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -70,7 +70,7 @@ move_into_mutable_buffer as _move_into_mutable_buffer, ) -# check whcih compression libs we have installed +# check which compression libs we have installed try: import zlib diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index e053af17667c4..acb7d00284693 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1541,7 +1541,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, values, set(col_na_values) | col_na_fvalues, try_num_bool) - # type specificed in dtype param + # type specified in dtype param if cast_type and not is_dtype_equal(cvals, cast_type): cvals = self._cast_types(cvals, cast_type, c) @@ -2054,7 +2054,7 @@ def __init__(self, f, **kwds): self.data = f # Get columns in two steps: infer from data, then - # infer column indices from self.usecols if is is specified. + # infer column indices from self.usecols if it is specified. self._col_indices = None self.columns, self.num_original_columns = self._infer_columns() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index c428000d73593..efe6ab6c18868 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -97,7 +97,7 @@ def _ensure_term(where, scope_level): create the terms here with a frame_level=2 (we are 2 levels down) """ - # only consider list/tuple here as an ndarray is automaticaly a coordinate + # only consider list/tuple here as an ndarray is automatically a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): @@ -301,7 +301,7 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs): contains a single pandas object. 
mode : string, {'r', 'r+', 'a'}, default 'r'. Mode to use when opening the file. Ignored if path_or_buf is a pd.HDFStore. - where : list of Term (or convertable) objects, optional + where : list of Term (or convertible) objects, optional start : optional, integer (defaults to None), row number to start selection stop : optional, integer (defaults to None), row number to stop @@ -498,7 +498,7 @@ def __getattr__(self, name): (type(self).__name__, name)) def __contains__(self, key): - """ check for existance of this key + """ check for existence of this key can match the exact pathname or the pathnm w/o the leading '/' """ node = self.get_node(key) @@ -679,7 +679,7 @@ def select(self, key, where=None, start=None, stop=None, columns=None, Parameters ---------- key : object - where : list of Term (or convertable) objects, optional + where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return @@ -724,7 +724,7 @@ def select_as_coordinates( Parameters ---------- key : object - where : list of Term (or convertable) objects, optional + where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ @@ -873,7 +873,7 @@ def remove(self, key, where=None, start=None, stop=None): ---------- key : string Node to remove or delete rows from - where : list of Term (or convertable) objects, optional + where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection @@ -1250,7 +1250,7 @@ def error(t): # existing node (and must be a table) if tt is None: - # if we are a writer, determin the tt + # if we are a writer, determine the tt if value is not 
None: if pt == u('series_table'): @@ -1370,7 +1370,7 @@ class TableIterator(object): ---------- store : the reference store - s : the refered storer + s : the referred storer func : the function to execute the query where : the where of the query nrows : the rows to iterate on @@ -4653,7 +4653,7 @@ class Selection(object): Parameters ---------- table : a Table object - where : list of Terms (or convertable to) + where : list of Terms (or convertible to) start, stop: indicies to start and/or stop selection """ @@ -4718,7 +4718,7 @@ def generate(self, where): raise ValueError( "The passed where expression: {0}\n" " contains an invalid variable reference\n" - " all of the variable refrences must be a " + " all of the variable references must be a " "reference to\n" " an axis (e.g. 'index' or 'columns'), or a " "data_column\n" diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c7bbbf9940ba1..e2f3033c580a5 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1484,7 +1484,7 @@ def to_sql(self, frame, name, if_exists='fail', index=True, `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None - Ignored parameter included for compatability with SQLAlchemy + Ignored parameter included for compatibility with SQLAlchemy version of ``to_sql``. chunksize : int, default None If not None, then rows will be written in batches of this diff --git a/pandas/io/stata.py b/pandas/io/stata.py index aafe5f2ce76bd..2b97b447921bb 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -645,7 +645,7 @@ def __init__(self, catarray): def _encode(self, s): """ - Python 3 compatability shim + Python 3 compatibility shim """ if compat.PY3: return s.encode(self._encoding) @@ -968,7 +968,7 @@ def __init__(self, path_or_buf, convert_dates=True, self._order_categoricals = order_categoricals if encoding is not None: if encoding not in VALID_ENCODINGS: - raise ValueError('Unknown encoding. 
Only latin-1 and ascii ' + raise ValueError('Unknown encoding. Only latin-1 and ascii ' 'supported.') self._encoding = encoding self._chunksize = chunksize @@ -1881,7 +1881,7 @@ class StataWriter(StataParser): Input to save convert_dates : dict Dictionary mapping columns containing datetime types to stata internal - format to use when wirting the dates. Options are 'tc', 'td', 'tm', + format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has @@ -1913,7 +1913,7 @@ class StataWriter(StataParser): NotImplementedError * If datetimes contain timezone information ValueError - * Columns listed in convert_dates are noth either datetime64[ns] + * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column dtype is not representable in Stata * Column listed in convert_dates is not in DataFrame diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 9d74a308f79c8..3094d7d0ab1c6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1951,7 +1951,7 @@ def plot_series(data, kind='line', ax=None, # Series unique return_type : {None, 'axes', 'dict', 'both'}, default None The kind of object to return. The default is ``axes`` 'axes' returns the matplotlib axes the boxplot is drawn on; - 'dict' returns a dictionary whose values are the matplotlib + 'dict' returns a dictionary whose values are the matplotlib Lines of the boxplot; 'both' returns a namedtuple with the axes and dict. 
diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 145597e52ae14..887202e22b4e0 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -67,9 +67,9 @@ def _maybe_valid_colors(colors): except ValueError: return False - # check whether the string can be convertable to single color + # check whether the string can be convertible to single color maybe_single_color = _maybe_valid_colors([colors]) - # check whether each character can be convertable to colors + # check whether each character can be convertible to colors maybe_color_cycle = _maybe_valid_colors(list(colors)) if maybe_single_color and maybe_color_cycle and len(colors) > 1: # Special case for single str 'CN' match and convert to hex diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 6e3b7a059fd49..c824f0026af50 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -480,7 +480,7 @@ def test_rename_multiindex(self): df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) # - # without specifying level -> accross all levels + # without specifying level -> across all levels renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 69f1aeddc43e9..b9275fc69e7ff 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1907,7 +1907,7 @@ def test_round_issue(self): def test_built_in_round(self): if not compat.PY3: - pytest.skip("build in round cannot be overriden " + pytest.skip("build in round cannot be overridden " "prior to Python 3") # GH11763 diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 430562ce727da..fd1eb23643c2b 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -865,7 +865,7 @@ def 
test_combineSeries(self): # 10890 # we no longer allow auto timeseries broadcasting - # and require explict broadcasting + # and require explicit broadcasting added = self.tsframe.add(ts, axis='index') for key, col in compat.iteritems(self.tsframe): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index a13d985ab6974..5172efe25d697 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -303,7 +303,7 @@ def test_with_na_groups(self): # assert issubclass(agged.dtype.type, np.integer) - # explicity return a float from my function + # explicitly return a float from my function def f(x): return float(len(x)) diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index de0deb442e516..3117525d899f6 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -184,7 +184,7 @@ def test_regression_whitelist_methods( axis, skipna, sort): # GH6944 # GH 17537 - # explicity test the whitelest methods + # explicitly test the whitelest methods if axis == 0: frame = raw_frame diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 1d72ca609b1d3..4b989eb35e900 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -71,7 +71,7 @@ def test_astype_with_tz(self): def test_astype_str_compat(self): # GH 13149, GH 13209 - # verify that we are returing NaT as a string (and not unicode) + # verify that we are returning NaT as a string (and not unicode) idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) result = idx.astype(str) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index c89e3ddbfc5d0..f94a438fcdaa5 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1464,7 +1464,7 @@ def 
test_parsers_iso8601(self): actual = tslib._test_parse_iso8601(date_str) assert actual == exp - # seperators must all match - YYYYMM not valid + # separators must all match - YYYYMM not valid invalid_cases = ['2011-01/02', '2011^11^11', '201401', '201111', '200101', # mixed separated and unseparated diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 6fc5526e63e59..3ca4c31b7f059 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -1112,7 +1112,7 @@ def test_is_non_overlapping_monotonic(self, closed): idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) assert idx.is_non_overlapping_monotonic is False - # Should be False for closed='both', overwise True (GH16560) + # Should be False for closed='both', otherwise True (GH16560) if closed == 'both': idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is False diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index e33fd1e0f4c1e..5109542403b43 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1067,7 +1067,7 @@ def test_format(self): # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't # include us since the default for Timestamp shows these but Index - # formating does not we are skipping) + # formatting does not we are skipping) now = datetime.now() if not str(now).endswith("000"): index = Index([now]) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index cbd819fa9cfb7..dcd592345b91c 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -117,7 +117,7 @@ def test_numeric_compat(self): def test_explicit_conversions(self): # GH 8608 - # add/sub are overriden explicity for Float/Int Index + # add/sub are overridden explicitly for Float/Int Index idx = 
self._holder(np.arange(5, dtype='int64')) # float conversions diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 55c06e8854333..1ebeef072fdc5 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -779,7 +779,7 @@ def test_slice_keep_name(self): def test_explicit_conversions(self): # GH 8608 - # add/sub are overriden explicity for Float/Int Index + # add/sub are overridden explicitly for Float/Int Index idx = RangeIndex(5) # float conversions diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index c5fb2580f0a15..ded16224aedf2 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -120,8 +120,8 @@ def get_result(self, obj, method, key, axis): if isinstance(key, dict): key = key[axis] - # use an artifical conversion to map the key as integers to the labels - # so ix can work for comparisions + # use an artificial conversion to map the key as integers to the labels + # so ix can work for comparisons if method == 'indexer': method = 'ix' key = obj._get_axis(axis)[key] @@ -138,7 +138,7 @@ def get_result(self, obj, method, key, axis): def get_value(self, f, i, values=False): """ return the value for the location i """ - # check agains values + # check against values if values: return f.values[i] @@ -160,7 +160,7 @@ def check_values(self, f, func, values=False): for i in indicies: result = getattr(f, func)[i] - # check agains values + # check against values if values: expected = f.values[i] else: diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 6c5af84f0ce02..d2692c7dc302e 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -360,7 +360,7 @@ def test_slice_integer(self): # same as above, but for Integer based indexes # these coerce to a like integer - # oob indiciates if we are out of bounds + # oob indicates if we are out of bounds # of positional 
indexing for index, oob in [(tm.makeIntIndex(5), False), (tm.makeRangeIndex(5), False), diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 0e66c15760653..c66310d10ebdc 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -362,7 +362,7 @@ def test_multi_nan_indexing(self): def test_multi_assign(self): - # GH 3626, an assignement of a sub-df to a df + # GH 3626, an assignment of a sub-df to a df df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'], 'PF': [0, 0, 0, 0, 1, 1], 'col1': lrange(6), diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index 568dd7cec5ecb..3f71e673a4ffe 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -235,7 +235,7 @@ def test_ix_assign_column_mixed(self): tm.assert_frame_equal(df, expected) # ok, but chained assignments are dangerous - # if we turn off chained assignement it will work + # if we turn off chained assignment it will work with option_context('chained_assignment', None): df = DataFrame({'a': lrange(4)}) df['b'] = np.nan diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 6f0d8b1f29b77..fb5f094f9462b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -17,7 +17,7 @@ class TestLoc(Base): def test_loc_getitem_dups(self): # GH 5678 - # repeated gettitems on a dup index returing a ndarray + # repeated gettitems on a dup index returning a ndarray df = DataFrame( np.random.random_sample((20, 5)), index=['ABCDE' [x % 5] for x in range(20)]) @@ -385,7 +385,7 @@ def test_loc_general(self): def test_loc_setitem_consistency(self): # GH 6149 - # coerce similary for setitem and loc when rows have a null-slice + # coerce similarly for setitem and loc when rows have a null-slice expected = DataFrame({'date': Series(0, index=range(5), dtype=np.int64), 'val': Series(range(5), dtype=np.int64)}) @@ -588,7 +588,7 @@ 
def test_loc_non_unique(self): # non-unique indexer with loc slice # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs - # these are going to raise becuase the we are non monotonic + # these are going to raise because the we are non monotonic df = DataFrame({'A': [1, 2, 3, 4, 5, 6], 'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]) pytest.raises(KeyError, df.loc.__getitem__, diff --git a/pandas/tests/io/data/banklist.html b/pandas/tests/io/data/banklist.html index 8ec1561f8c394..cbcce5a2d49ff 100644 --- a/pandas/tests/io/data/banklist.html +++ b/pandas/tests/io/data/banklist.html @@ -7,7 +7,7 @@ <meta charset="UTF-8"> <!-- Unicode character encoding --> <meta http-equiv="X-UA-Compatible" content="IE=edge"> -<!-- Turns off IE Compatiblity Mode --> +<!-- Turns off IE Compatibility Mode --> <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> <!-- Makes it so phones don't auto zoom out. --> <meta name="author" content="DRR"> @@ -4849,7 +4849,7 @@ <h1 class="page_title">Failed Bank List</h1> <ul> <li><a href="/about/freedom/" title="Freedom of Information Act (FOIA) Service Center">Freedom of Information Act (FOIA) Service Center</a></li> <li><a href="/open/" title="FDIC Open Government Webpage">FDIC Open Government Webpage</a></li> - <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> + <li><a href="/about/diversity/nofear/" title="No FEAR Act Data">No FEAR Act Data</a></li> </ul> </div> <div id="responsive_footer-small"> diff --git a/pandas/tests/io/data/macau.html b/pandas/tests/io/data/macau.html index be62b3221518d..cfd1a0702460a 100644 --- a/pandas/tests/io/data/macau.html +++ b/pandas/tests/io/data/macau.html @@ -476,7 +476,7 @@ <h4>這個頁面上的內容需要較新版本的 Adobe Flash Player。</h4> toggleclass: ["", "selected"], //Two CSS classes to be applied to the header when it's collapsed and expanded, respectively ["class1", "class2"] togglehtml: ["", "", ""], //Additional HTML added to the 
header when it's collapsed and expanded, respectively ["position", "html1", "html2"] (see docs) animatespeed: "normal", //speed of animation: integer in milliseconds (ie: 200), or keywords "fast", "normal", or "slow" - oninit:function(headers, expandedindices){ //custom code to run when headers have initalized + oninit:function(headers, expandedindices){ //custom code to run when headers have initialized //do nothing }, onopenclose:function(header, index, state, isuseractivated){ //custom code to run whenever a header is opened or closed diff --git a/pandas/tests/io/data/spam.html b/pandas/tests/io/data/spam.html index 935b39f6d6011..e4fadab6eafd2 100644 --- a/pandas/tests/io/data/spam.html +++ b/pandas/tests/io/data/spam.html @@ -208,7 +208,7 @@ <h1>Nutrient data for 07908, Luncheon meat, pork with ham, minced, canned, inclu <table> <thead> - <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr> + <tr><td colspan="6" style="vertical-align:middle;text-align:center;height:2em;" class="buttons"><input type="submit" name="_action_show" value="Apply Changes" class="calc" title="Click to recalculate measures" id="1732" /><a href="/ndb/help/contextHelp/measures" onclick="jQuery.ajax({type:'POST', 
url:'/ndb/help/contextHelp/measures',success:function(data,textStatus){jQuery('#helpDiv').html(data);},error:function(XMLHttpRequest,textStatus,errorThrown){},complete:function(XMLHttpRequest,textStatus){GRAILSUI.measuresHelpDialog.show();}});return false;" controller="help" action="contextHelp" id="measures"><img title="Click for more information on calculating household measures" src="/ndb/static/images/skin/help.png" alt="Help" border="0" style="vertical-align:middle"/></a></span></td></tr> <th style="vertical-align:middle">Nutrient</th> <th style="vertical-align:middle" >Unit</th> <th style="vertical-align:middle"><input type="text" name="Qv" style="width:30px;text-align:right;border-style:inset;height:15px" maxlength="5" value="1" id="Qv" /><br/>Value per 100.0g</th> diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 1fefec6035a20..23b42b612dace 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -369,7 +369,7 @@ def test_str_max_colwidth(self): def test_auto_detect(self): term_width, term_height = get_terminal_size() - fac = 1.05 # Arbitrary large factor to exceed term widht + fac = 1.05 # Arbitrary large factor to exceed term width cols = range(int(term_width * fac)) index = range(10) df = DataFrame(index=index, columns=cols) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 4b0ca872da326..bedb11d4fc4ae 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -650,7 +650,7 @@ def test_highlight_max(self): (0, 0): [''], (1, 0): ['']} assert result == expected - # separate since we cant negate the strs + # separate since we can't negate the strs df['C'] = ['a', 'b'] result = df.style.highlight_max()._compute().ctx expected = {(1, 1): ['background-color: yellow']} diff --git a/pandas/tests/io/msgpack/test_extension.py b/pandas/tests/io/msgpack/test_extension.py index 
26a611bea224c..2ee72c8a55cb4 100644 --- a/pandas/tests/io/msgpack/test_extension.py +++ b/pandas/tests/io/msgpack/test_extension.py @@ -46,7 +46,7 @@ def default(obj): typecode = 123 # application specific typecode data = tobytes(obj) return ExtType(typecode, data) - raise TypeError("Unknwon type object %r" % (obj, )) + raise TypeError("Unknown type object %r" % (obj, )) def ext_hook(code, data): print('ext_hook called', code, data) diff --git a/pandas/tests/io/msgpack/test_seq.py b/pandas/tests/io/msgpack/test_seq.py index 5f203e8997ccb..06e9872a22777 100644 --- a/pandas/tests/io/msgpack/test_seq.py +++ b/pandas/tests/io/msgpack/test_seq.py @@ -25,7 +25,7 @@ def test_exceeding_unpacker_read_size(): # double free or corruption (!prev) # 40 ok for read_size=1024, while 50 introduces errors - # 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** + # 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** # python: double free or corruption (!prev): for idx in range(NUMBER_OF_STRINGS): diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 7ff2ac9ff1305..b7d0dd1a3484f 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -217,8 +217,8 @@ def test_nat_parse(self): tm.assert_series_equal(expected, result.dtypes) # test with NaT for the nan_rep - # we don't have a method to specif the Datetime na_rep (it defaults - # to '') + # we don't have a method to specify the Datetime na_rep + # (it defaults to '') df.to_csv(path) result = self.read_csv(path, index_col=0, parse_dates=['B']) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 168144d78b3be..3263f71dea3c3 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -861,8 +861,8 @@ def test_excel_multindex_roundtrip(self): if (c_idx_levels == 1 and c_idx_names): continue - # empty name case current read in as 
unamed levels, - # not Nones + # empty name case current read in as unnamed + # levels, not Nones check_names = True if not r_idx_names and r_idx_levels > 1: check_names = False diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index b9d66426c9dcb..c343e0105eb4f 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -205,7 +205,7 @@ def test_list_numpy_float(self): def test_list_numpy_float_complex(self): if not hasattr(np, 'complex128'): - pytest.skip('numpy cant handle complex128') + pytest.skip('numpy can not handle complex128') x = [np.float32(np.random.rand()) for i in range(5)] + \ [np.complex128(np.random.rand() + 1j * np.random.rand()) diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index d5bcf72488d09..5d2ba8e4fa712 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -38,7 +38,7 @@ def current_pickle_data(): # --------------------- -# comparision functions +# comparison functions # --------------------- def compare_element(result, expected, typ, version=None): if isinstance(expected, Index): diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 305c1ebcedc6f..b40350ada546c 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -903,7 +903,7 @@ def test_append(self): 'items', 'major_axis', 'minor_axis']) assert_panel4d_equal(store['p4d'], p4d) - # test using differnt number of items on each axis + # test using different number of items on each axis p4d2 = p4d.copy() p4d2['l4'] = p4d['l1'] p4d2['l5'] = p4d['l1'] @@ -1300,11 +1300,11 @@ def test_append_with_different_block_ordering(self): df['int16'] = Series([1] * len(df), dtype='int16') store.append('df', df) - # store additonal fields in different blocks + # store additional fields in different blocks df['int16_2'] = Series([1] * len(df), dtype='int16') pytest.raises(ValueError, store.append, 'df', df) - # store multile 
additonal fields in different blocks + # store multile additional fields in different blocks df['float_3'] = Series([1.] * len(df), dtype='float64') pytest.raises(ValueError, store.append, 'df', df) @@ -1330,7 +1330,7 @@ def check_indexers(key, indexers): assert_panel4d_equal(store.select('p4d'), p4d) check_indexers('p4d', indexers) - # same as above, but try to append with differnt axes + # same as above, but try to append with different axes _maybe_remove(store, 'p4d') store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers) store.append('p4d', p4d.iloc[:, :, 10:, :], axes=[ @@ -2083,7 +2083,7 @@ def test_append_raise(self): assert df.dtypes['invalid'] == np.object_ pytest.raises(TypeError, store.append, 'df', df) - # directy ndarray + # directly ndarray pytest.raises(TypeError, store.append, 'df', np.arange(10)) # series directly @@ -3066,7 +3066,7 @@ def test_select_with_dups(self): expected = df.loc[:, ['A']] assert_frame_equal(result, expected) - # dups accross dtypes + # dups across dtypes df = concat([DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B']), DataFrame(np.random.randint(0, 10, size=20) @@ -5410,7 +5410,7 @@ def _compare_with_tz(self, a, b): b_e = b.loc[i, c] if not (a_e == b_e and a_e.tz == b_e.tz): raise AssertionError( - "invalid tz comparsion [%s] [%s]" % (a_e, b_e)) + "invalid tz comparison [%s] [%s]" % (a_e, b_e)) def test_append_with_timezones_dateutil(self): diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index d61b0a40380f3..3d25b0b51e052 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -675,7 +675,7 @@ def test_negative_log(self): def _compare_stacked_y_cood(self, normal_lines, stacked_lines): base = np.zeros(len(normal_lines[0].get_data()[1])) for nl, sl in zip(normal_lines, stacked_lines): - base += nl.get_data()[1] # get y coodinates + base += nl.get_data()[1] # get y coordinates sy = sl.get_data()[1] tm.assert_numpy_array_equal(base, sy) 
diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index bb590d5232b62..60ed280bc050e 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -219,7 +219,7 @@ def test_parallel_coordinates_with_sorted_labels(self): prev_next_tupels = zip([i for i in ordered_color_label_tuples[0:-1]], [i for i in ordered_color_label_tuples[1:]]) for prev, nxt in prev_next_tupels: - # lables and colors are ordered strictly increasing + # labels and colors are ordered strictly increasing assert prev[1] < nxt[1] and prev[0] < nxt[0] @pytest.mark.slow diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 0312af12e0715..22925cceb30d1 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -341,7 +341,7 @@ def test_basic_drop_first_one_level(self, sparse): assert_frame_equal(result, expected) def test_basic_drop_first_NA(self, sparse): - # Test NA hadling together with drop_first + # Test NA handling together with drop_first s_NA = ['a', 'b', np.nan] res = get_dummies(s_NA, drop_first=True, sparse=sparse) exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 14bf194ba5ee4..f2b7c20b774b0 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -704,7 +704,7 @@ def test_numpy_round(self): def test_built_in_round(self): if not compat.PY3: pytest.skip( - 'build in round cannot be overriden prior to Python 3') + 'build in round cannot be overridden prior to Python 3') s = Series([1.123, 2.123, 3.123], index=lrange(3)) result = round(s) @@ -1338,7 +1338,7 @@ def test_numpy_argmin_deprecated(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): # The deprecation of Series.argmin also causes a deprecation # warning when calling np.argmin. 
This behavior is temporary - # until the implemention of Series.argmin is corrected. + # until the implementation of Series.argmin is corrected. result = np.argmin(s) assert result == 1 @@ -1408,7 +1408,7 @@ def test_numpy_argmax_deprecated(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): # The deprecation of Series.argmax also causes a deprecation # warning when calling np.argmax. This behavior is temporary - # until the implemention of Series.argmax is corrected. + # until the implementation of Series.argmax is corrected. result = np.argmax(s) assert result == 10 diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index a2838f803421c..8ae7feab451f9 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -216,7 +216,7 @@ def test_tab_completion(self): assert 'dt' not in dir(s) assert 'cat' not in dir(s) - # similiarly for .dt + # similarly for .dt s = Series(date_range('1/1/2015', periods=5)) assert 'dt' in dir(s) assert 'str' not in dir(s) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 08416fe34efcc..5de5f1f0584f4 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -487,7 +487,7 @@ def test_constructor_dtype_nocast(self): def test_constructor_datelike_coercion(self): # GH 9477 - # incorrectly infering on dateimelike looking when object dtype is + # incorrectly inferring on dateimelike looking when object dtype is # specified s = Series([Timestamp('20130101'), 'NOV'], dtype=object) assert s.iloc[0] == Timestamp('20130101') diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 00fa980d9a139..0503a7b30e91c 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -1616,7 +1616,7 @@ def test_where_numeric_with_string(self): def test_setitem_boolean(self): mask = self.series > self.series.median() - 
# similiar indexed series + # similar indexed series result = self.series.copy() result[mask] = self.series * 2 expected = self.series * 2 @@ -1668,7 +1668,7 @@ def test_setitem_na(self): s[::2] = np.nan assert_series_equal(s, expected) - # get's coerced to float, right? + # gets coerced to float, right? expected = Series([np.nan, 1, np.nan, 0]) s = Series([True, True, False, False]) s[::2] = np.nan @@ -2113,7 +2113,7 @@ def test_reindex_pad(self): result = s.reindex(new_index, method='ffill') assert_series_equal(result, expected) - # inferrence of new dtype + # inference of new dtype s = Series([True, False, False, True], index=list('abcd')) new_index = 'agc' result = s.reindex(list(new_index)).ffill() diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 433e3cf440cbd..ce4e388bc6f39 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1359,7 +1359,7 @@ def check(series, other): expecteds = divmod(series.values, np.asarray(other_np)) for result, expected in zip(results, expecteds): - # check the values, name, and index separatly + # check the values, name, and index separately assert_almost_equal(np.asarray(result), expected) assert result.name == series.name @@ -1449,7 +1449,7 @@ def timedelta64(*args): assert_series_equal(lhs, rhs) except: raise AssertionError( - "invalid comparsion [op->{0},d->{1},h->{2},m->{3}," + "invalid comparison [op->{0},d->{1},h->{2},m->{3}," "s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs)) diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index df76390d7ce7a..cb905d8186ea9 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -116,7 +116,8 @@ def setup_method(self, method): def test_invalida_delgation(self): # these show that in order for the delegation to work - # the _delegate_* methods need to be overriden to not raise a TypeError + # the _delegate_* methods need to be overridden to not 
raise + # a TypeError self.Delegate._add_delegate_accessors( delegate=self.Delegator, diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 592b069ef8bac..86d9a9fa91e47 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2383,7 +2383,7 @@ def test_iloc_mi(self): class TestSorted(Base): - """ everthing you wanted to test about sorting """ + """ everything you wanted to test about sorting """ def test_sort_index_preserve_levels(self): result = self.frame.sort_index() diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index d772dba25868e..770560134d8d6 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2582,19 +2582,19 @@ def test_truncate(self): trunced = self.panel.truncate(start, end).to_panel() expected = self.panel.to_panel()['ItemA'].truncate(start, end) - # TODO trucate drops index.names + # TODO truncate drops index.names assert_frame_equal(trunced['ItemA'], expected, check_names=False) trunced = self.panel.truncate(before=start).to_panel() expected = self.panel.to_panel()['ItemA'].truncate(before=start) - # TODO trucate drops index.names + # TODO truncate drops index.names assert_frame_equal(trunced['ItemA'], expected, check_names=False) trunced = self.panel.truncate(after=end).to_panel() expected = self.panel.to_panel()['ItemA'].truncate(after=end) - # TODO trucate drops index.names + # TODO truncate drops index.names assert_frame_equal(trunced['ItemA'], expected, check_names=False) # truncate on dates that aren't in there diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 57bd5e7b62fdf..d0350ba252329 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -82,7 +82,7 @@ def test_int64_overflow_moar(self): # verify this is testing what it is supposed to test! 
assert is_int64_overflow_possible(gr.grouper.shape) - # mannually compute groupings + # manually compute groupings jim, joe = defaultdict(list), defaultdict(list) for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']): jim[key].append(a) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index ccffc554e00c7..6f9e872526d0a 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -3417,7 +3417,7 @@ def test_frame_on(self): # test as a frame # we should be ignoring the 'on' as an aggregation column - # note that the expected is setting, computing, and reseting + # note that the expected is setting, computing, and resetting # so the columns need to be switched compared # to the actual result where they are ordered as in the # original @@ -3815,7 +3815,7 @@ def test_ragged_apply(self): def test_all(self): - # simple comparision of integer vs time-based windowing + # simple comparison of integer vs time-based windowing df = self.regular * 2 er = df.rolling(window=1) r = df.rolling(window='1s') @@ -3837,7 +3837,7 @@ def test_all(self): def test_all2(self): - # more sophisticated comparision of integer vs. + # more sophisticated comparison of integer vs. 
# time-based windowing df = DataFrame({'B': np.arange(50)}, index=pd.date_range('20130101', diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index edabf4a7ccc99..e1a6463e7c351 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3016,7 +3016,7 @@ def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset): t.minute == tstart.minute and t.second == tstart.second) elif offset_name in self.valid_date_offsets_singular: - # expect the signular offset value to match between tstart and t + # expect the singular offset value to match between tstart and t datepart_offset = getattr(t, offset_name if offset_name != 'weekday' else 'dayofweek') @@ -3063,7 +3063,7 @@ def test_springforward_plural(self): expected_utc_offset=hrs_post) def test_fallback_singular(self): - # in the case of signular offsets, we dont neccesarily know which utc + # in the case of singular offsets, we don't necessarily know which utc # offset the new Timestamp will wind up in (the tz for 1 month may be # different from 1 second) so we don't specify an expected_utc_offset for tz, utc_offsets in self.timezone_utc_offsets.items(): diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 9530cd5ac3f43..b3813d03532fb 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -577,7 +577,7 @@ def test_ambiguous_nat(self): '11/06/2011 03:00'] di_test = DatetimeIndex(times, tz='US/Eastern') - # left dtype is datetime64[ns, US/Eastern] + # left dtype is datetime64[ns, US/Eastern] # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] tm.assert_numpy_array_equal(di_test.values, localized.values) diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 0e6cbea21493c..4e874eac9e6c6 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -133,7 +133,7 @@ def 
__init__(self, name, year=None, month=None, day=None, offset=None, Name of the holiday , defaults to class name offset : array of pandas.tseries.offsets or class from pandas.tseries.offsets - computes offset from date + computes offset from date observance: function computes when holiday is given a pandas Timestamp days_of_week: diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 728db6af5558b..b30ffc7416f92 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -40,7 +40,7 @@ def _check_for_default_values(fname, arg_val_dict, compat_args): """ for key in arg_val_dict: # try checking equality directly with '=' operator, - # as comparison may have been overriden for the left + # as comparison may have been overridden for the left # hand object try: v1 = arg_val_dict[key] @@ -292,7 +292,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): out[ax] = v # All user-provided kwargs have been handled now. - # Now we supplement with positional arguments, emmitting warnings + # Now we supplement with positional arguments, emitting warnings # when there's ambiguity and raising when there's conflicts if len(args) == 0: @@ -307,7 +307,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): "or 'columns'") raise TypeError(msg) - msg = ("Intepreting call\n\t'.{method_name}(a, b)' as " + msg = ("Interpreting call\n\t'.{method_name}(a, b)' as " "\n\t'.{method_name}(index=a, columns=b)'.\nUse named " "arguments to remove any ambiguity. 
In the future, using " "positional arguments for 'index' or 'columns' will raise " diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 8acf16536f1de..8dc0aa1e85ef4 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -107,7 +107,7 @@ def round_trip_pickle(obj, path=None): def round_trip_pathlib(writer, reader, path=None): """ - Write an object to file specifed by a pathlib.Path and read it back + Write an object to file specified by a pathlib.Path and read it back Parameters ---------- @@ -136,7 +136,7 @@ def round_trip_pathlib(writer, reader, path=None): def round_trip_localpath(writer, reader, path=None): """ - Write an object to file specifed by a py.path LocalPath and read it back + Write an object to file specified by a py.path LocalPath and read it back Parameters ---------- @@ -1784,8 +1784,8 @@ def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True, """ nrows, ncols - number of data rows/cols c_idx_names, idx_names - False/True/list of strings, yields No names , - default names or uses the provided names for the levels of the - corresponding index. You can provide a single string when + default names or uses the provided names for the levels of the + corresponding index. You can provide a single string when c_idx_nlevels ==1. c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex @@ -2081,7 +2081,7 @@ def network(t, url="http://www.google.com", _skip_on_messages: iterable of string any exception e for which one of the strings is a substring of str(e) will be skipped with an appropriate - message. Intended to supress errors where an errno isn't available. + message. Intended to suppress errors where an errno isn't available. 
Notes ----- diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py index 4f3b519775c39..0dd609417d7ba 100755 --- a/scripts/find_commits_touching_func.py +++ b/scripts/find_commits_touching_func.py @@ -26,21 +26,21 @@ import argparse desc = """ -Find all commits touching a sepcified function across the codebase. +Find all commits touching a specified function across the codebase. """.strip() argparser = argparse.ArgumentParser(description=desc) argparser.add_argument('funcname', metavar='FUNCNAME', help='Name of function/method to search for changes on.') argparser.add_argument('-f', '--file-masks', metavar='f_re(,f_re)*', default=["\.py.?$"], - help='comma seperated list of regexes to match filenames against\n'+ + help='comma separated list of regexes to match filenames against\n'+ 'defaults all .py? files') argparser.add_argument('-d', '--dir-masks', metavar='d_re(,d_re)*', default=[], - help='comma seperated list of regexes to match base path against') + help='comma separated list of regexes to match base path against') argparser.add_argument('-p', '--path-masks', metavar='p_re(,p_re)*', default=[], - help='comma seperated list of regexes to match full file path against') + help='comma separated list of regexes to match full file path against') argparser.add_argument('-y', '--saw-the-warning', action='store_true',default=False, help='must specify this to run, acknowledge you realize this will erase untracked files') diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py index 49273bacccf98..32b23a67b187f 100755 --- a/scripts/find_undoc_args.py +++ b/scripts/find_undoc_args.py @@ -19,7 +19,7 @@ parser.add_argument('-m', '--module', metavar='MODULE', type=str,required=True, help='name of package to import and examine',action='store') parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,required=False, - help='github project where the the code lives, e.g. 
"pandas-dev/pandas"', + help='github project where the code lives, e.g. "pandas-dev/pandas"', default=None,action='store') args = parser.parse_args() diff --git a/setup.py b/setup.py index 515e1660fa6de..443f3eba69b4d 100755 --- a/setup.py +++ b/setup.py @@ -109,7 +109,7 @@ def build_extensions(self): # generate template output if cython: for pxifile in _pxifiles: - # build pxifiles first, template extention must be .pxi.in + # build pxifiles first, template extension must be .pxi.in assert pxifile.endswith('.pxi.in') outfile = pxifile[:-3]
Both user and non-user facing. Found via `codespell -q 3 -I ../pandas-whitelist.txt` whereby the whitelist consisted of: ``` behaviour indicies initialise initialised initialising resetted thru writeable ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18966
2017-12-28T10:49:32Z
2017-12-30T16:15:09Z
2017-12-30T16:15:09Z
2017-12-30T17:11:53Z
Remove unused/unreachable code from ops
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 89d793a586e74..70c1b93122609 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -156,6 +156,7 @@ def add_methods(cls, new_methods, force, select, exclude): raise TypeError("May only pass either select or exclude") if select: + # TODO: This is not hit in coverage report. Is it still needed? select = set(select) methods = {} for key, method in new_methods.items(): @@ -164,6 +165,7 @@ def add_methods(cls, new_methods, force, select, exclude): new_methods = methods if exclude: + # TODO: This is not hit in coverage report. Is it still needed? for k in exclude: new_methods.pop(k, None) @@ -354,24 +356,26 @@ class _TimeOp(_Op): """ Wrapper around Series datetime/time/timedelta arithmetic operations. Generally, you should use classmethod ``_Op.get_op`` as an entry point. + + This is only reached in cases where either self.is_datetime_lhs + or self.is_timedelta_lhs. """ fill_value = iNaT def __init__(self, left, right, name, na_op): super(_TimeOp, self).__init__(left, right, name, na_op) - lvalues = self._convert_to_array(left, name=name) - rvalues = self._convert_to_array(right, name=name, other=lvalues) + lvalues = self._convert_to_array(left) + rvalues = self._convert_to_array(right, other=lvalues) # left - self.is_offset_lhs = is_offsetlike(left) self.is_timedelta_lhs = is_timedelta64_dtype(lvalues) self.is_datetime64_lhs = is_datetime64_dtype(lvalues) self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues) self.is_datetime_lhs = (self.is_datetime64_lhs or self.is_datetime64tz_lhs) - self.is_integer_lhs = left.dtype.kind in ['i', 'u'] - self.is_floating_lhs = left.dtype.kind == 'f' + assert left.dtype.kind not in ['i', 'u', 'f'], left + assert self.is_timedelta_lhs or self.is_datetime_lhs # right self.is_offset_rhs = is_offsetlike(right) @@ -440,44 +444,12 @@ def _validate_timedelta(self, name): 'of a series/ndarray of type datetime64[ns] ' 'or a timedelta') - def _validate_offset(self, name): - # 
assumes self.is_offset_lhs - - if self.is_timedelta_rhs: - # 2 timedeltas - if name not in ('__div__', '__rdiv__', '__truediv__', - '__rtruediv__', '__add__', '__radd__', '__sub__', - '__rsub__'): - raise TypeError("can only operate on a timedeltas for addition" - ", subtraction, and division, but the operator" - " [{name}] was passed".format(name=name)) - - elif self.is_datetime_rhs: - if name not in ('__add__', '__radd__'): - raise TypeError("can only operate on a timedelta/DateOffset " - "and a datetime for addition, but the operator" - " [{name}] was passed".format(name=name)) - - else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 'or a timedelta') - def _validate(self, lvalues, rvalues, name): if self.is_datetime_lhs: return self._validate_datetime(lvalues, rvalues, name) - elif self.is_timedelta_lhs: - return self._validate_timedelta(name) - elif self.is_offset_lhs: - return self._validate_offset(name) - - if ((self.is_integer_lhs or self.is_floating_lhs) and - self.is_timedelta_rhs): - self._check_timedelta_with_numeric(name) else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 'or a timedelta') + # The only other option is self.is_timedelta_lhs + return self._validate_timedelta(name) def _check_timedelta_with_numeric(self, name): if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'): @@ -486,9 +458,10 @@ def _check_timedelta_with_numeric(self, name): "multiplication, but the operator [{name}] " "was passed".format(name=name)) - def _convert_to_array(self, values, name=None, other=None): + def _convert_to_array(self, values, other=None): """converts values to ndarray""" from pandas.core.tools.timedeltas import to_timedelta + name = self.name ovalues = values supplied_dtype = None @@ -516,17 +489,13 @@ def _convert_to_array(self, values, name=None, other=None): # a datelike elif isinstance(values, pd.DatetimeIndex): 
values = values.to_series() - # datetime with tz - elif (isinstance(ovalues, datetime.datetime) and - hasattr(ovalues, 'tzinfo')): + elif isinstance(ovalues, (datetime.datetime, np.datetime64)): + # original input was scalar datetimelike values = pd.DatetimeIndex(values) # datetime array with tz elif is_datetimetz(values): if isinstance(values, ABCSeries): values = values._values - elif not (isinstance(values, (np.ndarray, ABCSeries)) and - is_datetime64_dtype(values)): - values = libts.array_to_datetime(values) elif (is_datetime64_dtype(values) and not is_datetime64_ns_dtype(values)): # GH#7996 e.g. np.datetime64('2013-01-01') is datetime64[D] @@ -569,10 +538,8 @@ def _convert_for_datetime(self, lvalues, rvalues): # datetime subtraction means timedelta if self.is_datetime_lhs and self.is_datetime_rhs: - if self.name in ('__sub__', '__rsub__'): - self.dtype = 'timedelta64[ns]' - else: - self.dtype = 'datetime64[ns]' + assert self.name in ('__sub__', '__rsub__') + self.dtype = 'timedelta64[ns]' elif self.is_datetime64tz_lhs: self.dtype = lvalues.dtype elif self.is_datetime64tz_rhs: @@ -595,9 +562,7 @@ def _offset(lvalues, rvalues): self.na_op = lambda x, y: getattr(x, self.name)(y) return lvalues, rvalues - if self.is_offset_lhs: - lvalues, rvalues = _offset(lvalues, rvalues) - elif self.is_offset_rhs: + if self.is_offset_rhs: rvalues, lvalues = _offset(rvalues, lvalues) else: @@ -616,8 +581,6 @@ def _offset(lvalues, rvalues): self.dtype = 'timedelta64[ns]' # convert Tick DateOffset to underlying delta - if self.is_offset_lhs: - lvalues = to_timedelta(lvalues, box=False) if self.is_offset_rhs: rvalues = to_timedelta(rvalues, box=False) @@ -628,7 +591,7 @@ def _offset(lvalues, rvalues): # time delta division -> unit less # integer gets converted to timedelta in np < 1.6 if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and - not self.is_integer_rhs and not self.is_integer_lhs and + not self.is_integer_rhs and self.name in ('__div__', '__rdiv__', '__truediv__', 
'__rtruediv__', '__floordiv__', '__rfloordiv__')): @@ -722,23 +685,20 @@ def na_op(x, y): result = missing.fill_zeros(result, x, y, name, fill_zeros) return result - def safe_na_op(lvalues, rvalues): + def safe_na_op(lvalues, rvalues, na_op): + # We pass na_op explicitly here for namespace/closure reasons; + # the alternative is to make it an argument to `wrapper`, which + # would mess with the signatures of Series methods. try: with np.errstate(all='ignore'): return na_op(lvalues, rvalues) except Exception: - if isinstance(rvalues, ABCSeries): - if is_object_dtype(rvalues): - # if dtype is object, try elementwise op - return libalgos.arrmap_object(rvalues, - lambda x: op(lvalues, x)) - else: - if is_object_dtype(lvalues): - return libalgos.arrmap_object(lvalues, - lambda x: op(x, rvalues)) + if is_object_dtype(lvalues): + return libalgos.arrmap_object(lvalues, + lambda x: op(x, rvalues)) raise - def wrapper(left, right, name=name, na_op=na_op): + def wrapper(left, right): if isinstance(right, ABCDataFrame): return NotImplemented @@ -747,35 +707,29 @@ def wrapper(left, right, name=name, na_op=na_op): converted = _Op.get_op(left, right, name, na_op) - left, right = converted.left, converted.right lvalues, rvalues = converted.lvalues, converted.rvalues dtype = converted.dtype wrap_results = converted.wrap_results - na_op = converted.na_op if isinstance(rvalues, ABCSeries): - name = _maybe_match_name(left, rvalues) + res_name = _maybe_match_name(left, rvalues) lvalues = getattr(lvalues, 'values', lvalues) rvalues = getattr(rvalues, 'values', rvalues) # _Op aligns left and right else: if isinstance(rvalues, pd.Index): - name = _maybe_match_name(left, rvalues) + res_name = _maybe_match_name(left, rvalues) else: - name = left.name + res_name = left.name if (hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex)): lvalues = lvalues.values - result = wrap_results(safe_na_op(lvalues, rvalues)) - return construct_result( - left, - result, - 
index=left.index, - name=name, - dtype=dtype, - ) + result = wrap_results(safe_na_op(lvalues, rvalues, converted.na_op)) + return construct_result(left, result, + index=left.index, name=res_name, dtype=dtype) + wrapper.__name__ = name return wrapper @@ -858,7 +812,7 @@ def na_op(x, y): def wrapper(self, other, axis=None): # Validate the axis parameter if axis is not None: - self._get_axis_number(axis) + axis = self._get_axis_number(axis) if isinstance(other, ABCSeries): name = _maybe_match_name(self, other) @@ -1381,23 +1335,6 @@ def f(self, other): def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None, default_axis=None, **eval_kwargs): - # copied from Series na_op above, but without unnecessary branch for - # non-scalar - def na_op(x, y): - import pandas.core.computation.expressions as expressions - - try: - result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs) - except TypeError: - - # TODO: might need to find_common_type here? - result = np.empty(len(x), dtype=x.dtype) - mask = notna(x) - result[mask] = op(x[mask], y) - result, changed = maybe_upcast_putmask(result, ~mask, np.nan) - - result = missing.fill_zeros(result, x, y, name, fill_zeros) - return result # work only for scalars def f(self, other): diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 8ae7feab451f9..d7b7f1f0b6ac7 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -752,3 +752,17 @@ def test_dt_accessor_api_for_categorical(self): AttributeError, "Can only use .dt accessor with datetimelike"): invalid.dt assert not hasattr(invalid, 'str') + + +@pytest.mark.parametrize('opname', [ + '__add__', '__radd__', + '__sub__', '__rsub__', + '__mul__', '__rmul__', + # '__div__', '__rdiv__', # TODO: Is this different in py2 vs py3? 
+ '__truediv__', '__rtruediv__', + '__floordiv__', '__rfloordiv__', + '__mod__', '__rmod__', '__divmod__', + '__pow__', '__rpow__']) +def test_generated_op_names(opname): + method = getattr(pd.Series, opname) + assert method.__name__ == opname diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index a421f2cb15bba..d92e00374b3be 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1068,6 +1068,63 @@ def test_frame_sub_datetime64_not_ns(self): Timedelta(days=2)]) tm.assert_frame_equal(res, expected) + def test_scalar_op_retains_name(self): + # GH#18964 + ser = pd.Series([pd.Timedelta(days=1, hours=2)], name='NCC-1701D') + + dt = pd.Timestamp(1968, 7, 8) + res = ser + dt + assert res.name == ser.name + res = dt - ser + assert res.name == ser.name + + @pytest.mark.xfail(reason='GH#18963 DatetimeIndex+Series[datetime64] ' + 'returns DatetimeIndex with wrong name.') + def test_dti_op_retains_name(self): + # GH#18964 + ser = pd.Series([pd.Timedelta(days=1, hours=2)], name='NCC-1701D') + dti = pd.DatetimeIndex([pd.Timestamp(1968, 7, 8)], name='Serenity') + + expected = pd.Series([ser[0] + dti[0]], name=None) + res = ser + dti + tm.assert_series_equal(res, expected) + res = dti + ser # GH#18963 wrong type and name + tm.assert_series_equal(res, expected) + + expected = pd.Series([dti[0] - ser[0]], name=None) + res = dti - ser # GH#18963 wrong type and name + tm.assert_series_equal(res, expected) + res = -ser + dti + tm.assert_series_equal(res, expected) + + res = ser + dti.rename(name=ser.name) + tm.assert_series_equal(res, expected, check_names=False) + assert res.name == ser.name # TODO: Series.rename(name=...) 
fails + + @pytest.mark.xfail(reason='GH#18824 Series[timedelta64]+TimedeltaIndex' + 'gets name from TimedeltaIndex;' + 'reverse op raises ValueError') + def test_tdi_op_retains_name(self): + # GH#18964 + ser = pd.Series([pd.Timedelta(days=1, hours=2)], name='NCC-1701D') + tdi = pd.TimedeltaIndex(ser, name='Heart Of Gold') + + expected = pd.Series([ser[0] + tdi[0]], name=None) + res = ser + tdi # right type, wrong name + tm.assert_series_equal(res, expected) + res = tdi + ser + tm.assert_series_equal(res, expected) + + expected = pd.Series([ser[0] - tdi[0]], name=None) + res = ser - tdi # right type, wrong name + tm.assert_series_equal(res, expected) + res = -tdi + ser + tm.assert_series_equal(res, expected) + + res = ser + tdi.rename(name=ser.name) + tm.assert_series_equal(res, expected, check_names=False) + assert res.name == ser.name # TODO: Series.rename(name=...) fails + def test_operators_datetimelike(self): def run_ops(ops, get_ser, test_ser):
Add TODO comments for questionable cases. - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/18964
2017-12-28T05:57:15Z
2018-01-01T17:55:42Z
null
2023-05-11T01:17:02Z
DOC: Link to Python 3 versions of official Python docs
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index ecb9a8f2d79db..f9995472866ed 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1728,7 +1728,7 @@ built-in string methods. For example: Powerful pattern-matching methods are provided as well, but note that pattern-matching generally uses `regular expressions -<https://docs.python.org/2/library/re.html>`__ by default (and in some cases +<https://docs.python.org/3/library/re.html>`__ by default (and in some cases always uses them). Please see :ref:`Vectorized String Methods <text.string_methods>` for a complete diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index cbe945e0cf2cf..d2ca76713ba3b 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -468,8 +468,8 @@ This Python syntax is **not** allowed: * Statements - - Neither `simple <http://docs.python.org/2/reference/simple_stmts.html>`__ - nor `compound <http://docs.python.org/2/reference/compound_stmts.html>`__ + - Neither `simple <https://docs.python.org/3/reference/simple_stmts.html>`__ + nor `compound <https://docs.python.org/3/reference/compound_stmts.html>`__ statements are allowed. This includes things like ``for``, ``while``, and ``if``. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 2f3dbb9746066..3c2fd4d959d63 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -232,7 +232,7 @@ as an attribute: - You can use this access only if the index element is a valid python identifier, e.g. ``s.1`` is not allowed. See `here for an explanation of valid identifiers - <http://docs.python.org/2.7/reference/lexical_analysis.html#identifiers>`__. + <https://docs.python.org/3/reference/lexical_analysis.html#identifiers>`__. - The attribute will not be available if it conflicts with an existing method name, e.g. ``s.min`` is not allowed. 
diff --git a/doc/source/install.rst b/doc/source/install.rst index 979d5afd0a04f..6133da220aa8d 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -234,7 +234,7 @@ Optional Dependencies * `psycopg2 <http://initd.org/psycopg/>`__: for PostgreSQL * `pymysql <https://github.com/PyMySQL/PyMySQL>`__: for MySQL. - * `SQLite <https://docs.python.org/3.5/library/sqlite3.html>`__: for SQLite, this is included in Python's standard library by default. + * `SQLite <https://docs.python.org/3/library/sqlite3.html>`__: for SQLite, this is included in Python's standard library by default. * `matplotlib <http://matplotlib.org/>`__: for plotting, Version 1.4.3 or higher. * For Excel I/O: diff --git a/doc/source/io.rst b/doc/source/io.rst index 03c2ce23eb35d..a5a0a41147a6b 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -3065,7 +3065,7 @@ any pickled pandas object (or any other pickled object) from file: Loading pickled data received from untrusted sources can be unsafe. - See: https://docs.python.org/3.6/library/pickle.html + See: https://docs.python.org/3/library/pickle.html .. warning:: @@ -4545,7 +4545,7 @@ facilitate data retrieval and to reduce dependency on DB-specific API. Database is provided by SQLAlchemy if installed. In addition you will need a driver library for your database. Examples of such drivers are `psycopg2 <http://initd.org/psycopg/>`__ for PostgreSQL or `pymysql <https://github.com/PyMySQL/PyMySQL>`__ for MySQL. -For `SQLite <https://docs.python.org/3.5/library/sqlite3.html>`__ this is +For `SQLite <https://docs.python.org/3/library/sqlite3.html>`__ this is included in Python's standard library by default. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. 
diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index f968cdad100ba..e20537efc0e71 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -559,7 +559,7 @@ String/Regular Expression Replacement backslashes than strings without this prefix. Backslashes in raw strings will be interpreted as an escaped backslash, e.g., ``r'\' == '\\'``. You should `read about them - <http://docs.python.org/2/reference/lexical_analysis.html#string-literals>`__ + <https://docs.python.org/3/reference/lexical_analysis.html#string-literals>`__ if this is unclear. Replace the '.' with ``NaN`` (str -> str) diff --git a/doc/source/text.rst b/doc/source/text.rst index 85b8aa6aa1857..2a86d92978043 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -119,7 +119,7 @@ i.e., from the end of the string to the beginning of the string: s2.str.rsplit('_', expand=True, n=1) Methods like ``replace`` and ``findall`` take `regular expressions -<https://docs.python.org/2/library/re.html>`__, too: +<https://docs.python.org/3/library/re.html>`__, too: .. ipython:: python @@ -221,7 +221,7 @@ Extract first match in each subject (extract) confusing from the perspective of a user. The ``extract`` method accepts a `regular expression -<https://docs.python.org/2/library/re.html>`__ with at least one +<https://docs.python.org/3/library/re.html>`__ with at least one capture group. 
Extracting a regular expression with more than one group returns a diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index d7edae865911a..ebd5fc12775a4 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -94,7 +94,7 @@ cdef int dayofweek(int y, int m, int d) nogil: See Also -------- - [1] https://docs.python.org/3.6/library/calendar.html#calendar.weekday + [1] https://docs.python.org/3/library/calendar.html#calendar.weekday [2] https://en.wikipedia.org/wiki/\ Determination_of_the_day_of_the_week#Sakamoto.27s_methods diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index f44fa347cb053..434d7f6ccfe13 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -169,9 +169,9 @@ def eval(expr, parser='pandas', engine=None, truediv=True, expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements - <http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__, + <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions - <http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__. + <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. 
The default of ``'pandas'`` parses code slightly different than standard diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 4a66475c85691..a441e6c3fd36a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -65,7 +65,7 @@ def strftime(self, date_format): Returns ------- ndarray of formatted strings - """.format("https://docs.python.org/2/library/datetime.html" + """.format("https://docs.python.org/3/library/datetime.html" "#strftime-and-strptime-behavior") diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 143b76575e36b..fa953f7d876cc 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -54,7 +54,7 @@ def read_pickle(path, compression='infer'): file path Warning: Loading pickled data received from untrusted sources can be - unsafe. See: http://docs.python.org/2.7/library/pickle.html + unsafe. See: https://docs.python.org/3/library/pickle.html Parameters ---------- diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 6553dd66cba5f..1fefec6035a20 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -254,8 +254,7 @@ def test_repr_is_valid_construction_code(self): tm.assert_series_equal(Series(res), Series(idx)) def test_repr_should_return_str(self): - # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__ - # http://docs.python.org/reference/datamodel.html#object.__repr__ + # https://docs.python.org/3/reference/datamodel.html#object.__repr__ # "...The return value must be a string object." 
# (str on py2.x, str (unicode) on py3) diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index bf3e584657763..97236f028b1c4 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -140,8 +140,7 @@ def test_repr_name_iterable_indexable(self): repr(s) def test_repr_should_return_str(self): - # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__ - # http://docs.python.org/reference/datamodel.html#object.__repr__ + # https://docs.python.org/3/reference/datamodel.html#object.__repr__ # ...The return value must be a string object. # (str on py2.x, str (unicode) on py3)
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Went through and made sure that all links to the official Python docs start with `https://docs.python.org/3/` to ensure that the links go to the most recent stable version of Python 3.
https://api.github.com/repos/pandas-dev/pandas/pulls/18962
2017-12-28T00:27:21Z
2017-12-28T11:37:48Z
2017-12-28T11:37:48Z
2017-12-28T20:47:52Z
Fix Timedelta.__floordiv__, __rfloordiv__
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 77de1851490b2..9abf04bf8a83c 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -415,6 +415,7 @@ Numeric - Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) - Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`) - Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`) +- Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`) - Categorical diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index af3fa738fad14..8dba8c15f0b81 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1031,13 +1031,27 @@ class Timedelta(_Timedelta): __rdiv__ = __rtruediv__ def __floordiv__(self, other): + # numpy does not implement floordiv for timedelta64 dtype, so we cannot + # just defer + if hasattr(other, '_typ'): + # Series, DataFrame, ... 
+ return NotImplemented + if hasattr(other, 'dtype'): - # work with i8 - other = other.astype('m8[ns]').astype('i8') - return self.value // other + if other.dtype.kind == 'm': + # also timedelta-like + return _broadcast_floordiv_td64(self.value, other, _floordiv) + elif other.dtype.kind in ['i', 'u', 'f']: + if other.ndim == 0: + return Timedelta(self.value // other) + else: + return self.to_timedelta64() // other + + raise TypeError('Invalid dtype {dtype} for ' + '{op}'.format(dtype=other.dtype, + op='__floordiv__')) - elif is_integer_object(other): - # integers only + elif is_integer_object(other) or is_float_object(other): return Timedelta(self.value // other, unit='ns') elif not _validate_ops_compat(other): @@ -1049,20 +1063,79 @@ class Timedelta(_Timedelta): return self.value // other.value def __rfloordiv__(self, other): - if hasattr(other, 'dtype'): - # work with i8 - other = other.astype('m8[ns]').astype('i8') - return other // self.value + # numpy does not implement floordiv for timedelta64 dtype, so we cannot + # just defer + if hasattr(other, '_typ'): + # Series, DataFrame, ... + return NotImplemented + if hasattr(other, 'dtype'): + if other.dtype.kind == 'm': + # also timedelta-like + return _broadcast_floordiv_td64(self.value, other, _rfloordiv) + raise TypeError('Invalid dtype {dtype} for ' + '{op}'.format(dtype=other.dtype, + op='__floordiv__')) + + if is_float_object(other) and util._checknull(other): + # i.e. 
np.nan + return NotImplemented elif not _validate_ops_compat(other): return NotImplemented other = Timedelta(other) if other is NaT: - return NaT + return np.nan return other.value // self.value +cdef _floordiv(int64_t value, right): + return value // right + + +cdef _rfloordiv(int64_t value, right): + # analogous to referencing operator.div, but there is no operator.rfloordiv + return right // value + + +cdef _broadcast_floordiv_td64(int64_t value, object other, + object (*operation)(int64_t value, + object right)): + """Boilerplate code shared by Timedelta.__floordiv__ and + Timedelta.__rfloordiv__ because np.timedelta64 does not implement these. + + Parameters + ---------- + value : int64_t; `self.value` from a Timedelta object + other : object + operation : function, either _floordiv or _rfloordiv + + Returns + ------- + result : varies based on `other` + """ + # assumes other.dtype.kind == 'm', i.e. other is timedelta-like + cdef: + int ndim = getattr(other, 'ndim', -1) + + # We need to watch out for np.timedelta64('NaT'). 
+ mask = other.view('i8') == NPY_NAT + + if ndim == 0: + if mask: + return np.nan + + return operation(value, other.astype('m8[ns]').astype('i8')) + + else: + res = operation(value, other.astype('m8[ns]').astype('i8')) + + if mask.any(): + res = res.astype('f8') + res[mask] = np.nan + return res + + # resolution in ns -Timedelta.min = Timedelta(np.iinfo(np.int64).min +1) +Timedelta.min = Timedelta(np.iinfo(np.int64).min + 1) Timedelta.max = Timedelta(np.iinfo(np.int64).max) diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 69ce7a42851a1..d0d204253e3f1 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -273,6 +273,16 @@ def test_nat_arithmetic(): assert right - left is NaT +def test_nat_rfloordiv_timedelta(): + # GH#18846 + # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv + td = Timedelta(hours=3, minutes=4) + + assert td // np.nan is NaT + assert np.isnan(td // NaT) + assert np.isnan(td // np.timedelta64('NaT')) + + def test_nat_arithmetic_index(): # GH 11718 diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 310555c19ea99..8c574d8f8873b 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -136,6 +136,7 @@ def test_binary_ops_nat(self): assert (td * pd.NaT) is pd.NaT assert (td / pd.NaT) is np.nan assert (td // pd.NaT) is np.nan + assert (td // np.timedelta64('NaT')) is np.nan def test_binary_ops_integers(self): td = Timedelta(10, unit='d') @@ -162,6 +163,98 @@ def test_binary_ops_with_timedelta(self): # invalid multiply with another timedelta pytest.raises(TypeError, lambda: td * td) + def test_floordiv(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + # scalar others + assert td // scalar == 1 + assert -td // scalar.to_pytimedelta() == -2 + assert (2 * td) // scalar.to_timedelta64() == 2 + + assert td // np.nan is pd.NaT + assert 
np.isnan(td // pd.NaT) + assert np.isnan(td // np.timedelta64('NaT')) + + with pytest.raises(TypeError): + td // np.datetime64('2016-01-01', dtype='datetime64[us]') + + expected = Timedelta(hours=1, minutes=32) + assert td // 2 == expected + assert td // 2.0 == expected + assert td // np.float64(2.0) == expected + assert td // np.int32(2.0) == expected + assert td // np.uint8(2.0) == expected + + # Array-like others + assert td // np.array(scalar.to_timedelta64()) == 1 + + res = (3 * td) // np.array([scalar.to_timedelta64()]) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + res = (10 * td) // np.array([scalar.to_timedelta64(), + np.timedelta64('NaT')]) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + ser = pd.Series([1], dtype=np.int64) + res = td // ser + assert res.dtype.kind == 'm' + + def test_rfloordiv(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # scalar others + # x // Timedelta is defined only for timedelta-like x. int-like, + # float-like, and date-like, in particular, should all either + # a) raise TypeError directly or + # b) return NotImplemented, following which the reversed + # operation will raise TypeError. 
+ assert td.__rfloordiv__(scalar) == 1 + assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 + assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 + + assert np.isnan(td.__rfloordiv__(pd.NaT)) + assert np.isnan(td.__rfloordiv__(np.timedelta64('NaT'))) + + dt64 = np.datetime64('2016-01-01', dtype='datetime64[us]') + with pytest.raises(TypeError): + td.__rfloordiv__(dt64) + + assert td.__rfloordiv__(np.nan) is NotImplemented + assert td.__rfloordiv__(3.5) is NotImplemented + assert td.__rfloordiv__(2) is NotImplemented + + with pytest.raises(TypeError): + td.__rfloordiv__(np.float64(2.0)) + with pytest.raises(TypeError): + td.__rfloordiv__(np.int32(2.0)) + with pytest.raises(TypeError): + td.__rfloordiv__(np.uint8(9)) + + # Array-like others + assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1 + + res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()])) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + arr = np.array([(10 * scalar).to_timedelta64(), + np.timedelta64('NaT')]) + res = td.__rfloordiv__(arr) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + ser = pd.Series([1], dtype=np.int64) + res = td.__rfloordiv__(ser) + assert res is NotImplemented + with pytest.raises(TypeError): + ser // td + class TestTimedeltaComparison(object): def test_comparison_object_array(self):
It would not at all surprise me to learn that there are more corner cases that this misses. Needs some eyeballs. - [x] closes #18846 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18961
2017-12-27T21:58:49Z
2018-01-07T02:40:29Z
2018-01-07T02:40:29Z
2018-01-23T04:40:36Z
Make Series[datetime64] - pd.NaT behave like DatetimeIndex - pd.NaT
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6407a33c442d0..0cd07de2e456d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -208,6 +208,7 @@ Other API Changes - In :func:`read_excel`, the ``comment`` argument is now exposed as a named parameter (:issue:`18735`) - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) - The options ``html.border`` and ``mode.use_inf_as_null`` were deprecated in prior versions, these will now show ``FutureWarning`` rather than a ``DeprecationWarning`` (:issue:`19003`) +- Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'``(:issue:`18808`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index faac8ab312d6b..18659898ae442 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -407,8 +407,12 @@ def _validate_datetime(self, lvalues, rvalues, name): # if tz's must be equal (same or None) if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None): - raise ValueError("Incompatible tz's on datetime subtraction " - "ops") + if len(rvalues) == 1 and isna(rvalues).all(): + # NaT gets a pass + pass + else: + raise ValueError("Incompatible tz's on datetime " + "subtraction ops", rvalues) else: raise TypeError('cannot operate on a series without a rhs ' @@ -505,11 +509,20 @@ def _convert_to_array(self, values, name=None, other=None): inferred_type = lib.infer_dtype(values) if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or is_datetimetz(inferred_type)): + + if ovalues is pd.NaT and name == '__sub__': + # Note: This can only occur when `values` represents `right` + # i.e. `other`. 
+ if other.dtype == 'timedelta64[ns]': + values = np.array([iNaT], dtype='timedelta64[ns]') + else: + values = np.array([iNaT], dtype='datetime64[ns]') + # if we have a other of timedelta, but use pd.NaT here we # we are in the wrong path - if (supplied_dtype is None and other is not None and - (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and - isna(values).all()): + elif (supplied_dtype is None and other is not None and + (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and + isna(values).all()): values = np.empty(values.shape, dtype='timedelta64[ns]') values[:] = iNaT diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index ce4e388bc6f39..019476c467166 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -960,6 +960,13 @@ def test_timedelta64_ops_nat(self): assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta) + def test_td64_sub_NaT(self): + # GH#18808 + ser = Series([NaT, Timedelta('1s')]) + res = ser - NaT + expected = Series([NaT, NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), Timedelta(minutes=5, seconds=4), Timedelta('5m4s').to_timedelta64()]) @@ -1224,13 +1231,10 @@ def test_datetime64_ops_nat(self): single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') # subtraction - assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp) assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) with pytest.raises(TypeError): -single_nat_dtype_datetime + datetime_series - assert_series_equal(nat_series_dtype_timestamp - NaT, - nat_series_dtype_timestamp) assert_series_equal(-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp) with pytest.raises(TypeError): @@ -1263,6 +1267,20 @@ def test_datetime64_ops_nat(self): with pytest.raises(TypeError): nat_series_dtype_timestamp / 1 + def test_dt64_sub_NaT(self): + # 
GH#18808 + dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')]) + ser = pd.Series(dti) + res = ser - pd.NaT + expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + dti_tz = dti.tz_localize('Asia/Tokyo') + ser_tz = pd.Series(dti_tz) + res = ser_tz - pd.NaT + expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + class TestSeriesOperators(TestData): def test_op_method(self):
- [x] closes #18808 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18960
2017-12-27T19:27:02Z
2018-01-01T17:55:23Z
null
2023-05-11T01:17:01Z
implement non-controversial cleanup portions of #18762
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 933e7ed64b837..950b4d4c14772 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -523,11 +523,9 @@ def shift_quarters(int64_t[:] dtindex, int quarters, n = quarters months_since = (dts.month - q1start_month) % modby - compare_month = dts.month - months_since - compare_month = compare_month or 12 # compare_day is only relevant for comparison in the case # where months_since == 0. - compare_day = get_firstbday(dts.year, compare_month) + compare_day = get_firstbday(dts.year, dts.month) if n <= 0 and (months_since != 0 or (months_since == 0 and dts.day > compare_day)): @@ -556,11 +554,9 @@ def shift_quarters(int64_t[:] dtindex, int quarters, n = quarters months_since = (dts.month - q1start_month) % modby - compare_month = dts.month - months_since - compare_month = compare_month or 12 # compare_day is only relevant for comparison in the case # where months_since == 0. - compare_day = get_lastbday(dts.year, compare_month) + compare_day = get_lastbday(dts.year, dts.month) if n <= 0 and (months_since != 0 or (months_since == 0 and dts.day > compare_day)): @@ -587,15 +583,17 @@ def shift_quarters(int64_t[:] dtindex, int quarters, @cython.wraparound(False) @cython.boundscheck(False) -def shift_months(int64_t[:] dtindex, int months, object day=None): +def shift_months(int64_t[:] dtindex, int months, object day): """ Given an int64-based datetime index, shift all elements specified number of months using DateOffset semantics - day: {None, 'start', 'end'} + day: {None, 'start', 'end', 'business_start', 'business_end'} * None: day of month * 'start' 1st day of month * 'end' last day of month + * 'business_start' first business day of month + * 'business_end' last business day of month """ cdef: Py_ssize_t i @@ -721,7 +719,7 @@ def shift_months(int64_t[:] dtindex, int months, object day=None): return np.asarray(out) -cpdef datetime shift_month(datetime stamp, int 
months, object day_opt=None): +cpdef datetime shift_month(datetime stamp, int months, object day_opt): """ Given a datetime (or Timestamp) `stamp`, an integer `months` and an option `day_opt`, return a new datetimelike that many months later, @@ -827,7 +825,8 @@ cpdef int get_day_of_month(datetime other, day_opt) except? -1: raise ValueError(day_opt) -cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: +cpdef int roll_yearday(datetime other, int n, int month, + object day_opt) except? -1: """ Possibly increment or decrement the number of periods to shift based on rollforward/rollbackward conventions. @@ -836,9 +835,12 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: ---------- other : datetime or Timestamp n : number of periods to increment, before adjusting for rolling - day_opt : 'start', 'end' - 'start': returns 1 - 'end': returns last day of the month + month : reference month giving the first month of the year + day_opt : 'start', 'end', 'business_start', 'business_end' + 'start': compare with 1 + 'end': compare with last day of the month + 'business_start': compare with first business day of the month + 'business_end': compare with last business day of the month Returns ------- @@ -846,7 +848,7 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? -1: Notes ----- - * Mirrors `roll_check` in tslib.shift_months + * Mirrors `roll_check` in shift_months Examples ------- @@ -888,7 +890,7 @@ cpdef int roll_yearday(other, n, month, day_opt='start') except? 
-1: other.day < get_day_of_month(other, day_opt)): n -= 1 - elif n <= 0: + else: if other.month > month or (other.month == month and other.day > get_day_of_month(other, day_opt)): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 41d0dd38cd5f6..c2c7ab55f19f0 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -670,7 +670,7 @@ def test_shift_months(years, months): Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')]) - actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months)) + actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months, None)) expected = DatetimeIndex([x + pd.offsets.DateOffset( years=years, months=months) for x in s]) tm.assert_index_equal(actual, expected) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 54250bbf903a4..06f24cbe173c4 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -22,7 +22,7 @@ from pandas._libs.tslibs.offsets import ( ApplyTypeError, as_datetime, _is_normalized, - _get_calendar, _to_dt64, _validate_business_time, + _get_calendar, _to_dt64, _determine_offset, apply_index_wraps, roll_yearday, @@ -254,7 +254,7 @@ def apply_index(self, i): months = ((self.kwds.get('years', 0) * 12 + self.kwds.get('months', 0)) * self.n) if months: - shifted = liboffsets.shift_months(i.asi8, months) + shifted = liboffsets.shift_months(i.asi8, months, day=None) i = i._shallow_copy(shifted) weeks = (self.kwds.get('weeks', 0)) * self.n @@ -557,28 +557,31 @@ def get_str(td): def apply(self, other): if isinstance(other, datetime): n = self.n + wday = other.weekday() - if n == 0 and other.weekday() > 4: - n = 1 - - result = other - - # avoid slowness below - if abs(n) > 5: - k = n // 5 - result = result + timedelta(7 * k) - if n < 0 and result.weekday() > 4: - n += 1 - n -= 5 * k - if n == 0 and result.weekday() > 4: - n -= 1 + # avoid slowness below by operating 
on weeks first + weeks = n // 5 + if n <= 0 and wday > 4: + # roll forward + n += 1 - while n != 0: - k = n // abs(n) - result = result + timedelta(k) - if result.weekday() < 5: - n -= k + n -= 5 * weeks + + # n is always >= 0 at this point + if n == 0 and wday > 4: + # roll back + days = 4 - wday + elif wday > 4: + # roll forward + days = (7 - wday) + (n - 1) + elif wday + n <= 4: + # shift by n days without leaving the current week + days = n + else: + # shift by n days plus 2 to get past the weekend + days = n + 2 + result = other + timedelta(days=7 * weeks + days) if self.offset: result = result + self.offset return result @@ -614,8 +617,8 @@ class BusinessHourMixin(BusinessMixin): def __init__(self, start='09:00', end='17:00', offset=timedelta(0)): # must be validated here to equality check kwds = {'offset': offset} - self.start = kwds['start'] = _validate_business_time(start) - self.end = kwds['end'] = _validate_business_time(end) + self.start = kwds['start'] = liboffsets._validate_business_time(start) + self.end = kwds['end'] = liboffsets._validate_business_time(end) self.kwds.update(kwds) self._offset = offset @@ -1092,21 +1095,20 @@ class CustomBusinessMonthBegin(_CustomBusinessMonth): @apply_wraps def apply(self, other): n = self.n - dt_in = other # First move to month offset - cur_mbegin = self.m_offset.rollback(dt_in) + cur_mbegin = self.m_offset.rollback(other) # Find this custom month offset cur_cmbegin = self.cbday.rollforward(cur_mbegin) # handle zero case. 
arbitrarily rollforward - if n == 0 and dt_in != cur_cmbegin: + if n == 0 and other != cur_cmbegin: n += 1 - if dt_in > cur_cmbegin and n <= -1: + if other > cur_cmbegin and n <= -1: n += 1 - elif dt_in < cur_cmbegin and n >= 1: + elif other < cur_cmbegin and n >= 1: n -= 1 new = cur_mbegin + n * self.m_offset @@ -1239,7 +1241,7 @@ def _apply(self, n, other): months = n // 2 day = 31 if n % 2 else self.day_of_month - return shift_month(other, months, day) + return shift_month(other, months, day_opt=day) def _get_roll(self, i, before_day_of_month, after_day_of_month): n = self.n @@ -1290,7 +1292,7 @@ def _apply(self, n, other): months = n // 2 + n % 2 day = 1 if n % 2 else self.day_of_month - return shift_month(other, months, day) + return shift_month(other, months, day_opt=day) def _get_roll(self, i, before_day_of_month, after_day_of_month): n = self.n @@ -1564,7 +1566,8 @@ class QuarterOffset(DateOffset): _from_name_startingMonth = None _adjust_dst = True # TODO: Consider combining QuarterOffset and YearOffset __init__ at some - # point + # point. Also apply_index, onOffset, rule_code if + # startingMonth vs month attr names are resolved def __init__(self, n=1, normalize=False, startingMonth=None): self.n = self._validate_n(n) @@ -1613,8 +1616,8 @@ def apply(self, other): def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False - modMonth = (dt.month - self.startingMonth) % 3 - return modMonth == 0 and dt.day == self._get_offset_day(dt) + mod_month = (dt.month - self.startingMonth) % 3 + return mod_month == 0 and dt.day == self._get_offset_day(dt) @apply_index_wraps def apply_index(self, dtindex): @@ -2158,6 +2161,7 @@ def apply(self, other): n -= 1 elif n < 0 and other > current_easter: n += 1 + # TODO: Why does this handle the 0 case the opposite of others? # NOTE: easter returns a datetime.date so we have to convert to type of # other
#18762 touches a lot of code and is currently in limbo pending discussion of `roll_monthday` and `roll_convention`. This separates out unrelated cleanup portions of that PR, the idea being that making the diff there simpler will make things easier. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18959
2017-12-27T19:24:55Z
2017-12-29T21:52:10Z
null
2018-02-11T22:00:21Z
DOC: Update doc strings to show pd.Panel has been deprecated
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 26257f6ecbc37..3243baa0008ae 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1403,6 +1403,8 @@ def to_panel(self): Transform long (stacked) format (DataFrame) into wide (3D, Panel) format. + .. deprecated:: 0.20.0 + Currently the index of the DataFrame must be a 2-level MultiIndex. This may be generalized later diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 0f3c5cb85249a..6d85e5bf7c7f9 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -111,6 +111,13 @@ class Panel(NDFrame): """ Represents wide format panel data, stored as 3-dimensional array + .. deprecated:: 0.20.0 + The recommended way to represent 3-D data are with a MultiIndex on a + DataFrame via the :attr:`~Panel.to_frame()` method or with the + `xarray package <http://xarray.pydata.org/en/stable/>`__. + Pandas provides a :attr:`~Panel.to_xarray()` method to automate this + conversion. + Parameters ---------- data : ndarray (items x major x minor), or dict of DataFrames
Add lines to doc string to show that Panel has been deprecated.
https://api.github.com/repos/pandas-dev/pandas/pulls/18956
2017-12-27T10:00:49Z
2017-12-27T19:39:23Z
2017-12-27T19:39:23Z
2018-02-02T17:06:40Z
CLN: Drop the .reshape method from classes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 8c94cef4d8ea7..da750c071c4ae 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -241,6 +241,7 @@ Removal of prior version deprecations/changes - :func:`read_csv` has dropped the ``buffer_lines`` parameter (:issue:`13360`) - :func:`read_csv` has dropped the ``compact_ints`` and ``use_unsigned`` parameters (:issue:`13323`) - The ``Timestamp`` class has dropped the ``offset`` attribute in favor of ``freq`` (:issue:`13593`) +- The ``Series``, ``Categorical``, and ``Index`` classes have dropped the ``reshape`` method (:issue:`13012`) .. _whatsnew_0230.performance: diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 845d0243c39e9..baf15b3ca5bc4 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -471,32 +471,6 @@ def tolist(self): return [_maybe_box_datetimelike(x) for x in self] return np.array(self).tolist() - def reshape(self, new_shape, *args, **kwargs): - """ - .. deprecated:: 0.19.0 - Calling this method will raise an error in a future release. - - An ndarray-compatible method that returns `self` because - `Categorical` instances cannot actually be reshaped. - - Parameters - ---------- - new_shape : int or tuple of ints - A 1-D array of integers that correspond to the new - shape of the `Categorical`. For more information on - the parameter, please refer to `np.reshape`. 
- """ - warn("reshape is deprecated and will raise " - "in a subsequent release", FutureWarning, stacklevel=2) - - nv.validate_reshape(args, kwargs) - - # while the 'new_shape' parameter has no effect, - # we should still enforce valid shape parameters - np.reshape(self.codes, new_shape) - - return self - @property def base(self): """ compat, we are always our own object """ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 79de63b0caeb6..128cd8a9325d6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1190,16 +1190,6 @@ def rename(self, name, inplace=False): """ return self.set_names([name], inplace=inplace) - def reshape(self, *args, **kwargs): - """ - NOT IMPLEMENTED: do not call this method, as reshaping is not - supported for Index objects and will raise an error. - - Reshape an Index. - """ - raise NotImplementedError("reshaping is not supported " - "for Index objects") - @property def _has_complex_internals(self): # to disable groupby tricks in MultiIndex diff --git a/pandas/core/series.py b/pandas/core/series.py index ab26a309533ef..360095c386e8b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -910,37 +910,6 @@ def repeat(self, repeats, *args, **kwargs): return self._constructor(new_values, index=new_index).__finalize__(self) - def reshape(self, *args, **kwargs): - """ - .. deprecated:: 0.19.0 - Calling this method will raise an error. Please call - ``.values.reshape(...)`` instead. - - return an ndarray with the values shape - if the specified shape matches exactly the current shape, then - return self (for compat) - - See also - -------- - numpy.ndarray.reshape - """ - warnings.warn("reshape is deprecated and will raise " - "in a subsequent release. Please use " - ".values.reshape(...) 
instead", FutureWarning, - stacklevel=2) - - if len(args) == 1 and hasattr(args[0], '__iter__'): - shape = args[0] - else: - shape = args - - if tuple(shape) == self.shape: - # XXX ignoring the "order" keyword. - nv.validate_reshape(tuple(), kwargs) - return self - - return self._values.reshape(shape, **kwargs) - def get_value(self, label, takeable=False): """ Quickly retrieve single value at passed index label diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 72b312f29a793..e09f4ad360843 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1684,12 +1684,6 @@ def test_take_fill_value(self): with pytest.raises(IndexError): idx.take(np.array([1, -5])) - def test_reshape_raise(self): - msg = "reshaping is not supported" - idx = pd.Index([0, 1, 2]) - tm.assert_raises_regex(NotImplementedError, msg, - idx.reshape, idx.shape) - def test_reindex_preserves_name_if_target_is_list_or_ndarray(self): # GH6552 idx = pd.Index([0, 1, 2]) diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index f472c6ae9383c..0312af12e0715 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -476,41 +476,6 @@ def test_reshaping_panel_categorical(self): index=p.major_axis.set_names('major')) tm.assert_frame_equal(result, expected) - def test_reshape_categorical(self): - cat = Categorical([], categories=["a", "b"]) - tm.assert_produces_warning(FutureWarning, cat.reshape, 0) - - with tm.assert_produces_warning(FutureWarning): - cat = Categorical([], categories=["a", "b"]) - tm.assert_categorical_equal(cat.reshape(0), cat) - - with tm.assert_produces_warning(FutureWarning): - cat = Categorical([], categories=["a", "b"]) - tm.assert_categorical_equal(cat.reshape((5, -1)), cat) - - with tm.assert_produces_warning(FutureWarning): - cat = Categorical(["a", "b"], categories=["a", "b"]) - tm.assert_categorical_equal(cat.reshape(cat.shape), cat) - - 
with tm.assert_produces_warning(FutureWarning): - cat = Categorical(["a", "b"], categories=["a", "b"]) - tm.assert_categorical_equal(cat.reshape(cat.size), cat) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - msg = "can only specify one unknown dimension" - cat = Categorical(["a", "b"], categories=["a", "b"]) - tm.assert_raises_regex(ValueError, msg, cat.reshape, (-2, -1)) - - def test_reshape_categorical_numpy(self): - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - cat = Categorical(["a", "b"], categories=["a", "b"]) - tm.assert_categorical_equal(np.reshape(cat, cat.shape), cat) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - msg = "the 'order' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, np.reshape, - cat, cat.shape, order='F') - class TestMakeAxisDummies(object): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 203a0b4a54858..0dae6aa96ced1 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1542,66 +1542,6 @@ def test_shift_categorical(self): assert_index_equal(s.values.categories, sp1.values.categories) assert_index_equal(s.values.categories, sn2.values.categories) - def test_reshape_deprecate(self): - x = Series(np.random.random(10), name='x') - tm.assert_produces_warning(FutureWarning, x.reshape, x.shape) - - def test_reshape_non_2d(self): - # see gh-4554 - with tm.assert_produces_warning(FutureWarning): - x = Series(np.random.random(201), name='x') - assert x.reshape(x.shape, ) is x - - # see gh-2719 - with tm.assert_produces_warning(FutureWarning): - a = Series([1, 2, 3, 4]) - result = a.reshape(2, 2) - expected = a.values.reshape(2, 2) - tm.assert_numpy_array_equal(result, expected) - assert isinstance(result, type(expected)) - - def test_reshape_2d_return_array(self): - x = Series(np.random.random(201), name='x') - - with 
tm.assert_produces_warning(FutureWarning): - result = x.reshape((-1, 1)) - assert not isinstance(result, Series) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result2 = np.reshape(x, (-1, 1)) - assert not isinstance(result2, Series) - - with tm.assert_produces_warning(FutureWarning): - result = x[:, None] - expected = x.reshape((-1, 1)) - tm.assert_almost_equal(result, expected) - - def test_reshape_bad_kwarg(self): - a = Series([1, 2, 3, 4]) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - msg = "'foo' is an invalid keyword argument for this function" - tm.assert_raises_regex( - TypeError, msg, a.reshape, (2, 2), foo=2) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - msg = r"reshape\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex( - TypeError, msg, a.reshape, a.shape, foo=2) - - def test_numpy_reshape(self): - a = Series([1, 2, 3, 4]) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.reshape(a, (2, 2)) - expected = a.values.reshape(2, 2) - tm.assert_numpy_array_equal(result, expected) - assert isinstance(result, type(expected)) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = np.reshape(a, a.shape) - tm.assert_series_equal(result, a) - def test_unstack(self): from numpy import nan
Remove the method for Series, Categorical, and Index. Deprecated or errored in v0.19.0 xref #13012
https://api.github.com/repos/pandas-dev/pandas/pulls/18954
2017-12-27T09:15:49Z
2017-12-27T19:40:00Z
2017-12-27T19:40:00Z
2017-12-28T17:29:08Z
DOC: Added note about groupby excluding Decimal columns by default
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index fecc336049a40..1f0b43bab8d4d 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -984,6 +984,33 @@ Note that ``df.groupby('A').colname.std().`` is more efficient than is only interesting over one column (here ``colname``), it may be filtered *before* applying the aggregation function. +.. note:: + Any object column, also if it contains numerical values such as ``Decimal`` + objects, is considered as a "nuisance" columns. They are excluded from + aggregate functions automatically in groupby. + + If you do wish to include decimal or object columns in an aggregation with + other non-nuisance data types, you must do so explicitly. + +.. ipython:: python + + from decimal import Decimal + df_dec = pd.DataFrame( + {'id': [1, 2, 1, 2], + 'int_column': [1, 2, 3, 4], + 'dec_column': [Decimal('0.50'), Decimal('0.15'), Decimal('0.25'), Decimal('0.40')] + } + ) + + # Decimal columns can be sum'd explicitly by themselves... + df_dec.groupby(['id'])[['dec_column']].sum() + + # ...but cannot be combined with standard data types or they will be excluded + df_dec.groupby(['id'])[['int_column', 'dec_column']].sum() + + # Use .agg function to aggregate over standard and "nuisance" data types at the same time + df_dec.groupby(['id']).agg({'int_column': 'sum', 'dec_column': 'sum'}) + .. _groupby.observed: Handling of (un)observed Categorical values
Also included example of how to explicitly aggregate by Decimal columns. - closes #13821
https://api.github.com/repos/pandas-dev/pandas/pulls/18953
2017-12-27T08:26:56Z
2018-11-08T15:08:41Z
2018-11-08T15:08:40Z
2018-11-08T15:08:55Z
BUG: Adjust time values with Period objects in Series.dt.end_time
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index d2d5d40393b62..e3e1b35f89cbb 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -281,6 +281,43 @@ that the dates have been converted to UTC .. ipython:: python pd.to_datetime(["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30"], utc=True) +.. _whatsnew_0240.api_breaking.period_end_time: + +Time values in ``dt.end_time`` and ``to_timestamp(how='end')`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The time values in :class:`Period` and :class:`PeriodIndex` objects are now set +to '23:59:59.999999999' when calling :attr:`Series.dt.end_time`, :attr:`Period.end_time`, +:attr:`PeriodIndex.end_time`, :func:`Period.to_timestamp()` with ``how='end'``, +or :func:`PeriodIndex.to_timestamp()` with ``how='end'`` (:issue:`17157`) + +Previous Behavior: + +.. code-block:: ipython + + In [2]: p = pd.Period('2017-01-01', 'D') + In [3]: pi = pd.PeriodIndex([p]) + + In [4]: pd.Series(pi).dt.end_time[0] + Out[4]: Timestamp(2017-01-01 00:00:00) + + In [5]: p.end_time + Out[5]: Timestamp(2017-01-01 23:59:59.999999999) + +Current Behavior: + +Calling :attr:`Series.dt.end_time` will now result in a time of '23:59:59.999999999' as +is the case with :attr:`Period.end_time`, for example + +.. ipython:: python + + p = pd.Period('2017-01-01', 'D') + pi = pd.PeriodIndex([p]) + + pd.Series(pi).dt.end_time[0] + + p.end_time + .. 
_whatsnew_0240.api.datetimelike.normalize: Tick DateOffset Normalize Restrictions diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 65fb0f331d039..96d7994bdc822 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -34,6 +34,7 @@ cdef extern from "../src/datetime/np_datetime.h": cimport util from util cimport is_period_object, is_string_object, INT32_MIN +from pandas._libs.tslibs.timedeltas import Timedelta from timestamps import Timestamp from timezones cimport is_utc, is_tzlocal, get_dst_info from timedeltas cimport delta_to_nanoseconds @@ -1221,6 +1222,10 @@ cdef class _Period(object): freq = self._maybe_convert_freq(freq) how = _validate_end_alias(how) + end = how == 'E' + if end: + return (self + 1).to_timestamp(how='start') - Timedelta(1, 'ns') + if freq is None: base, mult = get_freq_code(self.freq) freq = get_to_timestamp_base(base) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 00d53ad82b2dc..26aaab2b1b237 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1235,11 +1235,9 @@ def _generate_regular_range(cls, start, end, periods, freq): tz = None if isinstance(start, Timestamp): tz = start.tz - start = start.to_pydatetime() if isinstance(end, Timestamp): tz = end.tz - end = end.to_pydatetime() xdr = generate_range(start=start, end=end, periods=periods, offset=freq) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index b315e3ec20830..32aa89010b206 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -25,7 +25,7 @@ from pandas.core.tools.datetimes import parse_time_string from pandas._libs.lib import infer_dtype -from pandas._libs import tslib, index as libindex +from pandas._libs import tslib, index as libindex, Timedelta from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, _validate_end_alias) @@ -501,6 +501,16 @@ def 
to_timestamp(self, freq=None, how='start'): """ how = _validate_end_alias(how) + end = how == 'E' + if end: + if freq == 'B': + # roll forward to ensure we land on B date + adjust = Timedelta(1, 'D') - Timedelta(1, 'ns') + return self.to_timestamp(how='start') + adjust + else: + adjust = Timedelta(1, 'ns') + return (self + 1).to_timestamp(how='start') - adjust + if freq is None: base, mult = _gfc(self.freq) freq = frequencies.get_to_timestamp_base(base) diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index 482210966fe6b..d56df2371b2e3 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -5,7 +5,7 @@ import pandas as pd import pandas.util.testing as tm from pandas import (PeriodIndex, period_range, DataFrame, date_range, - Index, to_datetime, DatetimeIndex) + Index, to_datetime, DatetimeIndex, Timedelta) def _permute(obj): @@ -51,6 +51,7 @@ def test_frame_to_time_stamp(self): df['mix'] = 'a' exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') + exp_index = exp_index + Timedelta(1, 'D') - Timedelta(1, 'ns') result = df.to_timestamp('D', 'end') tm.assert_index_equal(result.index, exp_index) tm.assert_numpy_array_equal(result.values, df.values) @@ -66,22 +67,26 @@ def _get_with_delta(delta, freq='A-DEC'): delta = timedelta(hours=23) result = df.to_timestamp('H', 'end') exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'h') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) delta = timedelta(hours=23, minutes=59) result = df.to_timestamp('T', 'end') exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'm') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) result = df.to_timestamp('S', 'end') delta = timedelta(hours=23, minutes=59, seconds=59) exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) # columns df = df.T 
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') + exp_index = exp_index + Timedelta(1, 'D') - Timedelta(1, 'ns') result = df.to_timestamp('D', 'end', axis=1) tm.assert_index_equal(result.columns, exp_index) tm.assert_numpy_array_equal(result.values, df.values) @@ -93,16 +98,19 @@ def _get_with_delta(delta, freq='A-DEC'): delta = timedelta(hours=23) result = df.to_timestamp('H', 'end', axis=1) exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'h') - Timedelta(1, 'ns') tm.assert_index_equal(result.columns, exp_index) delta = timedelta(hours=23, minutes=59) result = df.to_timestamp('T', 'end', axis=1) exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'm') - Timedelta(1, 'ns') tm.assert_index_equal(result.columns, exp_index) result = df.to_timestamp('S', 'end', axis=1) delta = timedelta(hours=23, minutes=59, seconds=59) exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns') tm.assert_index_equal(result.columns, exp_index) # invalid axis diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 923d826fe1a5e..405edba83dc7a 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -366,6 +366,19 @@ def test_periods_number_check(self): with pytest.raises(ValueError): period_range('2011-1-1', '2012-1-1', 'B') + def test_start_time(self): + # GH 17157 + index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') + expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS') + tm.assert_index_equal(index.start_time, expected_index) + + def test_end_time(self): + # GH 17157 + index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') + expected_index = date_range('2016-01-01', end='2016-05-31', freq='M') + expected_index = expected_index.shift(1, freq='D').shift(-1, freq='ns') + tm.assert_index_equal(index.end_time, expected_index) + def 
test_index_duplicate_periods(self): # monotonic idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN') diff --git a/pandas/tests/indexes/period/test_scalar_compat.py b/pandas/tests/indexes/period/test_scalar_compat.py index 56bd2adf58719..a66a81fe99cd4 100644 --- a/pandas/tests/indexes/period/test_scalar_compat.py +++ b/pandas/tests/indexes/period/test_scalar_compat.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """Tests for PeriodIndex behaving like a vectorized Period scalar""" -from pandas import PeriodIndex, date_range +from pandas import PeriodIndex, date_range, Timedelta import pandas.util.testing as tm @@ -14,4 +14,5 @@ def test_start_time(self): def test_end_time(self): index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31') expected_index = date_range('2016-01-01', end='2016-05-31', freq='M') + expected_index += Timedelta(1, 'D') - Timedelta(1, 'ns') tm.assert_index_equal(index.end_time, expected_index) diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 16b558916df2d..c4ed07d98413f 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -3,6 +3,7 @@ import pytest import pandas as pd +from pandas import Timedelta import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import lrange @@ -60,6 +61,7 @@ def test_to_timestamp(self): exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') result = series.to_timestamp(how='end') + exp_index = exp_index + Timedelta(1, 'D') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) assert result.name == 'foo' @@ -74,16 +76,19 @@ def _get_with_delta(delta, freq='A-DEC'): delta = timedelta(hours=23) result = series.to_timestamp('H', 'end') exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'h') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) delta = timedelta(hours=23, minutes=59) result = 
series.to_timestamp('T', 'end') exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 'm') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) result = series.to_timestamp('S', 'end') delta = timedelta(hours=23, minutes=59, seconds=59) exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001') @@ -92,6 +97,7 @@ def _get_with_delta(delta, freq='A-DEC'): exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59', freq='H') result = series.to_timestamp(how='end') + exp_index = exp_index + Timedelta(1, 's') - Timedelta(1, 'ns') tm.assert_index_equal(result.index, exp_index) assert result.name == 'foo' @@ -284,6 +290,7 @@ def test_to_timestamp_pi_mult(self): result = idx.to_timestamp(how='E') expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], name='idx') + expected = expected + Timedelta(1, 'D') - Timedelta(1, 'ns') tm.assert_index_equal(result, expected) def test_to_timestamp_pi_combined(self): @@ -298,11 +305,13 @@ def test_to_timestamp_pi_combined(self): expected = DatetimeIndex(['2011-01-02 00:59:59', '2011-01-03 01:59:59'], name='idx') + expected = expected + Timedelta(1, 's') - Timedelta(1, 'ns') tm.assert_index_equal(result, expected) result = idx.to_timestamp(how='E', freq='H') expected = DatetimeIndex(['2011-01-02 00:00', '2011-01-03 01:00'], name='idx') + expected = expected + Timedelta(1, 'h') - Timedelta(1, 'ns') tm.assert_index_equal(result, expected) def test_period_astype_to_timestamp(self): @@ -312,6 +321,7 @@ def test_period_astype_to_timestamp(self): tm.assert_index_equal(pi.astype('datetime64[ns]'), exp) exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31']) + exp = exp + Timedelta(1, 'D') - Timedelta(1, 'ns') tm.assert_index_equal(pi.astype('datetime64[ns]', how='end'), exp) exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', 
'2011-03-01'], @@ -321,6 +331,7 @@ def test_period_astype_to_timestamp(self): exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31'], tz='US/Eastern') + exp = exp + Timedelta(1, 'D') - Timedelta(1, 'ns') res = pi.astype('datetime64[ns, US/Eastern]', how='end') tm.assert_index_equal(res, exp) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index eccd86a888fb9..4a17b2efd1dec 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -5,6 +5,7 @@ from datetime import datetime, date, timedelta import pandas as pd +from pandas import Timedelta import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import text_type, iteritems @@ -274,12 +275,14 @@ def test_timestamp_tz_arg_dateutil_from_string(self): def test_timestamp_mult(self): p = pd.Period('2011-01', freq='M') - assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01') - assert p.to_timestamp(how='E') == pd.Timestamp('2011-01-31') + assert p.to_timestamp(how='S') == Timestamp('2011-01-01') + expected = Timestamp('2011-02-01') - Timedelta(1, 'ns') + assert p.to_timestamp(how='E') == expected p = pd.Period('2011-01', freq='3M') - assert p.to_timestamp(how='S') == pd.Timestamp('2011-01-01') - assert p.to_timestamp(how='E') == pd.Timestamp('2011-03-31') + assert p.to_timestamp(how='S') == Timestamp('2011-01-01') + expected = Timestamp('2011-04-01') - Timedelta(1, 'ns') + assert p.to_timestamp(how='E') == expected def test_construction(self): i1 = Period('1/1/2005', freq='M') @@ -611,19 +614,19 @@ def _ex(p): p = Period('1985', freq='A') result = p.to_timestamp('H', how='end') - expected = datetime(1985, 12, 31, 23) + expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected result = p.to_timestamp('3H', how='end') assert result == expected result = p.to_timestamp('T', how='end') - expected = datetime(1985, 12, 31, 23, 59) + expected = 
Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected result = p.to_timestamp('2T', how='end') assert result == expected result = p.to_timestamp(how='end') - expected = datetime(1985, 12, 31) + expected = Timestamp(1986, 1, 1) - Timedelta(1, 'ns') assert result == expected expected = datetime(1985, 1, 1) diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 63726f27914f3..90dbe26a2f0ea 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -3,7 +3,8 @@ import pandas as pd import pandas.util.testing as tm import pandas.core.indexes.period as period -from pandas import Series, period_range, DataFrame +from pandas import Series, period_range, DataFrame, Period +import pytest def _permute(obj): @@ -167,3 +168,23 @@ def test_truncate(self): pd.Period('2017-09-02') ]) tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) + + @pytest.mark.parametrize('input_vals', [ + [Period('2016-01', freq='M'), Period('2016-02', freq='M')], + [Period('2016-01-01', freq='D'), Period('2016-01-02', freq='D')], + [Period('2016-01-01 00:00:00', freq='H'), + Period('2016-01-01 01:00:00', freq='H')], + [Period('2016-01-01 00:00:00', freq='M'), + Period('2016-01-01 00:01:00', freq='M')], + [Period('2016-01-01 00:00:00', freq='S'), + Period('2016-01-01 00:00:01', freq='S')] + ]) + def test_end_time_timevalues(self, input_vals): + # GH 17157 + # Check that the time part of the Period is adjusted by end_time + # when using the dt accessor on a Series + + s = Series(input_vals) + result = s.dt.end_time + expected = s.apply(lambda x: x.end_time) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 1f70d09e43b37..de4dc2bcf25a4 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -21,7 +21,7 @@ import pandas as pd from pandas import (Series, DataFrame, Panel, Index, isna, - notna, Timestamp) + notna, 
Timestamp, Timedelta) from pandas.compat import range, lrange, zip, OrderedDict from pandas.errors import UnsupportedFunctionCall @@ -1702,12 +1702,14 @@ def test_resample_anchored_intraday(self): result = df.resample('M').mean() expected = df.resample( 'M', kind='period').mean().to_timestamp(how='end') + expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D') tm.assert_frame_equal(result, expected) result = df.resample('M', closed='left').mean() exp = df.tshift(1, freq='D').resample('M', kind='period').mean() exp = exp.to_timestamp(how='end') + exp.index = exp.index + Timedelta(1, 'ns') - Timedelta(1, 'D') tm.assert_frame_equal(result, exp) rng = date_range('1/1/2012', '4/1/2012', freq='100min') @@ -1716,12 +1718,14 @@ def test_resample_anchored_intraday(self): result = df.resample('Q').mean() expected = df.resample( 'Q', kind='period').mean().to_timestamp(how='end') + expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D') tm.assert_frame_equal(result, expected) result = df.resample('Q', closed='left').mean() expected = df.tshift(1, freq='D').resample('Q', kind='period', closed='left').mean() expected = expected.to_timestamp(how='end') + expected.index += Timedelta(1, 'ns') - Timedelta(1, 'D') tm.assert_frame_equal(result, expected) ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h') @@ -2473,7 +2477,7 @@ def test_resample_to_timestamps(self): ts = _simple_pts('1/1/1990', '12/31/1995', freq='M') result = ts.resample('A-DEC', kind='timestamp').mean() - expected = ts.to_timestamp(how='end').resample('A-DEC').mean() + expected = ts.to_timestamp(how='start').resample('A-DEC').mean() assert_series_equal(result, expected) def test_resample_to_quarterly(self): diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 60981f41ec716..9d41401a7eefc 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1321,7 +1321,7 @@ def _end_apply_index(self, dtindex): roll = self.n base = (base_period + roll).to_timestamp(how='end') - 
return base + off + return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D') def onOffset(self, dt): if self.normalize and not _is_normalized(dt):
- [x] closes #17157 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18952
2017-12-27T01:04:56Z
2018-07-31T13:03:03Z
2018-07-31T13:03:03Z
2018-08-03T20:14:28Z
DOC: greater consistency and spell-check for intro docs
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index 49142311ff057..46c3ffef58228 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -25,7 +25,7 @@ ******************** This is a short introduction to pandas, geared mainly for new users. -You can see more complex recipes in the :ref:`Cookbook<cookbook>` +You can see more complex recipes in the :ref:`Cookbook<cookbook>`. Customarily, we import as follows: @@ -38,7 +38,7 @@ Customarily, we import as follows: Object Creation --------------- -See the :ref:`Data Structure Intro section <dsintro>` +See the :ref:`Data Structure Intro section <dsintro>`. Creating a :class:`Series` by passing a list of values, letting pandas create a default integer index: @@ -70,7 +70,8 @@ Creating a ``DataFrame`` by passing a dict of objects that can be converted to s 'F' : 'foo' }) df2 -Having specific :ref:`dtypes <basics.dtypes>` +The columns of the resulting ``DataFrame`` have different +:ref:`dtypes <basics.dtypes>`. .. ipython:: python @@ -104,16 +105,16 @@ truncated for brevity. Viewing Data ------------ -See the :ref:`Basics section <basics>` +See the :ref:`Basics section <basics>`. -See the top & bottom rows of the frame +Here is how to view the top and bottom rows of the frame: .. ipython:: python df.head() df.tail(3) -Display the index, columns, and the underlying numpy data +Display the index, columns, and the underlying numpy data: .. ipython:: python @@ -121,25 +122,25 @@ Display the index, columns, and the underlying numpy data df.columns df.values -Describe shows a quick statistic summary of your data +:func:`~DataFrame.describe` shows a quick statistic summary of your data: .. ipython:: python df.describe() -Transposing your data +Transposing your data: .. ipython:: python df.T -Sorting by an axis +Sorting by an axis: .. ipython:: python df.sort_index(axis=1, ascending=False) -Sorting by values +Sorting by values: .. 
ipython:: python @@ -155,13 +156,13 @@ Selection recommend the optimized pandas data access methods, ``.at``, ``.iat``, ``.loc``, ``.iloc`` and ``.ix``. -See the indexing documentation :ref:`Indexing and Selecting Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>` +See the indexing documentation :ref:`Indexing and Selecting Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`. Getting ~~~~~~~ Selecting a single column, which yields a ``Series``, -equivalent to ``df.A`` +equivalent to ``df.A``: .. ipython:: python @@ -177,39 +178,39 @@ Selecting via ``[]``, which slices the rows. Selection by Label ~~~~~~~~~~~~~~~~~~ -See more in :ref:`Selection by Label <indexing.label>` +See more in :ref:`Selection by Label <indexing.label>`. -For getting a cross section using a label +For getting a cross section using a label: .. ipython:: python df.loc[dates[0]] -Selecting on a multi-axis by label +Selecting on a multi-axis by label: .. ipython:: python df.loc[:,['A','B']] -Showing label slicing, both endpoints are *included* +Showing label slicing, both endpoints are *included*: .. ipython:: python df.loc['20130102':'20130104',['A','B']] -Reduction in the dimensions of the returned object +Reduction in the dimensions of the returned object: .. ipython:: python df.loc['20130102',['A','B']] -For getting a scalar value +For getting a scalar value: .. ipython:: python df.loc[dates[0],'A'] -For getting fast access to a scalar (equiv to the prior method) +For getting fast access to a scalar (equivalent to the prior method): .. ipython:: python @@ -218,45 +219,45 @@ For getting fast access to a scalar (equiv to the prior method) Selection by Position ~~~~~~~~~~~~~~~~~~~~~ -See more in :ref:`Selection by Position <indexing.integer>` +See more in :ref:`Selection by Position <indexing.integer>`. -Select via the position of the passed integers +Select via the position of the passed integers: .. 
ipython:: python df.iloc[3] -By integer slices, acting similar to numpy/python +By integer slices, acting similar to numpy/python: .. ipython:: python df.iloc[3:5,0:2] -By lists of integer position locations, similar to the numpy/python style +By lists of integer position locations, similar to the numpy/python style: .. ipython:: python df.iloc[[1,2,4],[0,2]] -For slicing rows explicitly +For slicing rows explicitly: .. ipython:: python df.iloc[1:3,:] -For slicing columns explicitly +For slicing columns explicitly: .. ipython:: python df.iloc[:,1:3] -For getting a value explicitly +For getting a value explicitly: .. ipython:: python df.iloc[1,1] -For getting fast access to a scalar (equiv to the prior method) +For getting fast access to a scalar (equivalent to the prior method): .. ipython:: python @@ -290,7 +291,7 @@ Setting ~~~~~~~ Setting a new column automatically aligns the data -by the indexes +by the indexes. .. ipython:: python @@ -298,25 +299,25 @@ by the indexes s1 df['F'] = s1 -Setting values by label +Setting values by label: .. ipython:: python df.at[dates[0],'A'] = 0 -Setting values by position +Setting values by position: .. ipython:: python df.iat[0,1] = 0 -Setting by assigning with a numpy array +Setting by assigning with a numpy array: .. ipython:: python df.loc[:,'D'] = np.array([5] * len(df)) -The result of the prior setting operations +The result of the prior setting operations. .. ipython:: python @@ -336,7 +337,7 @@ Missing Data pandas primarily uses the value ``np.nan`` to represent missing data. It is by default not included in computations. See the :ref:`Missing Data section -<missing_data>` +<missing_data>`. Reindexing allows you to change/add/delete the index on a specified axis. This returns a copy of the data. @@ -353,13 +354,13 @@ To drop any rows that have missing data. df1.dropna(how='any') -Filling missing data +Filling missing data. .. 
ipython:: python df1.fillna(value=5) -To get the boolean mask where values are ``nan`` +To get the boolean mask where values are ``nan``. .. ipython:: python @@ -369,20 +370,20 @@ To get the boolean mask where values are ``nan`` Operations ---------- -See the :ref:`Basic section on Binary Ops <basics.binop>` +See the :ref:`Basic section on Binary Ops <basics.binop>`. Stats ~~~~~ Operations in general *exclude* missing data. -Performing a descriptive statistic +Performing a descriptive statistic: .. ipython:: python df.mean() -Same operation on the other axis +Same operation on the other axis: .. ipython:: python @@ -401,7 +402,7 @@ In addition, pandas automatically broadcasts along the specified dimension. Apply ~~~~~ -Applying functions to the data +Applying functions to the data: .. ipython:: python @@ -411,7 +412,7 @@ Applying functions to the data Histogramming ~~~~~~~~~~~~~ -See more at :ref:`Histogramming and Discretization <basics.discretization>` +See more at :ref:`Histogramming and Discretization <basics.discretization>`. .. ipython:: python @@ -425,7 +426,7 @@ String Methods Series is equipped with a set of string processing methods in the `str` attribute that make it easy to operate on each element of the array, as in the code snippet below. Note that pattern-matching in `str` generally uses `regular -expressions <https://docs.python.org/2/library/re.html>`__ by default (and in +expressions <https://docs.python.org/3/library/re.html>`__ by default (and in some cases always uses them). See more at :ref:`Vectorized String Methods <text.string_methods>`. @@ -445,7 +446,7 @@ DataFrame, and Panel objects with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations. -See the :ref:`Merging section <merging>` +See the :ref:`Merging section <merging>`. 
Concatenating pandas objects together with :func:`concat`: @@ -462,7 +463,7 @@ Concatenating pandas objects together with :func:`concat`: Join ~~~~ -SQL style merges. See the :ref:`Database style joining <merging.join>` +SQL style merges. See the :ref:`Database style joining <merging.join>` section. .. ipython:: python @@ -486,7 +487,8 @@ Another example that can be given is: Append ~~~~~~ -Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>` +Append rows to a dataframe. See the :ref:`Appending <merging.concatenation>` +section. .. ipython:: python @@ -500,13 +502,13 @@ Grouping -------- By "group by" we are referring to a process involving one or more of the -following steps +following steps: - **Splitting** the data into groups based on some criteria - **Applying** a function to each group independently - **Combining** the results into a data structure -See the :ref:`Grouping section <groupby>` +See the :ref:`Grouping section <groupby>`. .. ipython:: python @@ -518,14 +520,15 @@ See the :ref:`Grouping section <groupby>` 'D' : np.random.randn(8)}) df -Grouping and then applying a function ``sum`` to the resulting groups. +Grouping and then applying the :meth:`~DataFrame.sum` function to the resulting +groups. .. ipython:: python df.groupby('A').sum() -Grouping by multiple columns forms a hierarchical index, which we then apply -the function. +Grouping by multiple columns forms a hierarchical index, and again we can +apply the ``sum`` function. .. ipython:: python @@ -595,7 +598,7 @@ Time Series pandas has simple, powerful, and efficient functionality for performing resampling operations during frequency conversion (e.g., converting secondly data into 5-minutely data). This is extremely common in, but not limited to, -financial applications. See the :ref:`Time Series section <timeseries>` +financial applications. See the :ref:`Time Series section <timeseries>`. .. ipython:: python @@ -603,7 +606,7 @@ financial applications. 
See the :ref:`Time Series section <timeseries>` ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng) ts.resample('5Min').sum() -Time zone representation +Time zone representation: .. ipython:: python @@ -613,13 +616,13 @@ Time zone representation ts_utc = ts.tz_localize('UTC') ts_utc -Convert to another time zone +Converting to another time zone: .. ipython:: python ts_utc.tz_convert('US/Eastern') -Converting between time span representations +Converting between time span representations: .. ipython:: python @@ -659,14 +662,15 @@ Convert the raw grades to a categorical data type. df["grade"] = df["raw_grade"].astype("category") df["grade"] -Rename the categories to more meaningful names (assigning to ``Series.cat.categories`` is inplace!) +Rename the categories to more meaningful names (assigning to +``Series.cat.categories`` is inplace!). .. ipython:: python df["grade"].cat.categories = ["very good", "good", "very bad"] Reorder the categories and simultaneously add the missing categories (methods under ``Series -.cat`` return a new ``Series`` per default). +.cat`` return a new ``Series`` by default). .. ipython:: python @@ -679,7 +683,7 @@ Sorting is per order in the categories, not lexical order. df.sort_values(by="grade") -Grouping by a categorical column shows also empty categories. +Grouping by a categorical column also shows empty categories. .. ipython:: python @@ -689,7 +693,7 @@ Grouping by a categorical column shows also empty categories. Plotting -------- -:ref:`Plotting <visualization>` docs. +See the :ref:`Plotting <visualization>` docs. .. ipython:: python :suppress: @@ -705,8 +709,8 @@ Plotting @savefig series_plot_basic.png ts.plot() -On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the -columns with labels: +On a DataFrame, the :meth:`~DataFrame.plot` method is a convenience to plot all +of the columns with labels: .. 
ipython:: python @@ -723,13 +727,13 @@ Getting Data In/Out CSV ~~~ -:ref:`Writing to a csv file <io.store_in_csv>` +:ref:`Writing to a csv file. <io.store_in_csv>` .. ipython:: python df.to_csv('foo.csv') -:ref:`Reading from a csv file <io.read_csv_table>` +:ref:`Reading from a csv file. <io.read_csv_table>` .. ipython:: python @@ -743,15 +747,15 @@ CSV HDF5 ~~~~ -Reading and writing to :ref:`HDFStores <io.hdf5>` +Reading and writing to :ref:`HDFStores <io.hdf5>`. -Writing to a HDF5 Store +Writing to a HDF5 Store. .. ipython:: python df.to_hdf('foo.h5','df') -Reading from a HDF5 Store +Reading from a HDF5 Store. .. ipython:: python @@ -765,15 +769,15 @@ Reading from a HDF5 Store Excel ~~~~~ -Reading and writing to :ref:`MS Excel <io.excel>` +Reading and writing to :ref:`MS Excel <io.excel>`. -Writing to an excel file +Writing to an excel file. .. ipython:: python df.to_excel('foo.xlsx', sheet_name='Sheet1') -Reading from an excel file +Reading from an excel file. .. ipython:: python @@ -787,7 +791,7 @@ Reading from an excel file Gotchas ------- -If you are trying an operation and you see an exception like: +If you are attempting to perform an operation you might see an exception like: .. code-block:: python diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 9318df2b76564..ecb9a8f2d79db 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -133,7 +133,7 @@ of interest: * Broadcasting behavior between higher- (e.g. DataFrame) and lower-dimensional (e.g. Series) objects. - * Missing data in computations + * Missing data in computations. We will demonstrate how to manage these issues independently, though they can be handled simultaneously. 
@@ -226,12 +226,12 @@ We can also do elementwise :func:`divmod`: Missing data / operations with fill values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In Series and DataFrame (though not yet in Panel), the arithmetic functions -have the option of inputting a *fill_value*, namely a value to substitute when -at most one of the values at a location are missing. For example, when adding -two DataFrame objects, you may wish to treat NaN as 0 unless both DataFrames -are missing that value, in which case the result will be NaN (you can later -replace NaN with some other value using ``fillna`` if you wish). +In Series and DataFrame, the arithmetic functions have the option of inputting +a *fill_value*, namely a value to substitute when at most one of the values at +a location are missing. For example, when adding two DataFrame objects, you may +wish to treat NaN as 0 unless both DataFrames are missing that value, in which +case the result will be NaN (you can later replace NaN with some other value +using ``fillna`` if you wish). .. ipython:: python :suppress: @@ -260,9 +260,9 @@ arithmetic operations described above: df.gt(df2) df2.ne(df) -These operations produce a pandas object the same type as the left-hand-side input -that if of dtype ``bool``. These ``boolean`` objects can be used in indexing operations, -see :ref:`here<indexing.boolean>` +These operations produce a pandas object of the same type as the left-hand-side +input that is of dtype ``bool``. These ``boolean`` objects can be used in +indexing operations, see the section on :ref:`Boolean indexing<indexing.boolean>`. .. _basics.reductions: @@ -316,7 +316,7 @@ To evaluate single-element pandas objects in a boolean context, use the method >>> df and df2 - These both will raise as you are trying to compare multiple values. + These will both raise errors, as you are trying to compare multiple values. .. code-block:: python @@ -329,7 +329,7 @@ See :ref:`gotchas<gotchas.truth>` for a more detailed discussion. 
Comparing if objects are equivalent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Often you may find there is more than one way to compute the same +Often you may find that there is more than one way to compute the same result. As a simple example, consider ``df+df`` and ``df*2``. To test that these two computations produce the same result, given the tools shown above, you might imagine using ``(df+df == df*2).all()``. But in @@ -341,7 +341,7 @@ fact, this expression is False: (df+df == df*2).all() Notice that the boolean DataFrame ``df+df == df*2`` contains some False values! -That is because NaNs do not compare as equals: +This is because NaNs do not compare as equals: .. ipython:: python @@ -368,7 +368,7 @@ equality to be True: Comparing array-like objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can conveniently do element-wise comparisons when comparing a pandas +You can conveniently perform element-wise comparisons when comparing a pandas data structure with a scalar value: .. ipython:: python @@ -452,8 +452,8 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above: Descriptive statistics ---------------------- -A large number of methods for computing descriptive statistics and other related -operations on :ref:`Series <api.series.stats>`, :ref:`DataFrame +There exists a large number of methods for computing descriptive statistics and +other related operations on :ref:`Series <api.series.stats>`, :ref:`DataFrame <api.dataframe.stats>`, and :ref:`Panel <api.panel.stats>`. Most of these are aggregations (hence producing a lower-dimensional result) like :meth:`~DataFrame.sum`, :meth:`~DataFrame.mean`, and :meth:`~DataFrame.quantile`, @@ -764,7 +764,7 @@ For example, we can fit a regression using statsmodels. Their API expects a form The pipe method is inspired by unix pipes and more recently dplyr_ and magrittr_, which have introduced the popular ``(%>%)`` (read pipe) operator for R_. 
The implementation of ``pipe`` here is quite clean and feels right at home in python. -We encourage you to view the source code (``pd.DataFrame.pipe??`` in IPython). +We encourage you to view the source code of :meth:`~DataFrame.pipe`. .. _dplyr: https://github.com/hadley/dplyr .. _magrittr: https://github.com/smbache/magrittr @@ -786,7 +786,7 @@ statistics methods, take an optional ``axis`` argument: df.apply(np.cumsum) df.apply(np.exp) -``.apply()`` will also dispatch on a string method name. +The :meth:`~DataFrame.apply` method will also dispatch on a string method name. .. ipython:: python @@ -863,8 +863,9 @@ We will use a similar starting frame from above: tsdf.iloc[3:7] = np.nan tsdf -Using a single function is equivalent to :meth:`~DataFrame.apply`; You can also pass named methods as strings. -These will return a ``Series`` of the aggregated output: +Using a single function is equivalent to :meth:`~DataFrame.apply`. You can also +pass named methods as strings. These will return a ``Series`` of the aggregated +output: .. ipython:: python @@ -875,7 +876,7 @@ These will return a ``Series`` of the aggregated output: # these are equivalent to a ``.sum()`` because we are aggregating on a single function tsdf.sum() -Single aggregations on a ``Series`` this will result in a scalar value: +Single aggregations on a ``Series`` this will return a scalar value: .. ipython:: python @@ -885,8 +886,8 @@ Single aggregations on a ``Series`` this will result in a scalar value: Aggregating with multiple functions +++++++++++++++++++++++++++++++++++ -You can pass multiple aggregation arguments as a list. -The results of each of the passed functions will be a row in the resultant ``DataFrame``. +You can pass multiple aggregation arguments as a list. +The results of each of the passed functions will be a row in the resulting ``DataFrame``. These are naturally named from the aggregation function. .. 
ipython:: python @@ -989,7 +990,7 @@ The :meth:`~DataFrame.transform` method returns an object that is indexed the sa as the original. This API allows you to provide *multiple* operations at the same time rather than one-by-one. Its API is quite similar to the ``.agg`` API. -Use a similar frame to the above sections. +We create a frame similar to the one used in the above sections. .. ipython:: python @@ -1008,7 +1009,7 @@ function name or a user defined function. tsdf.transform('abs') tsdf.transform(lambda x: x.abs()) -Here ``.transform()`` received a single function; this is equivalent to a ufunc application +Here :meth:`~DataFrame.transform` received a single function; this is equivalent to a ufunc application. .. ipython:: python @@ -1044,7 +1045,7 @@ Transforming with a dict ++++++++++++++++++++++++ -Passing a dict of functions will will allow selective transforming per column. +Passing a dict of functions will allow selective transforming per column. .. ipython:: python @@ -1080,7 +1081,7 @@ a single value and returning a single value. For example: df4['one'].map(f) df4.applymap(f) -:meth:`Series.map` has an additional feature which is that it can be used to easily +:meth:`Series.map` has an additional feature; it can be used to easily "link" or "map" values defined by a secondary series. This is closely related to :ref:`merging/joining functionality <merging>`: @@ -1123,13 +1124,13 @@ A reduction operation. panel.apply(lambda x: x.dtype, axis='items') -A similar reduction type operation +A similar reduction type operation. .. ipython:: python panel.apply(lambda x: x.sum(), axis='major_axis') -This last reduction is equivalent to +This last reduction is equivalent to: .. ipython:: python @@ -1157,7 +1158,7 @@ Apply can also accept multiple axes in the ``axis`` argument. This will pass a result result.loc[:,:,'ItemA'] -This is equivalent to the following +This is equivalent to the following: .. 
ipython:: python @@ -1358,9 +1359,9 @@ Note that the same result could have been achieved using ts2.reindex(ts.index).fillna(method='ffill') -:meth:`~Series.reindex` will raise a ValueError if the index is not monotonic +:meth:`~Series.reindex` will raise a ValueError if the index is not monotonically increasing or decreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate` -will not make any checks on the order of the index. +will not perform any checks on the order of the index. .. _basics.limits_on_reindex_fill: @@ -1428,7 +1429,7 @@ Series can also be used: df.rename(columns={'one': 'foo', 'two': 'bar'}, index={'a': 'apple', 'b': 'banana', 'd': 'durian'}) -If the mapping doesn't include a column/index label, it isn't renamed. Also +If the mapping doesn't include a column/index label, it isn't renamed. Note that extra labels in the mapping don't throw an error. .. versionadded:: 0.21.0 @@ -1438,8 +1439,8 @@ you specify a single ``mapper`` and the ``axis`` to apply that mapping to. .. ipython:: python - df.rename({'one': 'foo', 'two': 'bar'}, axis='columns'}) - df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='columns'}) + df.rename({'one': 'foo', 'two': 'bar'}, axis='columns') + df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='index') The :meth:`~DataFrame.rename` method also provides an ``inplace`` named @@ -1515,7 +1516,7 @@ To iterate over the rows of a DataFrame, you can use the following methods: over the values. See the docs on :ref:`function application <basics.apply>`. * If you need to do iterative manipulations on the values but performance is - important, consider writing the inner loop using e.g. cython or numba. + important, consider writing the inner loop with cython or numba. See the :ref:`enhancing performance <enhancingperf>` section for some examples of this approach. 
@@ -1594,7 +1595,7 @@ index value along with a Series containing the data in each row: To preserve dtypes while iterating over the rows, it is better to use :meth:`~DataFrame.itertuples` which returns namedtuples of the values - and which is generally much faster as ``iterrows``. + and which is generally much faster than :meth:`~DataFrame.iterrows`. For instance, a contrived way to transpose the DataFrame would be: @@ -1615,14 +1616,14 @@ yielding a namedtuple for each row in the DataFrame. The first element of the tuple will be the row's corresponding index value, while the remaining values are the row values. -For instance, +For instance: .. ipython:: python for row in df.itertuples(): print(row) -This method does not convert the row to a Series object but just +This method does not convert the row to a Series object; it merely returns the values inside a namedtuple. Therefore, :meth:`~DataFrame.itertuples` preserves the data type of the values and is generally faster as :meth:`~DataFrame.iterrows`. @@ -1709,7 +1710,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. note:: - ``Series.dt`` will raise a ``TypeError`` if you access with a non-datetimelike values + ``Series.dt`` will raise a ``TypeError`` if you access with a non-datetime-like values. Vectorized string methods ------------------------- @@ -1763,7 +1764,7 @@ labels (indexes) are the ``Series.sort_index()`` and the ``DataFrame.sort_index( By Values ~~~~~~~~~ -The :meth:`Series.sort_values` and :meth:`DataFrame.sort_values` are the entry points for **value** sorting (that is the values in a column or row). +The :meth:`Series.sort_values` and :meth:`DataFrame.sort_values` are the entry points for **value** sorting (i.e. the values in a column or row). 
:meth:`DataFrame.sort_values` can accept an optional ``by`` argument for ``axis=0`` which will use an arbitrary vector or a column name of the DataFrame to determine the sort order: @@ -1794,7 +1795,7 @@ argument: searchsorted ~~~~~~~~~~~~ -Series has the :meth:`~Series.searchsorted` method, which works similar to +Series has the :meth:`~Series.searchsorted` method, which works similarly to :meth:`numpy.ndarray.searchsorted`. .. ipython:: python @@ -1859,14 +1860,14 @@ the axis indexes, since they are immutable) and returns a new object. Note that **it is seldom necessary to copy objects**. For example, there are only a handful of ways to alter a DataFrame *in-place*: - * Inserting, deleting, or modifying a column - * Assigning to the ``index`` or ``columns`` attributes + * Inserting, deleting, or modifying a column. + * Assigning to the ``index`` or ``columns`` attributes. * For homogeneous data, directly modifying the values via the ``values`` - attribute or advanced indexing + attribute or advanced indexing. -To be clear, no pandas methods have the side effect of modifying your data; -almost all methods return new objects, leaving the original object -untouched. If data is modified, it is because you did so explicitly. +To be clear, no pandas method has the side effect of modifying your data; +almost every method returns a new object, leaving the original object +untouched. If the data is modified, it is because you did so explicitly. .. _basics.dtypes: @@ -1879,7 +1880,8 @@ The main types stored in pandas objects are ``float``, ``int``, ``bool``, ``int64`` and ``int32``. See :ref:`Series with TZ <timeseries.timezone_series>` for more detail on ``datetime64[ns, tz]`` dtypes. -A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series with the data type of each column. +A convenient :attr:`~DataFrame.dtypes` attribute for DataFrame returns a Series +with the data type of each column. .. 
ipython:: python @@ -1893,15 +1895,15 @@ A convenient :attr:`~DataFrame.dtypes` attribute for DataFrames returns a Series dft dft.dtypes -On a ``Series`` use the :attr:`~Series.dtype` attribute. +On a ``Series`` object, use the :attr:`~Series.dtype` attribute. .. ipython:: python dft['A'].dtype -If a pandas object contains data multiple dtypes *IN A SINGLE COLUMN*, the dtype of the -column will be chosen to accommodate all of the data types (``object`` is the most -general). +If a pandas object contains data with multiple dtypes *in a single column*, the +dtype of the column will be chosen to accommodate all of the data types +(``object`` is the most general). .. ipython:: python @@ -1938,7 +1940,8 @@ defaults ~~~~~~~~ By default integer types are ``int64`` and float types are ``float64``, -*REGARDLESS* of platform (32-bit or 64-bit). The following will all result in ``int64`` dtypes. +*regardless* of platform (32-bit or 64-bit). +The following will all result in ``int64`` dtypes. .. ipython:: python @@ -1946,7 +1949,7 @@ By default integer types are ``int64`` and float types are ``float64``, pd.DataFrame({'a': [1, 2]}).dtypes pd.DataFrame({'a': 1 }, index=list(range(2))).dtypes -Numpy, however will choose *platform-dependent* types when creating arrays. +Note that Numpy will choose *platform-dependent* types when creating arrays. The following **WILL** result in ``int32`` on 32-bit platform. .. ipython:: python @@ -1958,7 +1961,7 @@ upcasting ~~~~~~~~~ Types can potentially be *upcasted* when combined with other types, meaning they are promoted -from the current type (say ``int`` to ``float``) +from the current type (e.g. ``int`` to ``float``). .. ipython:: python @@ -1995,7 +1998,7 @@ then the more *general* one will be used as the result of the operation. df3.astype('float32').dtypes -Convert a subset of columns to a specified type using :meth:`~DataFrame.astype` +Convert a subset of columns to a specified type using :meth:`~DataFrame.astype`. .. 
ipython:: python @@ -2006,7 +2009,7 @@ Convert a subset of columns to a specified type using :meth:`~DataFrame.astype` .. versionadded:: 0.19.0 -Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFrame.astype` +Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFrame.astype`. .. ipython:: python @@ -2148,7 +2151,7 @@ gotchas Performing selection operations on ``integer`` type data can easily upcast the data to ``floating``. The dtype of the input data will be preserved in cases where ``nans`` are not introduced. -See also :ref:`Support for integer NA <gotchas.intna>` +See also :ref:`Support for integer NA <gotchas.intna>`. .. ipython:: python @@ -2200,17 +2203,17 @@ dtypes: df['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern') df -And the dtypes +And the dtypes: .. ipython:: python df.dtypes :meth:`~DataFrame.select_dtypes` has two parameters ``include`` and ``exclude`` that allow you to -say "give me the columns WITH these dtypes" (``include``) and/or "give the -columns WITHOUT these dtypes" (``exclude``). +say "give me the columns *with* these dtypes" (``include``) and/or "give the +columns *without* these dtypes" (``exclude``). -For example, to select ``bool`` columns +For example, to select ``bool`` columns: .. ipython:: python @@ -2226,7 +2229,7 @@ You can also pass the name of a dtype in the `numpy dtype hierarchy :meth:`~pandas.DataFrame.select_dtypes` also works with generic dtypes as well. For example, to select all numeric and boolean columns while excluding unsigned -integers +integers: .. ipython:: python diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index e5c7637ddb499..c8018c8e66f72 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -93,10 +93,12 @@ constructed from the sorted keys of the dict, if possible. .. 
note:: - NaN (not a number) is the standard missing data marker used in pandas + NaN (not a number) is the standard missing data marker used in pandas. -**From scalar value** If ``data`` is a scalar value, an index must be -provided. The value will be repeated to match the length of **index** +**From scalar value** + +If ``data`` is a scalar value, an index must be +provided. The value will be repeated to match the length of **index**. .. ipython:: python @@ -106,7 +108,7 @@ Series is ndarray-like ~~~~~~~~~~~~~~~~~~~~~~ ``Series`` acts very similarly to a ``ndarray``, and is a valid argument to most NumPy functions. -However, things like slicing also slice the index. +However, operations such as slicing will also slice the index. .. ipython :: python @@ -152,10 +154,9 @@ See also the :ref:`section on attribute access<indexing.attribute_access>`. Vectorized operations and label alignment with Series ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When doing data analysis, as with raw NumPy arrays looping through Series -value-by-value is usually not necessary. Series can also be passed into most -NumPy methods expecting an ndarray. - +When working with raw NumPy arrays, looping through value-by-value is usually +not necessary. The same is true when working with Series in pandas. +Series can also be passed into most NumPy methods expecting an ndarray. .. ipython:: python @@ -245,8 +246,8 @@ based on common sense rules. From dict of Series or dicts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The result **index** will be the **union** of the indexes of the various -Series. If there are any nested dicts, these will be first converted to +The resulting **index** will be the **union** of the indexes of the various +Series. If there are any nested dicts, these will first be converted to Series. If no columns are passed, the columns will be the sorted list of dict keys. 
@@ -323,7 +324,8 @@ From a list of dicts From a dict of tuples ~~~~~~~~~~~~~~~~~~~~~ -You can automatically create a multi-indexed frame by passing a tuples dictionary +You can automatically create a multi-indexed frame by passing a tuples +dictionary. .. ipython:: python @@ -345,8 +347,8 @@ column name provided). **Missing Data** Much more will be said on this topic in the :ref:`Missing data <missing_data>` -section. To construct a DataFrame with missing data, use ``np.nan`` for those -values which are missing. Alternatively, you may pass a ``numpy.MaskedArray`` +section. To construct a DataFrame with missing data, we use ``np.nan`` to +represent missing values. Alternatively, you may pass a ``numpy.MaskedArray`` as the data argument to the DataFrame constructor, and its masked entries will be considered missing. @@ -367,9 +369,9 @@ set to ``'index'`` in order to use the dict keys as row labels. **DataFrame.from_records** ``DataFrame.from_records`` takes a list of tuples or an ndarray with structured -dtype. Works analogously to the normal ``DataFrame`` constructor, except that -index maybe be a specific field of the structured dtype to use as the index. -For example: +dtype. It works analogously to the normal ``DataFrame`` constructor, except that +the resulting DataFrame index may be a specific field of the structured +dtype. For example: .. ipython:: python @@ -467,7 +469,7 @@ derived from existing columns. (iris.assign(sepal_ratio = iris['SepalWidth'] / iris['SepalLength']) .head()) -Above was an example of inserting a precomputed value. We can also pass in +In the example above, we inserted a precomputed value. We can also pass in a function of one argument to be evalutated on the DataFrame being assigned to. .. ipython:: python @@ -480,7 +482,7 @@ DataFrame untouched. Passing a callable, as opposed to an actual value to be inserted, is useful when you don't have a reference to the DataFrame at hand. 
This is -common when using ``assign`` in chains of operations. For example, +common when using ``assign`` in a chain of operations. For example, we can limit the DataFrame to just those observations with a Sepal Length greater than 5, calculate the ratio, and plot: @@ -546,7 +548,7 @@ DataFrame: df.loc['b'] df.iloc[2] -For a more exhaustive treatment of more sophisticated label-based indexing and +For a more exhaustive treatment of sophisticated label-based indexing and slicing, see the :ref:`section on indexing <indexing>`. We will address the fundamentals of reindexing / conforming to new sets of labels in the :ref:`section on reindexing <basics.reindexing>`. @@ -739,7 +741,7 @@ DataFrame column attribute access and IPython completion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If a DataFrame column label is a valid Python variable name, the column can be -accessed like attributes: +accessed like an attribute: .. ipython:: python @@ -912,7 +914,8 @@ For example, using the earlier example data, we could do: Squeezing ~~~~~~~~~ -Another way to change the dimensionality of an object is to ``squeeze`` a 1-len object, similar to ``wp['Item1']`` +Another way to change the dimensionality of an object is to ``squeeze`` a 1-len +object, similar to ``wp['Item1']``. .. ipython:: python :okwarning: @@ -964,7 +967,7 @@ support the multi-dimensional analysis that is one of ``Panel`` s main usecases. p = tm.makePanel() p -Convert to a MultiIndex DataFrame +Convert to a MultiIndex DataFrame. .. ipython:: python :okwarning: diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 0354f6e7f06f7..73e7704b43be6 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -10,21 +10,21 @@ Package overview easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__ programming language. 
-:mod:`pandas` consists of the following elements +:mod:`pandas` consists of the following elements: * A set of labeled array data structures, the primary of which are - Series and DataFrame + Series and DataFrame. * Index objects enabling both simple axis indexing and multi-level / - hierarchical axis indexing - * An integrated group by engine for aggregating and transforming data sets + hierarchical axis indexing. + * An integrated group by engine for aggregating and transforming data sets. * Date range generation (date_range) and custom date offsets enabling the - implementation of customized frequencies + implementation of customized frequencies. * Input/Output tools: loading tabular data from flat files (CSV, delimited, Excel 2003), and saving and loading pandas objects from the fast and efficient PyTables/HDF5 format. * Memory-efficient "sparse" versions of the standard data structures for storing - data that is mostly missing or mostly constant (some fixed value) - * Moving window statistics (rolling mean, rolling standard deviation, etc.) + data that is mostly missing or mostly constant (some fixed value). + * Moving window statistics (rolling mean, rolling standard deviation, etc.). Data Structures --------------- @@ -58,7 +58,7 @@ transformations in downstream functions. For example, with tabular data (DataFrame) it is more semantically helpful to think of the **index** (the rows) and the **columns** rather than axis 0 and -axis 1. And iterating through the columns of the DataFrame thus results in more +axis 1. Iterating through the columns of the DataFrame thus results in more readable code: :: @@ -74,8 +74,7 @@ All pandas data structures are value-mutable (the values they contain can be altered) but not always size-mutable. The length of a Series cannot be changed, but, for example, columns can be inserted into a DataFrame. However, the vast majority of methods produce new objects and leave the input data -untouched. 
In general, though, we like to **favor immutability** where -sensible. +untouched. In general we like to **favor immutability** where sensible. Getting Support --------------- diff --git a/pandas/__init__.py b/pandas/__init__.py index 8d9b75ccd6c2c..861c8e7d622fc 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -104,25 +104,25 @@ Here are just a few of the things that pandas does well: - Easy handling of missing data in floating point as well as non-floating - point data + point data. - Size mutability: columns can be inserted and deleted from DataFrame and - higher dimensional objects + higher dimensional objects. - Automatic and explicit data alignment: objects can be explicitly aligned to a set of labels, or the user can simply ignore the labels and let `Series`, `DataFrame`, etc. automatically align the data for you in - computations + computations. - Powerful, flexible group by functionality to perform split-apply-combine - operations on data sets, for both aggregating and transforming data + operations on data sets, for both aggregating and transforming data. - Make it easy to convert ragged, differently-indexed data in other Python - and NumPy data structures into DataFrame objects + and NumPy data structures into DataFrame objects. - Intelligent label-based slicing, fancy indexing, and subsetting of large - data sets - - Intuitive merging and joining data sets - - Flexible reshaping and pivoting of data sets - - Hierarchical labeling of axes (possible to have multiple labels per tick) + data sets. + - Intuitive merging and joining data sets. + - Flexible reshaping and pivoting of data sets. + - Hierarchical labeling of axes (possible to have multiple labels per tick). - Robust IO tools for loading data from flat files (CSV and delimited), Excel files, databases, and saving/loading data from the ultrafast HDF5 - format + format. 
- Time series-specific functionality: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc.
I read through introductory docs, and made the following changes: - Added missing periods, which were sometimes absent in short sentences. - In other parts of the documentation, code examples appear to typically be introduced with a colon. This has been added in some sentences which introduce code and do not end in a period nor a colon. - Updated a reference to the Python 2 docs to Python 3. - Restructured a few sentences slightly. - Found an example of code that did not run due to an extra `}`, which I removed.
https://api.github.com/repos/pandas-dev/pandas/pulls/18948
2017-12-26T16:14:47Z
2017-12-27T19:30:01Z
2017-12-27T19:30:01Z
2017-12-28T00:56:29Z
CLN: ASV reshape
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 951f718257170..bd3b580d9d130 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,13 +1,16 @@ -from .pandas_vb_common import * -from pandas import melt, wide_to_long +from itertools import product +import numpy as np +from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long + +from .pandas_vb_common import setup # noqa + + +class Melt(object): -class melt_dataframe(object): goal_time = 0.2 def setup(self): - self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) - self.df = DataFrame(np.random.randn(10000, 4), index=self.index) self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C']) self.df['id1'] = np.random.randint(0, 10, 10000) self.df['id2'] = np.random.randint(100, 1000, 10000) @@ -16,50 +19,42 @@ def time_melt_dataframe(self): melt(self.df, id_vars=['id1', 'id2']) -class reshape_pivot_time_series(object): +class Pivot(object): + goal_time = 0.2 def setup(self): - self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) - self.df = DataFrame(np.random.randn(10000, 4), index=self.index) - self.index = date_range('1/1/2000', periods=10000, freq='h') - self.df = DataFrame(randn(10000, 50), index=self.index, columns=range(50)) - self.pdf = self.unpivot(self.df) - self.f = (lambda : self.pdf.pivot('date', 'variable', 'value')) + N = 10000 + index = date_range('1/1/2000', periods=N, freq='h') + data = {'value': np.random.randn(N * 50), + 'variable': np.arange(50).repeat(N), + 'date': np.tile(index.values, 50)} + self.df = DataFrame(data) def time_reshape_pivot_time_series(self): - self.f() + self.df.pivot('date', 'variable', 'value') - def unpivot(self, frame): - (N, K) = frame.shape - self.data = {'value': frame.values.ravel('F'), 'variable': np.asarray(frame.columns).repeat(N), 'date': 
np.tile(np.asarray(frame.index), K), } - return DataFrame(self.data, columns=['date', 'variable', 'value']) +class SimpleReshape(object): -class reshape_stack_simple(object): goal_time = 0.2 def setup(self): - self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) - self.df = DataFrame(np.random.randn(10000, 4), index=self.index) + arrays = [np.arange(100).repeat(100), + np.roll(np.tile(np.arange(100), 100), 25)] + index = MultiIndex.from_arrays(arrays) + self.df = DataFrame(np.random.randn(10000, 4), index=index) self.udf = self.df.unstack(1) - def time_reshape_stack_simple(self): + def time_stack(self): self.udf.stack() - -class reshape_unstack_simple(object): - goal_time = 0.2 - - def setup(self): - self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) - self.df = DataFrame(np.random.randn(10000, 4), index=self.index) - - def time_reshape_unstack_simple(self): + def time_unstack(self): self.df.unstack(1) -class reshape_unstack_large_single_dtype(object): +class Unstack(object): + goal_time = 0.2 def setup(self): @@ -67,59 +62,59 @@ def setup(self): n = 1000 levels = np.arange(m) - index = pd.MultiIndex.from_product([levels]*2) + index = MultiIndex.from_product([levels] * 2) columns = np.arange(n) - values = np.arange(m*m*n).reshape(m*m, n) - self.df = pd.DataFrame(values, index, columns) + values = np.arange(m * m * n).reshape(m * m, n) + self.df = DataFrame(values, index, columns) self.df2 = self.df.iloc[:-1] - def time_unstack_full_product(self): + def time_full_product(self): self.df.unstack() - def time_unstack_with_mask(self): + def time_without_last_row(self): self.df2.unstack() -class unstack_sparse_keyspace(object): +class SparseIndex(object): + goal_time = 0.2 def setup(self): - self.index = MultiIndex.from_arrays([np.arange(100).repeat(100), np.roll(np.tile(np.arange(100), 100), 25)]) - self.df = DataFrame(np.random.randn(10000, 4), 
index=self.index) - self.NUM_ROWS = 1000 - for iter in range(10): - self.df = DataFrame({'A': np.random.randint(50, size=self.NUM_ROWS), 'B': np.random.randint(50, size=self.NUM_ROWS), 'C': np.random.randint((-10), 10, size=self.NUM_ROWS), 'D': np.random.randint((-10), 10, size=self.NUM_ROWS), 'E': np.random.randint(10, size=self.NUM_ROWS), 'F': np.random.randn(self.NUM_ROWS), }) - self.idf = self.df.set_index(['A', 'B', 'C', 'D', 'E']) - if (len(self.idf.index.unique()) == self.NUM_ROWS): - break + NUM_ROWS = 1000 + self.df = DataFrame({'A': np.random.randint(50, size=NUM_ROWS), + 'B': np.random.randint(50, size=NUM_ROWS), + 'C': np.random.randint(-10, 10, size=NUM_ROWS), + 'D': np.random.randint(-10, 10, size=NUM_ROWS), + 'E': np.random.randint(10, size=NUM_ROWS), + 'F': np.random.randn(NUM_ROWS)}) + self.df = self.df.set_index(['A', 'B', 'C', 'D', 'E']) + + def time_unstack(self): + self.df.unstack() - def time_unstack_sparse_keyspace(self): - self.idf.unstack() +class WideToLong(object): -class wide_to_long_big(object): goal_time = 0.2 def setup(self): - vars = 'ABCD' nyrs = 20 nidvars = 20 N = 5000 - yrvars = [] - for var in vars: - for yr in range(1, nyrs + 1): - yrvars.append(var + str(yr)) + self.letters = list('ABCD') + yrvars = [l + str(num) + for l, num in product(self.letters, range(1, nyrs + 1))] - self.df = pd.DataFrame(np.random.randn(N, nidvars + len(yrvars)), - columns=list(range(nidvars)) + yrvars) - self.vars = vars + self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)), + columns=list(range(nidvars)) + yrvars) + self.df['id'] = self.df.index def time_wide_to_long_big(self): - self.df['id'] = self.df.index - wide_to_long(self.df, list(self.vars), i='id', j='year') + wide_to_long(self.df, self.letters, i='id', j='year') class PivotTable(object): + goal_time = 0.2 def setup(self):
Flake8'd and simplified setups where available. ``` $ asv dev -b ^reshape · Discovering benchmarks · Running 9 total benchmarks (1 commits * 1 environments * 9 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 11.11%] ··· Running reshape.Melt.time_melt_dataframe 7.27ms [ 22.22%] ··· Running reshape.Pivot.time_reshape_pivot_time_series 441ms [ 33.33%] ··· Running reshape.PivotTable.time_pivot_table 46.0ms [ 44.44%] ··· Running reshape.SimpleReshape.time_stack 6.73ms [ 55.56%] ··· Running reshape.SimpleReshape.time_unstack 6.05ms [ 66.67%] ··· Running reshape.SparseIndex.time_unstack 2.76ms [ 77.78%] ··· Running reshape.Unstack.time_full_product 254ms [ 88.89%] ··· Running reshape.Unstack.time_without_last_row 477ms [100.00%] ··· Running reshape.WideToLong.time_wide_to_long_big 378ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18944
2017-12-26T07:28:34Z
2017-12-26T21:38:26Z
2017-12-26T21:38:26Z
2017-12-31T04:33:36Z
DOC: typo in documentation
diff --git a/doc/source/internals.rst b/doc/source/internals.rst index 3d96b93de4cc9..a321b4202296f 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -217,7 +217,7 @@ Below is an example to define 2 original properties, "internal_cache" as a tempo .. code-block:: python - >>> df = SubclassedDataFrame2({'A', [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]}) + >>> df = SubclassedDataFrame2({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]}) >>> df A B C 0 1 4 7
https://api.github.com/repos/pandas-dev/pandas/pulls/18942
2017-12-25T23:50:39Z
2017-12-26T08:09:36Z
2017-12-26T08:09:36Z
2017-12-26T08:09:41Z
DOC: Fixed minor spelling errors
diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 00a71603e1261..0354f6e7f06f7 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -102,7 +102,7 @@ project, and makes it possible to `donate <https://pandas.pydata.org/donate.html Project Governance ------------------ -The governance process that pandas project has used informally since its inception in 2008 is formalized in `Project Governance documents <https://github.com/pandas-dev/pandas-governance>`__ . +The governance process that pandas project has used informally since its inception in 2008 is formalized in `Project Governance documents <https://github.com/pandas-dev/pandas-governance>`__. The documents clarify how decisions are made and how the various elements of our community interact, including the relationship between open source collaborative development and work that may be funded by for-profit or non-profit entities. Wes McKinney is the Benevolent Dictator for Life (BDFL). @@ -116,7 +116,7 @@ The list of the Core Team members and more detailed information can be found on Institutional Partners ---------------------- -The information about current institutional partners can be found on `pandas website page <https://pandas.pydata.org/about.html>`__ +The information about current institutional partners can be found on `pandas website page <https://pandas.pydata.org/about.html>`__. 
License ------- diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 1c34c16ea965a..0b8a2cb89b45e 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -161,6 +161,7 @@ Modern Pandas - `Performance <http://tomaugspurger.github.io/modern-4-performance.html>`_ - `Tidy Data <http://tomaugspurger.github.io/modern-5-tidy.html>`_ - `Visualization <http://tomaugspurger.github.io/modern-6-visualization.html>`_ +- `Timeseries <http://tomaugspurger.github.io/modern-7-timeseries.html>`_ Excel charts with pandas, vincent and xlsxwriter ------------------------------------------------
Two small changes: - Two minor spelling errors corrected. - Added part 7 of a tutorial series to the docs (parts 1-6 were included already, but not part 7).
https://api.github.com/repos/pandas-dev/pandas/pulls/18941
2017-12-25T22:03:29Z
2017-12-26T08:11:58Z
2017-12-26T08:11:58Z
2017-12-26T08:39:12Z
ENH: Let Resampler objects have a pipe method
diff --git a/doc/source/api.rst b/doc/source/api.rst index 64f972e52d190..68721b76eed7e 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2274,6 +2274,7 @@ Function application Resampler.apply Resampler.aggregate Resampler.transform + Resampler.pipe Upsampling ~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f300deddebeb..735742964f3ee 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -142,6 +142,8 @@ Other Enhancements - ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` can now take a callable as their argument (:issue:`18862`) - :class:`Interval` and :class:`IntervalIndex` have gained a ``length`` attribute (:issue:`18789`) +- ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method. + Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`). .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index ced120fbdbe29..47b80c00da4d4 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -191,6 +191,60 @@ dtype: int64 """) +_pipe_template = """\ +Apply a function ``func`` with arguments to this %(klass)s object and return +the function's result. + +%(versionadded)s + +Use ``.pipe`` when you want to improve readability by chaining together +functions that expect Series, DataFrames, GroupBy or Resampler objects. +Instead of writing + +>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) + +You can write + +>>> (df.groupby('group') +... .pipe(f) +... .pipe(g, arg1=a) +... .pipe(h, arg2=b, arg3=c)) + +which is much more readable. 
+ +Parameters +---------- +func : callable or tuple of (callable, string) + Function to apply to this %(klass)s object or, alternatively, + a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a + string indicating the keyword of ``callable`` that expects the + %(klass)s object. +args : iterable, optional + positional arguments passed into ``func``. +kwargs : dict, optional + a dictionary of keyword arguments passed into ``func``. + +Returns +------- +object : the return type of ``func``. + +Notes +----- +See more `here +<http://pandas.pydata.org/pandas-docs/stable/groupby.html#pipe>`_ + +Examples +-------- +%(examples)s + +See Also +-------- +pandas.Series.pipe : Apply a function with arguments to a series +pandas.DataFrame.pipe: Apply a function with arguments to a dataframe +apply : Apply function to each group instead of to the + full %(klass)s object. +""" + _transform_template = """ Call function producing a like-indexed %(klass)s on each group and return a %(klass)s having the same indexes as the original object @@ -676,6 +730,29 @@ def __getattr__(self, attr): raise AttributeError("%r object has no attribute %r" % (type(self).__name__, attr)) + @Substitution(klass='GroupBy', + versionadded='.. versionadded:: 0.21.0', + examples="""\ +>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]}) +>>> df + A B +0 a 1 +1 b 2 +2 a 3 +3 b 4 + +To get the difference between each groups maximum and minimum value in one +pass, you can do + +>>> df.groupby('A').pipe(lambda x: x.max() - x.min()) + B +A +a 2 +b 2""") + @Appender(_pipe_template) + def pipe(self, func, *args, **kwargs): + return _pipe(self, func, *args, **kwargs) + plot = property(GroupByPlot) def _make_wrapper(self, name): @@ -1779,54 +1856,6 @@ def tail(self, n=5): mask = self._cumcount_array(ascending=False) < n return self._selected_obj[mask] - def pipe(self, func, *args, **kwargs): - """ Apply a function with arguments to this GroupBy object, - - .. 
versionadded:: 0.21.0 - - Parameters - ---------- - func : callable or tuple of (callable, string) - Function to apply to this GroupBy object or, alternatively, a - ``(callable, data_keyword)`` tuple where ``data_keyword`` is a - string indicating the keyword of ``callable`` that expects the - GroupBy object. - args : iterable, optional - positional arguments passed into ``func``. - kwargs : dict, optional - a dictionary of keyword arguments passed into ``func``. - - Returns - ------- - object : the return type of ``func``. - - Notes - ----- - Use ``.pipe`` when chaining together functions that expect - Series, DataFrames or GroupBy objects. Instead of writing - - >>> f(g(h(df.groupby('group')), arg1=a), arg2=b, arg3=c) - - You can write - - >>> (df - ... .groupby('group') - ... .pipe(f, arg1) - ... .pipe(g, arg2) - ... .pipe(h, arg3)) - - See more `here - <http://pandas.pydata.org/pandas-docs/stable/groupby.html#pipe>`_ - - See Also - -------- - pandas.Series.pipe : Apply a function with arguments to a series - pandas.DataFrame.pipe: Apply a function with arguments to a dataframe - apply : Apply function to each group instead of to the - full GroupBy object. 
- """ - return _pipe(self, func, *args, **kwargs) - GroupBy._add_numeric_operations() diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 9f5439b68558b..c2bf7cff746eb 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -8,7 +8,8 @@ from pandas.core.base import AbstractMethodError, GroupByMixin from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy, - SeriesGroupBy, groupby, PanelGroupBy) + SeriesGroupBy, groupby, PanelGroupBy, + _pipe_template) from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod from pandas.core.indexes.datetimes import DatetimeIndex, date_range @@ -26,7 +27,7 @@ from pandas._libs.lib import Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, Substitution from pandas.core.generic import _shared_docs _shared_docs_kwargs = dict() @@ -257,6 +258,29 @@ def _assure_grouper(self): """ make sure that we are creating our binner & grouper """ self._set_binner() + @Substitution(klass='Resampler', + versionadded='.. versionadded:: 0.23.0', + examples=""" +>>> df = pd.DataFrame({'A': [1, 2, 3, 4]}, +... 
index=pd.date_range('2012-08-02', periods=4)) +>>> df + A +2012-08-02 1 +2012-08-03 2 +2012-08-04 3 +2012-08-05 4 + +To get the difference between each 2-day period's maximum and minimum value in +one pass, you can do + +>>> df.resample('2D').pipe(lambda x: x.max() - x.min()) + A +2012-08-02 1 +2012-08-04 1""") + @Appender(_pipe_template) + def pipe(self, func, *args, **kwargs): + return super(Resampler, self).pipe(func, *args, **kwargs) + def plot(self, *args, **kwargs): # for compat with prior versions, we want to # have the warnings shown here and just have this work diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index f00fa07d868a1..38f4b8be469a5 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -235,6 +235,21 @@ def test_groupby_resample_on_api(self): result = df.groupby('key').resample('D', on='dates').mean() assert_frame_equal(result, expected) + def test_pipe(self): + # GH17905 + + # series + r = self.series.resample('H') + expected = r.max() - r.mean() + result = r.pipe(lambda x: x.max() - x.mean()) + tm.assert_series_equal(result, expected) + + # dataframe + r = self.frame.resample('H') + expected = r.max() - r.mean() + result = r.pipe(lambda x: x.max() - x.mean()) + tm.assert_frame_equal(result, expected) + @td.skip_if_no_mpl def test_plot_api(self): # .resample(....).plot(...)
- [x] closes #17905 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Currently, calls to ``df.resample(....).pipe(...)`` are converted to ``df.resample(....).mean().pipe(...)`` and a warning is emitted (see #17905). This PR solves this by moving the ``pipe`` method from the ``GroupBy`` class to the ``_GroupBy`` class. As ``_GroupBy`` is a common parent class of both ``GroupBy`` and ``Resampler``, the ``pipe`` method is now available for ``Resampler`` too. See also #17871.
https://api.github.com/repos/pandas-dev/pandas/pulls/18940
2017-12-25T14:21:25Z
2017-12-26T21:45:49Z
2017-12-26T21:45:49Z
2017-12-26T22:11:05Z
CLN: ASV reindex
diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py index 537d275e7c727..69a1a604b1ccc 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -1,89 +1,77 @@ -from .pandas_vb_common import * -from random import shuffle +import numpy as np +import pandas.util.testing as tm +from pandas import (DataFrame, Series, DatetimeIndex, MultiIndex, Index, + date_range) +from .pandas_vb_common import setup, lib # noqa -class Reindexing(object): +class Reindex(object): + goal_time = 0.2 def setup(self): - self.rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min') - self.df = DataFrame(np.random.rand(10000, 10), index=self.rng, + rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min') + self.df = DataFrame(np.random.rand(10000, 10), index=rng, columns=range(10)) self.df['foo'] = 'bar' - self.rng2 = Index(self.rng[::2]) - + self.rng_subset = Index(rng[::2]) self.df2 = DataFrame(index=range(10000), data=np.random.rand(10000, 30), columns=range(30)) - - # multi-index N = 5000 K = 200 level1 = tm.makeStringIndex(N).values.repeat(K) level2 = np.tile(tm.makeStringIndex(K).values, N) index = MultiIndex.from_arrays([level1, level2]) - self.s1 = Series(np.random.randn((N * K)), index=index) - self.s2 = self.s1[::2] + self.s = Series(np.random.randn(N * K), index=index) + self.s_subset = self.s[::2] def time_reindex_dates(self): - self.df.reindex(self.rng2) + self.df.reindex(self.rng_subset) def time_reindex_columns(self): self.df2.reindex(columns=self.df.columns[1:5]) def time_reindex_multiindex(self): - self.s1.reindex(self.s2.index) + self.s.reindex(self.s_subset.index) -#---------------------------------------------------------------------- -# Pad / backfill +class ReindexMethod(object): - -class FillMethod(object): goal_time = 0.2 + params = ['pad', 'backfill'] + param_names = ['method'] - def setup(self): - self.rng = date_range('1/1/2000', periods=100000, freq='1min') - self.ts = 
Series(np.random.randn(len(self.rng)), index=self.rng) - self.ts2 = self.ts[::2] - self.ts3 = self.ts2.reindex(self.ts.index) - self.ts4 = self.ts3.astype('float32') - - def pad(self, source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - - def backfill(self, source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - - def time_backfill_dates(self): - self.backfill(self.ts2, self.ts.index) + def setup(self, method): + N = 100000 + self.idx = date_range('1/1/2000', periods=N, freq='1min') + self.ts = Series(np.random.randn(N), index=self.idx)[::2] - def time_pad_daterange(self): - self.pad(self.ts2, self.ts.index) + def time_reindex_method(self, method): + self.ts.reindex(self.idx, method=method) - def time_backfill(self): - self.ts3.fillna(method='backfill') - def time_backfill_float32(self): - self.ts4.fillna(method='backfill') +class Fillna(object): - def time_pad(self): - self.ts3.fillna(method='pad') + goal_time = 0.2 + params = ['pad', 'backfill'] + param_names = ['method'] - def time_pad_float32(self): - self.ts4.fillna(method='pad') + def setup(self, method): + N = 100000 + self.idx = date_range('1/1/2000', periods=N, freq='1min') + ts = Series(np.random.randn(N), index=self.idx)[::2] + self.ts_reindexed = ts.reindex(self.idx) + self.ts_float32 = self.ts_reindexed.astype('float32') + def time_reindexed(self, method): + self.ts_reindexed.fillna(method=method) -#---------------------------------------------------------------------- -# align on level + def time_float_32(self, method): + self.ts_float32.fillna(method=method) class LevelAlign(object): + goal_time = 0.2 def setup(self): @@ -92,7 +80,6 @@ def setup(self): labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)]) - 
random.shuffle(self.index.values) self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) self.df_level = DataFrame(np.random.randn(100, 4), @@ -102,103 +89,85 @@ def time_align_level(self): self.df.align(self.df_level, level=1, copy=False) def time_reindex_level(self): - self.df_level.reindex(self.df.index, level=1) + self.df_level.reindex(self.index, level=1) -#---------------------------------------------------------------------- -# drop_duplicates +class DropDuplicates(object): - -class Duplicates(object): goal_time = 0.2 - - def setup(self): - self.N = 10000 - self.K = 10 - self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K) - self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K) - self.df = DataFrame({'key1': self.key1, 'key2': self.key2, - 'value': np.random.randn((self.N * self.K)),}) - self.col_array_list = list(self.df.values.T) - - self.df2 = self.df.copy() - self.df2.ix[:10000, :] = np.nan + params = [True, False] + param_names = ['inplace'] + + def setup(self, inplace): + N = 10000 + K = 10 + key1 = tm.makeStringIndex(N).values.repeat(K) + key2 = tm.makeStringIndex(N).values.repeat(K) + self.df = DataFrame({'key1': key1, 'key2': key2, + 'value': np.random.randn(N * K)}) + self.df_nan = self.df.copy() + self.df_nan.iloc[:10000, :] = np.nan self.s = Series(np.random.randint(0, 1000, size=10000)) - self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10)) - - np.random.seed(1234) - self.N = 1000000 - self.K = 10000 - self.key1 = np.random.randint(0, self.K, size=self.N) - self.df_int = DataFrame({'key1': self.key1}) - self.df_bool = DataFrame({i: np.random.randint(0, 2, size=self.K, - dtype=bool) - for i in range(10)}) + self.s_str = Series(np.tile(tm.makeStringIndex(1000).values, 10)) - def time_frame_drop_dups(self): - self.df.drop_duplicates(['key1', 'key2']) + N = 1000000 + K = 10000 + key1 = np.random.randint(0, K, size=N) + self.df_int = DataFrame({'key1': key1}) + self.df_bool = DataFrame(np.random.randint(0, 
2, size=(K, 10), + dtype=bool)) - def time_frame_drop_dups_inplace(self): - self.df.drop_duplicates(['key1', 'key2'], inplace=True) + def time_frame_drop_dups(self, inplace): + self.df.drop_duplicates(['key1', 'key2'], inplace=inplace) - def time_frame_drop_dups_na(self): - self.df2.drop_duplicates(['key1', 'key2']) + def time_frame_drop_dups_na(self, inplace): + self.df_nan.drop_duplicates(['key1', 'key2'], inplace=inplace) - def time_frame_drop_dups_na_inplace(self): - self.df2.drop_duplicates(['key1', 'key2'], inplace=True) + def time_series_drop_dups_int(self, inplace): + self.s.drop_duplicates(inplace=inplace) - def time_series_drop_dups_int(self): - self.s.drop_duplicates() + def time_series_drop_dups_string(self, inplace): + self.s_str.drop_duplicates(inplace=inplace) - def time_series_drop_dups_string(self): - self.s2.drop_duplicates() + def time_frame_drop_dups_int(self, inplace): + self.df_int.drop_duplicates(inplace=inplace) - def time_frame_drop_dups_int(self): - self.df_int.drop_duplicates() - - def time_frame_drop_dups_bool(self): - self.df_bool.drop_duplicates() - -#---------------------------------------------------------------------- -# blog "pandas escaped the zoo" + def time_frame_drop_dups_bool(self, inplace): + self.df_bool.drop_duplicates(inplace=inplace) class Align(object): + # blog "pandas escaped the zoo" goal_time = 0.2 def setup(self): n = 50000 indices = tm.makeStringIndex(n) subsample_size = 40000 - - def sample(values, k): - sampler = np.arange(len(values)) - shuffle(sampler) - return values.take(sampler[:k]) - - self.x = Series(np.random.randn(50000), indices) + self.x = Series(np.random.randn(n), indices) self.y = Series(np.random.randn(subsample_size), - index=sample(indices, subsample_size)) + index=np.random.choice(indices, subsample_size, + replace=False)) def time_align_series_irregular_string(self): - (self.x + self.y) + self.x + self.y class LibFastZip(object): + goal_time = 0.2 def setup(self): - self.N = 10000 - self.K = 10 
- self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K) - self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K) - self.df = DataFrame({'key1': self.key1, 'key2': self.key2, 'value': np.random.randn((self.N * self.K)), }) - self.col_array_list = list(self.df.values.T) - - self.df2 = self.df.copy() - self.df2.ix[:10000, :] = np.nan - self.col_array_list2 = list(self.df2.values.T) + N = 10000 + K = 10 + key1 = tm.makeStringIndex(N).values.repeat(K) + key2 = tm.makeStringIndex(N).values.repeat(K) + col_array = np.vstack([key1, key2, np.random.randn(N * K)]) + col_array2 = col_array.copy() + col_array2[:, :10000] = np.nan + self.col_array_list = list(col_array) + self.col_array_list2 = list(col_array2) def time_lib_fast_zip(self): lib.fast_zip(self.col_array_list)
Flake8'd, utilized `param`s and simplified setup where possible. ``` $ asv dev -b ^reindex · Discovering benchmarks · Running 17 total benchmarks (1 commits * 1 environments * 17 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 5.88%] ··· Running ...ex.Align.time_align_series_irregular_string 604ms [ 11.76%] ··· Running reindex.DropDuplicates.time_frame_drop_dups ok [ 11.76%] ···· ========= ======== inplace --------- -------- True 26.9ms False 24.8ms ========= ======== [ 17.65%] ··· Running ...ex.DropDuplicates.time_frame_drop_dups_bool ok [ 17.65%] ···· ========= ======== inplace --------- -------- True 6.48ms False 7.86ms ========= ======== [ 23.53%] ··· Running ...dex.DropDuplicates.time_frame_drop_dups_int ok [ 23.53%] ···· ========= ======== inplace --------- -------- True 73.2ms False 65.5ms ========= ======== [ 29.41%] ··· Running reindex.DropDuplicates.time_frame_drop_dups_na ok [ 29.41%] ···· ========= ======== inplace --------- -------- True 30.3ms False 29.5ms ========= ======== [ 35.29%] ··· Running ...ex.DropDuplicates.time_series_drop_dups_int ok [ 35.29%] ···· ========= ======== inplace --------- -------- True 1.41ms False 1.35ms ========= ======== [ 41.18%] ··· Running ...DropDuplicates.time_series_drop_dups_string ok [ 41.18%] ···· ========= ======== inplace --------- -------- True 1.81ms False 1.78ms ========= ======== [ 47.06%] ··· Running reindex.Fillna.time_float_32 ok [ 47.06%] ···· ========== ======= method ---------- ------- pad 816μs backfill 911μs ========== ======= [ 52.94%] ··· Running reindex.Fillna.time_reindexed ok [ 52.94%] ···· ========== ======== method ---------- -------- pad 1.68ms backfill 1.45ms ========== ======== [ 58.82%] ··· Running reindex.LevelAlign.time_align_level 29.5ms [ 64.71%] ··· Running reindex.LevelAlign.time_reindex_level 31.3ms [ 70.59%] ··· Running 
reindex.LibFastZip.time_lib_fast_zip 30.2ms [ 76.47%] ··· Running reindex.LibFastZip.time_lib_fast_zip_fillna 35.4ms [ 82.35%] ··· Running reindex.Reindex.time_reindex_columns 2.25ms [ 88.24%] ··· Running reindex.Reindex.time_reindex_dates 1.90ms [ 94.12%] ··· Running reindex.Reindex.time_reindex_multiindex 650ms [100.00%] ··· Running reindex.ReindexMethod.time_reindex_method ok [100.00%] ···· ========== ======== method ---------- -------- pad 7.01ms backfill 6.87ms ========== ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18938
2017-12-25T05:41:57Z
2017-12-26T21:47:33Z
2017-12-26T21:47:33Z
2017-12-31T04:33:51Z
CLN/BUG: Consolidate Index.astype and fix tz aware bugs
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 8c94cef4d8ea7..df17f6dd4c16f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -288,6 +288,7 @@ Conversion - Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) - Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) - Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) +- Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) Indexing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 79de63b0caeb6..d5dbfec9ecc49 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1065,12 +1065,18 @@ def _to_embed(self, keep_tz=False, dtype=None): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): - if is_categorical_dtype(dtype): + if is_dtype_equal(self.dtype, dtype): + return self.copy() if copy else self + elif is_categorical_dtype(dtype): from .category import CategoricalIndex return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy) - return Index(self.values.astype(dtype, copy=copy), name=self.name, - dtype=dtype) + try: + return Index(self.values.astype(dtype, copy=copy), name=self.name, + dtype=dtype) + except (TypeError, ValueError): + msg = 'Cannot cast {name} to dtype {dtype}' + raise TypeError(msg.format(name=type(self).__name__, dtype=dtype)) def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8cc996285fbbd..4a66475c85691 100644 --- 
a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -11,13 +11,22 @@ import numpy as np from pandas.core.dtypes.common import ( - is_integer, is_float, - is_bool_dtype, _ensure_int64, - is_scalar, is_dtype_equal, - is_list_like, is_timedelta64_dtype) + _ensure_int64, + is_dtype_equal, + is_float, + is_integer, + is_list_like, + is_scalar, + is_bool_dtype, + is_categorical_dtype, + is_datetime_or_timedelta_dtype, + is_float_dtype, + is_integer_dtype, + is_object_dtype, + is_string_dtype, + is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCIndex, ABCSeries, - ABCPeriodIndex, ABCIndexClass) + ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr @@ -859,6 +868,22 @@ def _concat_same_dtype(self, to_concat, name): new_data = np.concatenate([c.asi8 for c in to_concat]) return self._simple_new(new_data, **attribs) + def astype(self, dtype, copy=True): + if is_object_dtype(dtype): + return self._box_values_as_index() + elif is_string_dtype(dtype) and not is_categorical_dtype(dtype): + return Index(self.format(), name=self.name, dtype=object) + elif is_integer_dtype(dtype): + return Index(self.values.astype('i8', copy=copy), name=self.name, + dtype='i8') + elif (is_datetime_or_timedelta_dtype(dtype) and + not is_dtype_equal(self.dtype, dtype)) or is_float_dtype(dtype): + # disallow conversion between datetime/timedelta, + # and conversions for any datetimelike to float + msg = 'Cannot cast {name} to dtype {dtype}' + raise TypeError(msg.format(name=type(self).__name__, dtype=dtype)) + return super(DatetimeIndexOpsMixin, self).astype(dtype, copy=copy) + def _ensure_datetimelike_to_i8(other): """ helper for coercing an input scalar or array to i8 """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index bec26ef72d63a..9e804b6575c47 100644 --- 
a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -10,17 +10,19 @@ from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( - _NS_DTYPE, _INT64_DTYPE, - is_object_dtype, is_datetime64_dtype, - is_datetimetz, is_dtype_equal, + _INT64_DTYPE, + _NS_DTYPE, + is_object_dtype, + is_datetime64_dtype, + is_datetimetz, + is_dtype_equal, is_timedelta64_dtype, - is_integer, is_float, + is_integer, + is_float, is_integer_dtype, is_datetime64_ns_dtype, is_period_dtype, is_bool_dtype, - is_string_dtype, - is_categorical_dtype, is_string_like, is_list_like, is_scalar, @@ -36,20 +38,17 @@ from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs -from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.numeric import Int64Index, Float64Index import pandas.compat as compat -from pandas.tseries.frequencies import ( - to_offset, get_period_alias, - Resolution) +from pandas.tseries.frequencies import to_offset, get_period_alias, Resolution from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import ( DateOffset, generate_range, Tick, CDay, prefix_mapping) from pandas.core.tools.timedeltas import to_timedelta -from pandas.util._decorators import (Appender, cache_readonly, - deprecate_kwarg, Substitution) +from pandas.util._decorators import ( + Appender, cache_readonly, deprecate_kwarg, Substitution) import pandas.core.common as com import pandas.tseries.offsets as offsets import pandas.core.tools.datetimes as tools @@ -906,25 +905,16 @@ def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) - if is_object_dtype(dtype): - return self._box_values_as_index() - elif is_integer_dtype(dtype): - return Index(self.values.astype('i8', copy=copy), name=self.name, - 
dtype='i8') - elif is_datetime64_ns_dtype(dtype): - if self.tz is not None: - return self.tz_convert('UTC').tz_localize(None) - elif copy is True: - return self.copy() - return self - elif is_categorical_dtype(dtype): - return CategoricalIndex(self.values, name=self.name, dtype=dtype, - copy=copy) - elif is_string_dtype(dtype): - return Index(self.format(), name=self.name, dtype=object) + if (is_datetime64_ns_dtype(dtype) and + not is_dtype_equal(dtype, self.dtype)): + # GH 18951: datetime64_ns dtype but not equal means different tz + new_tz = getattr(dtype, 'tz', None) + if getattr(self.dtype, 'tz', None) is None: + return self.tz_localize(new_tz) + return self.tz_convert(new_tz) elif is_period_dtype(dtype): return self.to_period(freq=dtype.freq) - raise TypeError('Cannot cast DatetimeIndex to dtype %s' % dtype) + return super(DatetimeIndex, self).astype(dtype, copy=copy) def _get_time_micros(self): values = self.asi8 diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 49e574dcbae45..2a132f683c519 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -12,8 +12,6 @@ is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, is_integer_dtype, - is_object_dtype, - is_categorical_dtype, is_float_dtype, is_interval_dtype, is_scalar, @@ -29,7 +27,6 @@ Interval, IntervalMixin, IntervalTree, intervals_to_interval_bounds) -from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex @@ -671,16 +668,8 @@ def copy(self, deep=False, name=None): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): if is_interval_dtype(dtype): - if copy: - self = self.copy() - return self - elif is_object_dtype(dtype): - return Index(self.values, dtype=object) - elif is_categorical_dtype(dtype): - return CategoricalIndex(self.values, name=self.name, 
dtype=dtype, - copy=copy) - raise ValueError('Cannot cast IntervalIndex to dtype {dtype}' - .format(dtype=dtype)) + return self.copy() if copy else self + return super(IntervalIndex, self).astype(dtype, copy=copy) @cache_readonly def dtype(self): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5fc9cb47362d6..5995b9fc7674c 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -4,10 +4,8 @@ from pandas.core.dtypes.common import ( is_dtype_equal, pandas_dtype, - is_float_dtype, - is_object_dtype, + needs_i8_conversion, is_integer_dtype, - is_categorical_dtype, is_bool, is_bool_dtype, is_scalar) @@ -17,7 +15,6 @@ from pandas.core import algorithms from pandas.core.indexes.base import ( Index, InvalidIndexError, _index_shared_docs) -from pandas.core.indexes.category import CategoricalIndex from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat import pandas.core.indexes.base as ibase @@ -315,22 +312,14 @@ def inferred_type(self): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) - if is_float_dtype(dtype): - values = self._values.astype(dtype, copy=copy) - elif is_integer_dtype(dtype): - if self.hasnans: - raise ValueError('cannot convert float NaN to integer') - values = self._values.astype(dtype, copy=copy) - elif is_object_dtype(dtype): - values = self._values.astype('object', copy=copy) - elif is_categorical_dtype(dtype): - return CategoricalIndex(self, name=self.name, dtype=dtype, - copy=copy) - else: - raise TypeError('Setting {cls} dtype to anything other than ' - 'float64, object, or category is not supported' - .format(cls=self.__class__)) - return Index(values, name=self.name, dtype=dtype) + if needs_i8_conversion(dtype): + msg = ('Cannot convert Float64Index to dtype {dtype}; integer ' + 'values are required for conversion').format(dtype=dtype) + raise TypeError(msg) + elif 
is_integer_dtype(dtype) and self.hasnans: + # GH 13149 + raise ValueError('Cannot convert NA to integer') + return super(Float64Index, self).astype(dtype, copy=copy) @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 64756906d8a63..8b35b1a231551 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -7,16 +7,14 @@ from pandas.core.dtypes.common import ( is_integer, is_float, - is_object_dtype, is_integer_dtype, is_float_dtype, is_scalar, is_datetime64_dtype, - is_datetime64tz_dtype, + is_datetime64_any_dtype, is_timedelta64_dtype, is_period_dtype, is_bool_dtype, - is_categorical_dtype, pandas_dtype, _ensure_object) from pandas.core.dtypes.dtypes import PeriodDtype @@ -24,7 +22,6 @@ import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc -from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin @@ -506,23 +503,14 @@ def asof_locs(self, where, mask): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True, how='start'): dtype = pandas_dtype(dtype) - if is_object_dtype(dtype): - return self._box_values_as_index() - elif is_integer_dtype(dtype): - if copy: - return self._int64index.copy() - else: - return self._int64index - elif is_datetime64_dtype(dtype): - return self.to_timestamp(how=how) - elif is_datetime64tz_dtype(dtype): - return self.to_timestamp(how=how).tz_localize(dtype.tz) + if is_integer_dtype(dtype): + return self._int64index.copy() if copy else self._int64index + elif is_datetime64_any_dtype(dtype): + tz = getattr(dtype, 'tz', None) + return self.to_timestamp(how=how).tz_localize(tz) elif is_period_dtype(dtype): 
return self.asfreq(freq=dtype.freq) - elif is_categorical_dtype(dtype): - return CategoricalIndex(self.values, name=self.name, dtype=dtype, - copy=copy) - raise TypeError('Cannot cast PeriodIndex to dtype %s' % dtype) + return super(PeriodIndex, self).astype(dtype, copy=copy) @Substitution(klass='PeriodIndex') @Appender(_shared_docs['searchsorted']) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 25c764b138465..d28a09225e8b8 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -4,15 +4,13 @@ import numpy as np from pandas.core.dtypes.common import ( _TD_DTYPE, - is_integer, is_float, + is_integer, + is_float, is_bool_dtype, is_list_like, is_scalar, - is_integer_dtype, - is_object_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, - is_categorical_dtype, pandas_dtype, _ensure_int64) from pandas.core.dtypes.missing import isna @@ -20,7 +18,6 @@ from pandas.core.common import _maybe_box, _values_from_object from pandas.core.indexes.base import Index -from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat from pandas.compat import u @@ -483,28 +480,14 @@ def to_pytimedelta(self): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) - - if is_object_dtype(dtype): - return self._box_values_as_index() - elif is_timedelta64_ns_dtype(dtype): - if copy is True: - return self.copy() - return self - elif is_timedelta64_dtype(dtype): + if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype): # return an index (essentially this is division) result = self.values.astype(dtype, copy=copy) if self.hasnans: - return Index(self._maybe_mask_results(result, - convert='float64'), - name=self.name) + values = self._maybe_mask_results(result, convert='float64') + return Index(values, name=self.name) return Index(result.astype('i8'), name=self.name) - elif 
is_integer_dtype(dtype): - return Index(self.values.astype('i8', copy=copy), dtype='i8', - name=self.name) - elif is_categorical_dtype(dtype): - return CategoricalIndex(self.values, name=self.name, dtype=dtype, - copy=copy) - raise TypeError('Cannot cast TimedeltaIndex to dtype %s' % dtype) + return super(TimedeltaIndex, self).astype(dtype, copy=copy) def union(self, other): """ diff --git a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index e211807b6a3e4..1d72ca609b1d3 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -57,6 +57,18 @@ def test_astype_with_tz(self): dtype=object) tm.assert_series_equal(result, expected) + # GH 18951: tz-aware to tz-aware + idx = date_range('20170101', periods=4, tz='US/Pacific') + result = idx.astype('datetime64[ns, US/Eastern]') + expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern') + tm.assert_index_equal(result, expected) + + # GH 18951: tz-naive to tz-aware + idx = date_range('20170101', periods=4) + result = idx.astype('datetime64[ns, US/Eastern]') + expected = date_range('20170101', periods=4, tz='US/Eastern') + tm.assert_index_equal(result, expected) + def test_astype_str_compat(self): # GH 13149, GH 13209 # verify that we are returing NaT as a string (and not unicode) @@ -126,15 +138,15 @@ def test_astype_object(self): tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) assert casted.tolist() == exp_values - def test_astype_raises(self): + @pytest.mark.parametrize('dtype', [ + float, 'timedelta64', 'timedelta64[ns]', 'datetime64', + 'datetime64[D]']) + def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN]) - - pytest.raises(TypeError, idx.astype, float) - pytest.raises(TypeError, idx.astype, 'timedelta64') - pytest.raises(TypeError, idx.astype, 'timedelta64[ns]') - pytest.raises(TypeError, idx.astype, 'datetime64') - 
pytest.raises(TypeError, idx.astype, 'datetime64[D]') + msg = 'Cannot cast DatetimeIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + idx.astype(dtype) def test_index_convert_to_datetime_array(self): def _check_rng(rng): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 599f6efd16f74..ab341b70dfe91 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -39,19 +39,23 @@ def test_astype_conversion(self): dtype=np.int64) tm.assert_index_equal(result, expected) + result = idx.astype(str) + expected = Index(str(x) for x in idx) + tm.assert_index_equal(result, expected) + idx = period_range('1990', '2009', freq='A') result = idx.astype('i8') tm.assert_index_equal(result, Index(idx.asi8)) tm.assert_numpy_array_equal(result.values, idx.asi8) - def test_astype_raises(self): + @pytest.mark.parametrize('dtype', [ + float, 'timedelta64', 'timedelta64[ns]']) + def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') - - pytest.raises(TypeError, idx.astype, str) - pytest.raises(TypeError, idx.astype, float) - pytest.raises(TypeError, idx.astype, 'timedelta64') - pytest.raises(TypeError, idx.astype, 'timedelta64[ns]') + msg = 'Cannot cast PeriodIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + idx.astype(dtype) def test_pickle_compat_construction(self): pass diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 74446af8b77f6..4169c93809059 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -390,14 +390,7 @@ def test_equals(self, closed): assert not expected.equals(expected_other_closed) def test_astype(self, closed): - idx = self.create_index(closed=closed) - - for dtype in [np.int64, np.float64, 'datetime64[ns]', - 'datetime64[ns, US/Eastern]', 'timedelta64', - 'period[M]']: - 
pytest.raises(ValueError, idx.astype, dtype) - result = idx.astype(object) tm.assert_index_equal(result, Index(idx.values, dtype='object')) assert not idx.equals(result) @@ -407,6 +400,15 @@ def test_astype(self, closed): tm.assert_index_equal(result, idx) assert result.equals(idx) + @pytest.mark.parametrize('dtype', [ + np.int64, np.float64, 'period[M]', 'timedelta64', 'datetime64[ns]', + 'datetime64[ns, US/Eastern]']) + def test_astype_errors(self, closed, dtype): + idx = self.create_index(closed=closed) + msg = 'Cannot cast IntervalIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + idx.astype(dtype) + @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series]) def test_where(self, closed, klass): idx = self.create_index(closed=closed) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 96d5981abc1bb..55c06e8854333 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -711,7 +711,7 @@ def test_nbytes(self): # memory savings vs int index i = RangeIndex(0, 1000) - assert i.nbytes < i.astype(int).nbytes / 10 + assert i.nbytes < i._int64index.nbytes / 10 # constant memory usage i2 = RangeIndex(0, 10) diff --git a/pandas/tests/indexes/timedeltas/test_astype.py b/pandas/tests/indexes/timedeltas/test_astype.py index 0fa0e036096d0..af16fe71edcf3 100644 --- a/pandas/tests/indexes/timedeltas/test_astype.py +++ b/pandas/tests/indexes/timedeltas/test_astype.py @@ -40,8 +40,11 @@ def test_astype(self): dtype=np.int64) tm.assert_index_equal(result, expected) - rng = timedelta_range('1 days', periods=10) + result = idx.astype(str) + expected = Index(str(x) for x in idx) + tm.assert_index_equal(result, expected) + rng = timedelta_range('1 days', periods=10) result = rng.astype('i8') tm.assert_index_equal(result, Index(rng.asi8)) tm.assert_numpy_array_equal(rng.asi8, result.values) @@ -62,14 +65,14 @@ def test_astype_timedelta64(self): tm.assert_index_equal(result, idx) assert 
result is idx - def test_astype_raises(self): + @pytest.mark.parametrize('dtype', [ + float, 'datetime64', 'datetime64[ns]']) + def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN]) - - pytest.raises(TypeError, idx.astype, float) - pytest.raises(TypeError, idx.astype, str) - pytest.raises(TypeError, idx.astype, 'datetime64') - pytest.raises(TypeError, idx.astype, 'datetime64[ns]') + msg = 'Cannot cast TimedeltaIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + idx.astype(dtype) def test_pickle_compat_construction(self): pass
- [X] closes #18704 - [X] closes #18951 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Only behavioral changes: - Allowed `.astype(str)` on `TimedeltaIndex` and `PeriodIndex`, which previously raised. - Couldn't see a reason why it shouldn't be supported. - Fixed issues related to tz-aware conversion in #18951 - `RangeIndex.astype('int64')` now remains a `RangeIndex` - Previously returned a `Int64Index`
https://api.github.com/repos/pandas-dev/pandas/pulls/18937
2017-12-25T02:00:32Z
2017-12-27T19:43:00Z
2017-12-27T19:43:00Z
2017-12-27T20:12:47Z
DOC: Using deprecated sphinx directive instead of non-standard messages in docstrings (#18928)
diff --git a/ci/lint.sh b/ci/lint.sh index b4eafcaf28e39..d00e0c9afb6dc 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -117,6 +117,10 @@ if [ "$LINT" ]; then fi done echo "Check for incorrect sphinx directives DONE" + + echo "Check for deprecated messages without sphinx directive" + grep -R --include="*.py" --include="*.pyx" -E "(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.)" pandas + echo "Check for deprecated messages without sphinx directive DONE" else echo "NOT Linting" fi diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 83437022563d5..dc07104f64c65 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -547,7 +547,30 @@ Backwards Compatibility Please try to maintain backward compatibility. *pandas* has lots of users with lots of existing code, so don't break it if at all possible. If you think breakage is required, clearly state why as part of the pull request. Also, be careful when changing method -signatures and add deprecation warnings where needed. +signatures and add deprecation warnings where needed. Also, add the deprecated sphinx +directive to the deprecated functions or methods. + +If a function with the same arguments as the one being deprecated exist, you can use +the ``pandas.util._decorators.deprecate``: + +.. code-block:: python + + from pandas.util._decorators import deprecate + + deprecate('old_func', 'new_func', '0.21.0') + +Otherwise, you need to do it manually: + +.. code-block:: python + + def old_func(): + """Summary of the function. + + .. deprecated:: 0.21.0 + Use new_func instead. + """ + warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) + new_func() .. 
_contributing.ci: diff --git a/pandas/__init__.py b/pandas/__init__.py index 93c5b6484b840..78501620d780b 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -51,7 +51,7 @@ plot_params = pandas.plotting._style._Options(deprecated=True) # do not import deprecate to top namespace scatter_matrix = pandas.util._decorators.deprecate( - 'pandas.scatter_matrix', pandas.plotting.scatter_matrix, + 'pandas.scatter_matrix', pandas.plotting.scatter_matrix, '0.20.0', 'pandas.plotting.scatter_matrix') from pandas.util._print_versions import show_versions diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index ffc1c89dd8adf..de31643742d87 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -389,9 +389,6 @@ class Timestamp(_Timestamp): Unit used for conversion if ts_input is of type int or float. The valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For example, 's' means seconds and 'ms' means milliseconds. - offset : str, DateOffset - Deprecated, use freq - year, month, day : int .. versionadded:: 0.19.0 hour, minute, second, microsecond : int, optional, default 0 diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py index f46487cfa1b79..d194cd2404c9d 100644 --- a/pandas/computation/expressions.py +++ b/pandas/computation/expressions.py @@ -2,6 +2,10 @@ def set_use_numexpr(v=True): + """ + .. deprecated:: 0.20.0 + Use ``pandas.set_option('compute.use_numexpr', v)`` instead. + """ warnings.warn("pandas.computation.expressions.set_use_numexpr is " "deprecated and will be removed in a future version.\n" "you can toggle usage of numexpr via " diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index d47cb0762447b..630b68e9ed4a6 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -594,7 +594,8 @@ def _get_labels(self): """ Get the category labels (deprecated). - Deprecated, use .codes! + .. 
deprecated:: 0.15.0 + Use `.codes()` instead. """ warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, stacklevel=2) diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py index 3444d09c6ed1b..83167a45369c4 100644 --- a/pandas/core/datetools.py +++ b/pandas/core/datetools.py @@ -1,4 +1,8 @@ -"""A collection of random tools for dealing with dates in Python""" +"""A collection of random tools for dealing with dates in Python. + +.. deprecated:: 0.19.0 + Use pandas.tseries module instead. +""" # flake8: noqa diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index e2ee3deb5396e..5d6fc7487eeb5 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -758,10 +758,9 @@ def is_dtype_union_equal(source, target): def is_any_int_dtype(arr_or_dtype): - """ - DEPRECATED: This function will be removed in a future version. + """Check whether the provided array or dtype is of an integer dtype. - Check whether the provided array or dtype is of an integer dtype. + .. deprecated:: 0.20.0 In this function, timedelta64 instances are also considered "any-integer" type objects and will return True. @@ -1557,12 +1556,11 @@ def is_float_dtype(arr_or_dtype): def is_floating_dtype(arr_or_dtype): - """ - DEPRECATED: This function will be removed in a future version. - - Check whether the provided array or dtype is an instance of + """Check whether the provided array or dtype is an instance of numpy's float dtype. + .. deprecated:: 0.20.0 + Unlike, `is_float_dtype`, this check is a lot stricter, as it requires `isinstance` of `np.floating` and not `issubclass`. 
""" diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 821db3c263885..62993a3d168db 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1326,9 +1326,10 @@ def _from_arrays(cls, arrays, columns, index, dtype=None): def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, encoding=None, tupleize_cols=None, infer_datetime_format=False): - """ - Read CSV file (DEPRECATED, please use :func:`pandas.read_csv` - instead). + """Read CSV file. + + .. deprecated:: 0.21.0 + Use :func:`pandas.read_csv` instead. It is preferable to use the more powerful :func:`pandas.read_csv` for most general purposes, but ``from_csv`` makes for an easy @@ -1979,12 +1980,10 @@ def _unpickle_matrix_compat(self, state): # pragma: no cover # Getting and setting elements def get_value(self, index, col, takeable=False): - """ - Quickly retrieve single value at passed column and index + """Quickly retrieve single value at passed column and index .. deprecated:: 0.21.0 - - Please use .at[] or .iat[] accessors. + Use .at[] or .iat[] accessors instead. Parameters ---------- @@ -2024,12 +2023,10 @@ def _get_value(self, index, col, takeable=False): _get_value.__doc__ = get_value.__doc__ def set_value(self, index, col, value, takeable=False): - """ - Put single value at passed column and index + """Put single value at passed column and index .. deprecated:: 0.21.0 - - Please use .at[] or .iat[] accessors. + Use .at[] or .iat[] accessors instead. Parameters ---------- @@ -3737,12 +3734,13 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, def sortlevel(self, level=0, axis=0, ascending=True, inplace=False, sort_remaining=True): - """ - DEPRECATED: use :meth:`DataFrame.sort_index` - - Sort multilevel index by chosen axis and primary level. Data will be + """Sort multilevel index by chosen axis and primary level. Data will be lexicographically sorted by the chosen level followed by the other - levels (in order) + levels (in order). + + .. 
deprecated:: 0.20.0 + Use :meth:`DataFrame.sort_index` + Parameters ---------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 37247ab133948..c9672a43a95a8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2718,10 +2718,10 @@ def xs(self, key, axis=0, level=None, drop_level=True): _xs = xs def select(self, crit, axis=0): - """ - Return data corresponding to axis labels matching criteria + """Return data corresponding to axis labels matching criteria - DEPRECATED: use df.loc[df.index.map(crit)] to select via labels + .. deprecated:: 0.21.0 + Use df.loc[df.index.map(crit)] to select via labels Parameters ---------- @@ -4108,8 +4108,11 @@ def _consolidate(self, inplace=False): return self._constructor(cons_data).__finalize__(self) def consolidate(self, inplace=False): - """ - DEPRECATED: consolidate will be an internal implementation only. + """Compute NDFrame with "consolidated" internals (data of each dtype + grouped together in a single ndarray). + + .. deprecated:: 0.20.0 + Consolidate will be an internal implementation only. """ # 15483 warnings.warn("consolidate is deprecated and will be removed in a " @@ -4160,11 +4163,10 @@ def _get_bool_data(self): # Internal Interface Methods def as_matrix(self, columns=None): - """ - DEPRECATED: as_matrix will be removed in a future version. - Use :meth:`DataFrame.values` instead. + """Convert the frame to its Numpy-array representation. - Convert the frame to its Numpy-array representation. + .. deprecated:: 0.23.0 + Use :meth:`DataFrame.values` instead. Parameters ---------- @@ -4479,12 +4481,11 @@ def _convert(self, datetime=False, numeric=False, timedelta=False, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self) - # TODO: Remove in 0.18 or 2017, which ever is sooner def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): - """ - Deprecated. 
- Attempt to infer better dtype for object columns + """Attempt to infer better dtype for object columns. + + .. deprecated:: 0.21.0 Parameters ---------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index ee2fdd213dd9a..07e001007d58d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -441,9 +441,10 @@ def _isnan(self): @property def asobject(self): - """DEPRECATED: Use ``astype(object)`` instead. + """Return object Index which contains boxed values. - return object Index which contains boxed values + .. deprecated:: 0.23.0 + Use ``astype(object)`` instead. *this is an internal non-public method* """ diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1c401c4854306..26e7c192ad0af 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -477,8 +477,7 @@ def as_matrix(self): # Getting and setting elements def get_value(self, *args, **kwargs): - """ - Quickly retrieve single value at (item, major, minor) location + """Quickly retrieve single value at (item, major, minor) location .. deprecated:: 0.21.0 @@ -525,8 +524,7 @@ def _get_value(self, *args, **kwargs): _get_value.__doc__ = get_value.__doc__ def set_value(self, *args, **kwargs): - """ - Quickly set single value at (item, major, minor) location + """Quickly set single value at (item, major, minor) location .. deprecated:: 0.21.0 diff --git a/pandas/core/series.py b/pandas/core/series.py index 5d8092fd30496..71cded4f9c888 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -93,8 +93,10 @@ # see gh-16971 def remove_na(arr): - """ - DEPRECATED : this function will be removed in a future version. + """Remove null values from array like structure. + + .. deprecated:: 0.21.0 + Use s[s.notnull()] instead. 
""" warnings.warn("remove_na is deprecated and is a private " @@ -290,8 +292,10 @@ def _init_dict(self, data, index=None, dtype=None): @classmethod def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): - """ - DEPRECATED: use the pd.Series(..) constructor instead. + """Construct Series from array. + + .. deprecated :: 0.23.0 + Use pd.Series(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " @@ -450,9 +454,11 @@ def get_values(self): @property def asobject(self): - """DEPRECATED: Use ``astype(object)`` instead. + """Return object Series which contains boxed values. + + .. deprecated :: 0.23.0 + Use ``astype(object) instead. - return object Series which contains boxed values *this is an internal non-public method* """ @@ -911,12 +917,10 @@ def repeat(self, repeats, *args, **kwargs): index=new_index).__finalize__(self) def get_value(self, label, takeable=False): - """ - Quickly retrieve single value at passed index label + """Quickly retrieve single value at passed index label .. deprecated:: 0.21.0 - - Please use .at[] or .iat[] accessors. + Please use .at[] or .iat[] accessors. Parameters ---------- @@ -940,14 +944,12 @@ def _get_value(self, label, takeable=False): _get_value.__doc__ = get_value.__doc__ def set_value(self, label, value, takeable=False): - """ - Quickly set single value at passed label. If label is not contained, a - new object is created with the label placed at the end of the result - index + """Quickly set single value at passed label. If label is not contained, + a new object is created with the label placed at the end of the result + index. .. deprecated:: 0.21.0 - - Please use .at[] or .iat[] accessors. + Please use .at[] or .iat[] accessors. 
Parameters ---------- @@ -1382,13 +1384,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): return self.index[i] # ndarray compat - argmin = deprecate('argmin', idxmin, + argmin = deprecate('argmin', idxmin, '0.21.0', msg="'argmin' is deprecated, use 'idxmin' instead. " "The behavior of 'argmin' will be corrected to " "return the positional minimum in the future. " "Use 'series.values.argmin' to get the position of " "the minimum now.") - argmax = deprecate('argmax', idxmax, + argmax = deprecate('argmax', idxmax, '0.21.0', msg="'argmax' is deprecated, use 'idxmax' instead. " "The behavior of 'argmax' will be corrected to " "return the positional maximum in the future. " @@ -2120,12 +2122,12 @@ def nsmallest(self, n=5, keep='first'): return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest() def sortlevel(self, level=0, ascending=True, sort_remaining=True): - """ - DEPRECATED: use :meth:`Series.sort_index` - - Sort Series with MultiIndex by chosen level. Data will be + """Sort Series with MultiIndex by chosen level. Data will be lexicographically sorted by the chosen level followed by the other - levels (in order) + levels (in order), + + .. deprecated:: 0.20.0 + Use :meth:`Series.sort_index` Parameters ---------- @@ -2670,7 +2672,12 @@ def shift(self, periods=1, freq=None, axis=0): return super(Series, self).shift(periods=periods, freq=freq, axis=axis) def reindex_axis(self, labels, axis=0, **kwargs): - """ for compatibility with higher dims """ + """Conform Series to new index with optional filling logic. + + .. deprecated:: 0.21.0 + Use ``Series.reindex`` instead. 
+ """ + # for compatibility with higher dims if axis != 0: raise ValueError("cannot reindex series on non-zero axis!") msg = ("'.reindex_axis' is deprecated and will be removed in a future " @@ -2808,9 +2815,10 @@ def between(self, left, right, inclusive=True): @classmethod def from_csv(cls, path, sep=',', parse_dates=True, header=None, index_col=0, encoding=None, infer_datetime_format=False): - """ - Read CSV file (DEPRECATED, please use :func:`pandas.read_csv` - instead). + """Read CSV file. + + .. deprecated:: 0.21.0 + Use :func:`pandas.read_csv` instead. It is preferable to use the more powerful :func:`pandas.read_csv` for most general purposes, but ``from_csv`` makes for an easy @@ -2978,8 +2986,10 @@ def dropna(self, axis=0, inplace=False, **kwargs): return self.copy() def valid(self, inplace=False, **kwargs): - """DEPRECATED. Series.valid will be removed in a future version. - Use :meth:`Series.dropna` instead. + """Return Series without null values. + + .. deprecated:: 0.23.0 + Use :meth:`Series.dropna` instead. """ warnings.warn("Method .valid will be removed in a future version. 
" "Use .dropna instead.", FutureWarning, stacklevel=2) diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 05f39a8caa6f6..49a0b8d86ad31 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -820,12 +820,12 @@ def cumsum(self, axis=0, *args, **kwargs): return self.apply(lambda x: x.cumsum(), axis=axis) - @Appender(generic._shared_docs['isna']) + @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs) def isna(self): return self._apply_columns(lambda x: x.isna()) isnull = isna - @Appender(generic._shared_docs['notna']) + @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs) def notna(self): return self._apply_columns(lambda x: x.notna()) notnull = notna diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 8a38b1054a1f5..b5d2c0b607444 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -255,9 +255,10 @@ def npoints(self): @classmethod def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): - """ - DEPRECATED: use the pd.SparseSeries(..) constructor instead. + """Construct SparseSeries from array. + .. deprecated:: 0.23.0 + Use the pd.SparseSeries(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.SparseSeries(..) " @@ -571,8 +572,9 @@ def to_dense(self, sparse_only=False): Parameters ---------- - sparse_only: bool, default False - DEPRECATED: this argument will be removed in a future version. + sparse_only : bool, default False + .. deprecated:: 0.20.0 + This argument will be removed in a future version. If True, return just the non-sparse values, or the dense version of `self.values` if False. 
@@ -679,7 +681,7 @@ def cumsum(self, axis=0, *args, **kwargs): new_array, index=self.index, sparse_index=new_array.sp_index).__finalize__(self) - @Appender(generic._shared_docs['isna']) + @Appender(generic._shared_docs['isna'] % _shared_doc_kwargs) def isna(self): arr = SparseArray(isna(self.values.sp_values), sparse_index=self.values.sp_index, @@ -687,7 +689,7 @@ def isna(self): return self._constructor(arr, index=self.index).__finalize__(self) isnull = isna - @Appender(generic._shared_docs['notna']) + @Appender(generic._shared_docs['notna'] % _shared_doc_kwargs) def notna(self): arr = SparseArray(notna(self.values.sp_values), sparse_index=self.values.sp_index, diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 3b7ec2ad8a508..e0012c25e366d 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -478,7 +478,8 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan, as_indexer=None): flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE na : default NaN, fill value for missing values. - as_indexer : DEPRECATED - Keyword is ignored. + as_indexer + .. deprecated:: 0.21.0 Returns ------- diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 6be6152b09fc8..eed9cee54efb3 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -7,10 +7,15 @@ from functools import wraps, update_wrapper -def deprecate(name, alternative, alt_name=None, klass=None, - stacklevel=2, msg=None): - """ - Return a new function that emits a deprecation warning on use. +def deprecate(name, alternative, version, alt_name=None, + klass=None, stacklevel=2, msg=None): + """Return a new function that emits a deprecation warning on use. + + To use this method for a deprecated function, another function + `alternative` with the same signature must exist. 
The deprecated + function will emit a deprecation warning, and in the docstring + it will contain the deprecation directive with the provided version + so it can be detected for future removal. Parameters ---------- @@ -18,6 +23,8 @@ def deprecate(name, alternative, alt_name=None, klass=None, Name of function to deprecate alternative : str Name of function to use instead + version : str + Version of pandas in which the method has been deprecated alt_name : str, optional Name to use in preference of alternative.__name__ klass : Warning, default FutureWarning @@ -29,16 +36,24 @@ def deprecate(name, alternative, alt_name=None, klass=None, alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning - msg = msg or "{} is deprecated, use {} instead".format(name, alt_name) + warning_msg = msg or '{} is deprecated, use {} instead'.format(name, + alt_name) @wraps(alternative) def wrapper(*args, **kwargs): - warnings.warn(msg, klass, stacklevel=stacklevel) + warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) - if getattr(wrapper, '__doc__', None) is not None: - wrapper.__doc__ = ('\n'.join(wrap(msg, 70)) + '\n' - + dedent(wrapper.__doc__)) + # adding deprecated directive to the docstring + msg = msg or 'Use `{alt_name}` instead.' + docstring = '.. deprecated:: {}\n'.format(version) + docstring += dedent(' ' + ('\n'.join(wrap(msg, 70)))) + + if getattr(wrapper, '__doc__') is not None: + docstring += dedent(wrapper.__doc__) + + wrapper.__doc__ = docstring + return wrapper diff --git a/scripts/announce.py b/scripts/announce.py index 1459d2fc18d2a..7b7933eba54dd 100644 --- a/scripts/announce.py +++ b/scripts/announce.py @@ -30,7 +30,7 @@ From the bash command line with $GITHUB token. 
- $ ./scripts/announce $GITHUB v1.11.0..v1.11.1 > announce.rst + $ ./scripts/announce.py $GITHUB v1.11.0..v1.11.1 > announce.rst """ from __future__ import print_function, division diff --git a/scripts/api_rst_coverage.py b/scripts/api_rst_coverage.py old mode 100644 new mode 100755 index 45340ba0923c4..28e761ef256d0 --- a/scripts/api_rst_coverage.py +++ b/scripts/api_rst_coverage.py @@ -1,3 +1,22 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +""" +Script to generate a report with the coverage of the API in the docs. + +The output of this script shows the existing methods that are not +included in the API documentation, as well as the methods documented +that do not exist. Ideally, no method should be listed. Currently it +considers the methods of Series, DataFrame and Panel. + +Deprecated methods are usually removed from the documentation, while +still available for three minor versions. They are listed with the +word deprecated and the version number next to them. + +Usage:: + + $ PYTHONPATH=.. ./api_rst_coverage.py + +""" import pandas as pd import inspect import re @@ -13,6 +32,32 @@ def class_name_sort_key(x): else: return x + def get_docstring(x): + class_name, method = x.split('.') + obj = getattr(getattr(pd, class_name), method) + return obj.__doc__ + + def deprecation_version(x): + pattern = re.compile('\.\. deprecated:: ([0-9]+\.[0-9]+\.[0-9]+)') + doc = get_docstring(x) + match = pattern.search(doc) + if match: + return match.groups()[0] + + def add_notes(x): + # Some methods are not documented in api.rst because they + # have been deprecated. Adding a comment to detect them easier. 
+ doc = get_docstring(x) + note = None + if not doc: + note = 'no docstring' + else: + version = deprecation_version(x) + if version: + note = 'deprecated in {}'.format(version) + + return '{} ({})'.format(x, note) if note else x + # class members class_members = set() for cls in classes: @@ -34,10 +79,12 @@ def class_name_sort_key(x): print(x) print() - print("Class members (other than those beginning with '_') missing from api.rst:") - for x in sorted(class_members.difference(api_rst_members), key=class_name_sort_key): + print("Class members (other than those beginning with '_') " + "missing from api.rst:") + for x in sorted(class_members.difference(api_rst_members), + key=class_name_sort_key): if '._' not in x: - print(x) + print(add_notes(x)) if __name__ == "__main__": main()
- [X] closes #18928 - [X] tests passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18934
2017-12-25T00:17:58Z
2018-01-06T17:15:23Z
2018-01-06T17:15:23Z
2018-03-14T18:16:02Z
BUG: DataFrame.from_records ignores exclude if it is [0]
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 26257f6ecbc37..5e1c801f1b1c9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1175,7 +1175,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, except Exception: result_index = index - if any(exclude): + if len(exclude) > 0: arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
Fixed https://github.com/pandas-dev/pandas/issues/18679
https://api.github.com/repos/pandas-dev/pandas/pulls/18933
2017-12-24T22:46:18Z
2018-01-14T22:18:07Z
null
2018-01-14T22:18:07Z
CLN: ASV period
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index 15d7655293ea3..897a3338c164c 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -1,61 +1,24 @@ -import pandas as pd -from pandas import Series, Period, PeriodIndex, date_range +from pandas import (DataFrame, Series, Period, PeriodIndex, date_range, + period_range) class PeriodProperties(object): - params = ['M', 'min'] - param_names = ['freq'] - - def setup(self, freq): - self.per = Period('2012-06-01', freq=freq) - - def time_year(self, freq): - self.per.year - - def time_month(self, freq): - self.per.month - - def time_day(self, freq): - self.per.day - - def time_hour(self, freq): - self.per.hour - - def time_minute(self, freq): - self.per.minute - - def time_second(self, freq): - self.per.second - - def time_is_leap_year(self, freq): - self.per.is_leap_year - def time_quarter(self, freq): - self.per.quarter + params = (['M', 'min'], + ['year', 'month', 'day', 'hour', 'minute', 'second', + 'is_leap_year', 'quarter', 'qyear', 'week', 'daysinmonth', + 'dayofweek', 'dayofyear', 'start_time', 'end_time']) + param_names = ['freq', 'attr'] - def time_qyear(self, freq): - self.per.qyear - - def time_week(self, freq): - self.per.week - - def time_daysinmonth(self, freq): - self.per.daysinmonth - - def time_dayofweek(self, freq): - self.per.dayofweek - - def time_dayofyear(self, freq): - self.per.dayofyear - - def time_start_time(self, freq): - self.per.start_time + def setup(self, freq, attr): + self.per = Period('2012-06-01', freq=freq) - def time_end_time(self, freq): - self.per.end_time + def time_property(self, freq, attr): + getattr(self.per, attr) class PeriodUnaryMethods(object): + params = ['M', 'min'] param_names = ['freq'] @@ -73,6 +36,7 @@ def time_asfreq(self, freq): class PeriodIndexConstructor(object): + goal_time = 0.2 params = ['D'] @@ -90,19 +54,19 @@ def time_from_pydatetime(self, freq): class DataFramePeriodColumn(object): + goal_time = 
0.2 - def setup_cache(self): - rng = pd.period_range(start='1/1/1990', freq='S', periods=20000) - df = pd.DataFrame(index=range(len(rng))) - return rng, df + def setup(self): + self.rng = period_range(start='1/1/1990', freq='S', periods=20000) + self.df = DataFrame(index=range(len(self.rng))) - def time_setitem_period_column(self, tup): - rng, df = tup - df['col'] = rng + def time_setitem_period_column(self): + self.df['col'] = self.rng class Algorithms(object): + goal_time = 0.2 params = ['index', 'series'] @@ -125,6 +89,7 @@ def time_value_counts(self, typ): class Indexing(object): + goal_time = 0.2 def setup(self): @@ -145,7 +110,7 @@ def time_series_loc(self): self.series.loc[self.period] def time_align(self): - pd.DataFrame({'a': self.series, 'b': self.series[:500]}) + DataFrame({'a': self.series, 'b': self.series[:500]}) def time_intersection(self): self.index[:750].intersection(self.index[250:])
- Utilized `param` for the `PeriodProperties` benchmark - Replaced `setup_cache` for just `setup` since only one benchmark was being run for that class. ``` $ asv dev -b ^period · Discovering benchmarks · Running 15 total benchmarks (1 commits * 1 environments * 15 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 6.67%] ··· Running period.Algorithms.time_drop_duplicates ok [ 6.67%] ···· ======== ======== typ -------- -------- index 777μs series 7.47ms ======== ======== [ 13.33%] ··· Running period.Algorithms.time_value_counts ok [ 13.33%] ···· ======== ======== typ -------- -------- index 1.33ms series 7.94ms ======== ======== [ 20.00%] ··· Running ...ramePeriodColumn.time_setitem_period_column 80.8ms [ 26.67%] ··· Running period.Indexing.time_align 2.57ms [ 33.33%] ··· Running period.Indexing.time_get_loc 211μs [ 40.00%] ··· Running period.Indexing.time_intersection 516μs [ 46.67%] ··· Running period.Indexing.time_series_loc 417μs [ 53.33%] ··· Running period.Indexing.time_shallow_copy 53.4μs [ 60.00%] ··· Running period.Indexing.time_shape 13.5μs [ 66.67%] ··· Running ...PeriodIndexConstructor.time_from_date_range ok [ 66.67%] ···· ====== ======= freq ------ ------- D 408μs ====== ======= [ 73.33%] ··· Running ...PeriodIndexConstructor.time_from_pydatetime ok [ 73.33%] ···· ====== ======== freq ------ -------- D 15.3ms ====== ======== [ 80.00%] ··· Running period.PeriodProperties.time_property ok [ 80.00%] ···· ====== ============== ======== freq attr ------ -------------- -------- M year 17.5μs M month 17.2μs M day 17.4μs M hour 17.4μs M minute 17.6μs M second 16.8μs M is_leap_year 17.6μs M quarter 17.1μs M qyear 17.1μs M week 17.8μs M daysinmonth 17.7μs M dayofweek 16.9μs M dayofyear 17.4μs M start_time 243μs M end_time 263μs min year 17.4μs min month 18.5μs min day 18.1μs min hour 18.1μs min minute 18.2μs min second 18.1μs min 
is_leap_year 19.4μs min quarter 16.7μs min qyear 17.7μs min week 18.2μs min daysinmonth 18.4μs min dayofweek 18.2μs min dayofyear 18.2μs min start_time 242μs min end_time 260μs ====== ============== ======== [ 86.67%] ··· Running period.PeriodUnaryMethods.time_asfreq ok [ 86.67%] ···· ====== ======= freq ------ ------- M 161μs min 166μs ====== ======= [ 93.33%] ··· Running period.PeriodUnaryMethods.time_now ok [ 93.33%] ···· ====== ======= freq ------ ------- M 128μs min 224μs ====== ======= [100.00%] ··· Running period.PeriodUnaryMethods.time_to_timestamp ok [100.00%] ···· ====== ======= freq ------ ------- M 245μs min 242μs ====== ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18932
2017-12-24T21:48:17Z
2017-12-26T22:00:01Z
2017-12-26T22:00:01Z
2017-12-31T04:33:10Z
TST: organize and cleanup pandas/tests/groupby/test_aggregate.py
diff --git a/.gitignore b/.gitignore index b1748ae72b8ba..0d4e8c6fb75a6 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ .ipynb_checkpoints .tags .cache/ +.vscode/ # Compiled source # ################### diff --git a/pandas/tests/groupby/aggregate/__init__.py b/pandas/tests/groupby/aggregate/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py new file mode 100644 index 0000000000000..caf2365a54ec8 --- /dev/null +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- + +""" +test .agg behavior / note that .apply is tested generally in test_groupby.py +""" + +import pytest + +import numpy as np +import pandas as pd + +from pandas import concat, DataFrame, Index, MultiIndex, Series +from pandas.core.groupby import SpecificationError +from pandas.compat import OrderedDict +import pandas.util.testing as tm + + +class TestGroupByAggregate(object): + + def setup_method(self, method): + self.ts = tm.makeTimeSeries() + + self.seriesd = tm.getSeriesData() + self.tsd = tm.getTimeSeriesData() + self.frame = DataFrame(self.seriesd) + self.tsframe = DataFrame(self.tsd) + + self.df = DataFrame( + {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.random.randn(8)}) + + self.df_mixed_floats = DataFrame( + {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.array(np.random.randn(8), dtype='float32')}) + + index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + self.mframe = DataFrame(np.random.randn(10, 3), index=index, + columns=['A', 
'B', 'C']) + + self.three_group = DataFrame( + {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar', + 'foo', 'foo', 'foo'], + 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', + 'two', 'two', 'one'], + 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', + 'dull', 'shiny', 'shiny', 'shiny'], + 'D': np.random.randn(11), + 'E': np.random.randn(11), + 'F': np.random.randn(11)}) + + def test_agg_regression1(self): + grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + def test_agg_must_agg(self): + grouped = self.df.groupby('A')['C'] + + msg = "Must produce aggregated value" + with tm.assert_raises_regex(Exception, msg): + grouped.agg(lambda x: x.describe()) + with tm.assert_raises_regex(Exception, msg): + grouped.agg(lambda x: x.index[:2]) + + def test_agg_ser_multi_key(self): + # TODO(wesm): unused + ser = self.df.C # noqa + + f = lambda x: x.sum() + results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f) + expected = self.df.groupby(['A', 'B']).sum()['C'] + tm.assert_series_equal(results, expected) + + def test_agg_apply_corner(self): + # nothing to group, all NA + grouped = self.ts.groupby(self.ts * np.nan) + assert self.ts.dtype == np.float64 + + # groupby float64 values results in Float64Index + exp = Series([], dtype=np.float64, + index=pd.Index([], dtype=np.float64)) + tm.assert_series_equal(grouped.sum(), exp) + tm.assert_series_equal(grouped.agg(np.sum), exp) + tm.assert_series_equal(grouped.apply(np.sum), exp, + check_index_type=False) + + # DataFrame + grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan) + exp_df = DataFrame(columns=self.tsframe.columns, dtype=float, + index=pd.Index([], dtype=np.float64)) + tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False) + tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) + tm.assert_frame_equal(grouped.apply(np.sum), 
exp_df.iloc[:, :0], + check_names=False) + + def test_agg_grouping_is_list_tuple(self): + from pandas.core.groupby import Grouping + + df = tm.makeTimeDataFrame() + + grouped = df.groupby(lambda x: x.year) + grouper = grouped.grouper.groupings[0].grouper + grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper)) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper)) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + def test_agg_python_multiindex(self): + grouped = self.mframe.groupby(['A', 'B']) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('groupbyfunc', [ + lambda x: x.weekday(), + [lambda x: x.month, lambda x: x.weekday()], + ]) + def test_aggregate_str_func(self, groupbyfunc): + grouped = self.tsframe.groupby(groupbyfunc) + + # single series + result = grouped['A'].agg('std') + expected = grouped['A'].std() + tm.assert_series_equal(result, expected) + + # group frame by function name + result = grouped.aggregate('var') + expected = grouped.var() + tm.assert_frame_equal(result, expected) + + # group frame by function dict + result = grouped.agg(OrderedDict([['A', 'var'], + ['B', 'std'], + ['C', 'mean'], + ['D', 'sem']])) + expected = DataFrame(OrderedDict([['A', grouped['A'].var()], + ['B', grouped['B'].std()], + ['C', grouped['C'].mean()], + ['D', grouped['D'].sem()]])) + tm.assert_frame_equal(result, expected) + + def test_aggregate_item_by_item(self): + df = self.df.copy() + df['E'] = ['a'] * len(self.df) + grouped = self.df.groupby('A') + + aggfun = lambda ser: ser.size + result = grouped.agg(aggfun) + foo = (self.df.A == 'foo').sum() + bar = (self.df.A == 'bar').sum() + K = len(result.columns) + + # GH5782 + # odd comparisons can result here, so cast to make easy + exp = 
pd.Series(np.array([foo] * K), index=list('BCD'), + dtype=np.float64, name='foo') + tm.assert_series_equal(result.xs('foo'), exp) + + exp = pd.Series(np.array([bar] * K), index=list('BCD'), + dtype=np.float64, name='bar') + tm.assert_almost_equal(result.xs('bar'), exp) + + def aggfun(ser): + return ser.size + + result = DataFrame().groupby(self.df.A).agg(aggfun) + assert isinstance(result, DataFrame) + assert len(result) == 0 + + def test_wrap_agg_out(self): + grouped = self.three_group.groupby(['A', 'B']) + + def func(ser): + if ser.dtype == np.object: + raise TypeError + else: + return ser.sum() + + result = grouped.aggregate(func) + exp_grouped = self.three_group.loc[:, self.three_group.columns != 'C'] + expected = exp_grouped.groupby(['A', 'B']).aggregate(func) + tm.assert_frame_equal(result, expected) + + def test_agg_multiple_functions_maintain_order(self): + # GH #610 + funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)] + result = self.df.groupby('A')['C'].agg(funcs) + exp_cols = Index(['mean', 'max', 'min']) + + tm.assert_index_equal(result.columns, exp_cols) + + def test_multiple_functions_tuples_and_non_tuples(self): + # #1359 + funcs = [('foo', 'mean'), 'std'] + ex_funcs = [('foo', 'mean'), ('std', 'std')] + + result = self.df.groupby('A')['C'].agg(funcs) + expected = self.df.groupby('A')['C'].agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + result = self.df.groupby('A').agg(funcs) + expected = self.df.groupby('A').agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + def test_agg_multiple_functions_too_many_lambdas(self): + grouped = self.df.groupby('A') + funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] + + msg = 'Function names must be unique, found multiple named <lambda>' + with tm.assert_raises_regex(SpecificationError, msg): + grouped.agg(funcs) + + def test_more_flexible_frame_multi_function(self): + grouped = self.df.groupby('A') + + exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) + exstd = 
grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) + + expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) + expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) + + d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) + result = grouped.aggregate(d) + + tm.assert_frame_equal(result, expected) + + # be careful + result = grouped.aggregate(OrderedDict([['C', np.mean], + ['D', [np.mean, np.std]]])) + expected = grouped.aggregate(OrderedDict([['C', np.mean], + ['D', [np.mean, np.std]]])) + tm.assert_frame_equal(result, expected) + + def foo(x): + return np.mean(x) + + def bar(x): + return np.std(x, ddof=1) + + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + d = OrderedDict([['C', np.mean], + ['D', OrderedDict([['foo', np.mean], + ['bar', np.std]])]]) + result = grouped.aggregate(d) + + d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) + expected = grouped.aggregate(d) + + tm.assert_frame_equal(result, expected) + + def test_multi_function_flexible_mix(self): + # GH #1268 + grouped = self.df.groupby('A') + + # Expected + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', {'sum': 'sum'}]]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + expected = grouped.aggregate(d) + + # Test 1 + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', 'sum']]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = grouped.aggregate(d) + tm.assert_frame_equal(result, expected) + + # Test 2 + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', ['sum']]]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = grouped.aggregate(d) + tm.assert_frame_equal(result, expected) 
diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py new file mode 100644 index 0000000000000..c8ee05ddbb74f --- /dev/null +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- + +""" +test cython .agg behavior +""" + +from __future__ import print_function + +import pytest + +import numpy as np +from numpy import nan +import pandas as pd + +from pandas import bdate_range, DataFrame, Index, Series +from pandas.core.groupby import DataError +import pandas.util.testing as tm + + +@pytest.mark.parametrize('op_name', [ + 'count', + 'sum', + 'std', + 'var', + 'sem', + 'mean', + 'median', + 'prod', + 'min', + 'max', +]) +def test_cythonized_aggers(op_name): + data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan], + 'B': ['A', 'B'] * 6, + 'C': np.random.randn(12)} + df = DataFrame(data) + df.loc[2:10:2, 'C'] = nan + + op = lambda x: getattr(x, op_name)() + + # single column + grouped = df.drop(['B'], axis=1).groupby('A') + exp = {} + for cat, group in grouped: + exp[cat] = op(group['C']) + exp = DataFrame({'C': exp}) + exp.index.name = 'A' + result = op(grouped) + tm.assert_frame_equal(result, exp) + + # multiple columns + grouped = df.groupby(['A', 'B']) + expd = {} + for (cat1, cat2), group in grouped: + expd.setdefault(cat1, {})[cat2] = op(group['C']) + exp = DataFrame(expd).T.stack(dropna=False) + exp.index.names = ['A', 'B'] + exp.name = 'C' + + result = op(grouped)['C'] + if op_name in ['sum', 'prod']: + tm.assert_series_equal(result, exp) + + +def test_cython_agg_boolean(): + frame = DataFrame({'a': np.random.randint(0, 5, 50), + 'b': np.random.randint(0, 2, 50).astype('bool')}) + result = frame.groupby('a')['b'].mean() + expected = frame.groupby('a')['b'].agg(np.mean) + + tm.assert_series_equal(result, expected) + + +def test_cython_agg_nothing_to_agg(): + frame = DataFrame({'a': np.random.randint(0, 5, 50), + 'b': ['foo', 'bar'] * 25}) + msg = "No numeric types to 
aggregate" + + with tm.assert_raises_regex(DataError, msg): + frame.groupby('a')['b'].mean() + + frame = DataFrame({'a': np.random.randint(0, 5, 50), + 'b': ['foo', 'bar'] * 25}) + with tm.assert_raises_regex(DataError, msg): + frame[['b']].groupby(frame['a']).mean() + + +def test_cython_agg_nothing_to_agg_with_dates(): + frame = DataFrame({'a': np.random.randint(0, 5, 50), + 'b': ['foo', 'bar'] * 25, + 'dates': pd.date_range('now', periods=50, freq='T')}) + msg = "No numeric types to aggregate" + with tm.assert_raises_regex(DataError, msg): + frame.groupby('b').dates.mean() + + +def test_cython_agg_frame_columns(): + # #2113 + df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]}) + + df.groupby(level=0, axis='columns').mean() + df.groupby(level=0, axis='columns').mean() + df.groupby(level=0, axis='columns').mean() + df.groupby(level=0, axis='columns').mean() + + +def test_cython_agg_return_dict(): + # GH 16741 + df = DataFrame( + {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.random.randn(8)}) + + ts = df.groupby('A')['B'].agg(lambda x: x.value_counts().to_dict()) + expected = Series([{'two': 1, 'one': 1, 'three': 1}, + {'two': 2, 'one': 2, 'three': 1}], + index=Index(['bar', 'foo'], name='A'), + name='B') + tm.assert_series_equal(ts, expected) + + +def test_cython_fail_agg(): + dr = bdate_range('1/1/2000', periods=50) + ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr) + + grouped = ts.groupby(lambda x: x.month) + summed = grouped.sum() + expected = grouped.agg(np.sum) + tm.assert_series_equal(summed, expected) + + +@pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', np.median), + ('var', np.var), + ('add', np.sum), + ('prod', np.prod), + ('min', np.min), + ('max', np.max), + ('first', lambda x: x.iloc[0]), + ('last', lambda x: x.iloc[-1]), +]) +def test__cython_agg_general(op, targop): + df = 
DataFrame(np.random.randn(1000)) + labels = np.random.randint(0, 50, size=1000).astype(float) + + result = df.groupby(labels)._cython_agg_general(op) + expected = df.groupby(labels).agg(targop) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), + ('var', lambda x: np.var(x, ddof=1)), + ('min', np.min), + ('max', np.max), ] +) +def test_cython_agg_empty_buckets(op, targop): + df = pd.DataFrame([11, 12, 13]) + grps = range(0, 55, 5) + + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. + result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) + expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + tm.assert_frame_equal(result, expected) + + +def test_cython_agg_empty_buckets_nanops(): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + # add / sum + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py new file mode 100644 index 0000000000000..f8e44b1548819 --- /dev/null +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- + +""" +test all other .agg behavior +""" + +from __future__ import print_function + +import pytest + +from datetime 
import datetime, timedelta +from functools import partial + +import numpy as np +import pandas as pd + +from pandas import date_range, DataFrame, Index, MultiIndex, Series +from pandas.core.groupby import SpecificationError +from pandas.io.formats.printing import pprint_thing +import pandas.util.testing as tm + + +def test_agg_api(): + # GH 6337 + # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error + # different api for agg when passed custom function with mixed frame + + df = DataFrame({'data1': np.random.randn(5), + 'data2': np.random.randn(5), + 'key1': ['a', 'a', 'b', 'b', 'a'], + 'key2': ['one', 'two', 'one', 'two', 'one']}) + grouped = df.groupby('key1') + + def peak_to_peak(arr): + return arr.max() - arr.min() + + expected = grouped.agg([peak_to_peak]) + expected.columns = ['data1', 'data2'] + result = grouped.agg(peak_to_peak) + tm.assert_frame_equal(result, expected) + + +def test_agg_datetimes_mixed(): + data = [[1, '2012-01-01', 1.0], + [2, '2012-01-02', 2.0], + [3, None, 3.0]] + + df1 = DataFrame({'key': [x[0] for x in data], + 'date': [x[1] for x in data], + 'value': [x[2] for x in data]}) + + data = [[row[0], + datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] else None, + row[2]] + for row in data] + + df2 = DataFrame({'key': [x[0] for x in data], + 'date': [x[1] for x in data], + 'value': [x[2] for x in data]}) + + df1['weights'] = df1['value'] / df1['value'].sum() + gb1 = df1.groupby('date').aggregate(np.sum) + + df2['weights'] = df1['value'] / df1['value'].sum() + gb2 = df2.groupby('date').aggregate(np.sum) + + assert (len(gb1) == len(gb2)) + + +def test_agg_period_index(): + from pandas import period_range, PeriodIndex + prng = period_range('2012-1-1', freq='M', periods=3) + df = DataFrame(np.random.randn(3, 2), index=prng) + rs = df.groupby(level=0).sum() + assert isinstance(rs.index, PeriodIndex) + + # GH 3579 + index = period_range(start='1999-01', periods=5, freq='M') + s1 = 
Series(np.random.rand(len(index)), index=index) + s2 = Series(np.random.rand(len(index)), index=index) + series = [('s1', s1), ('s2', s2)] + df = DataFrame.from_items(series) + grouped = df.groupby(df.index.month) + list(grouped) + + +def test_agg_dict_parameter_cast_result_dtypes(): + # GH 12821 + + df = DataFrame({'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'], + 'time': date_range('1/1/2011', periods=8, freq='H')}) + df.loc[[0, 1, 2, 5], 'time'] = None + + # test for `first` function + exp = df.loc[[0, 3, 4, 6]].set_index('class') + grouped = df.groupby('class') + tm.assert_frame_equal(grouped.first(), exp) + tm.assert_frame_equal(grouped.agg('first'), exp) + tm.assert_frame_equal(grouped.agg({'time': 'first'}), exp) + tm.assert_series_equal(grouped.time.first(), exp['time']) + tm.assert_series_equal(grouped.time.agg('first'), exp['time']) + + # test for `last` function + exp = df.loc[[0, 3, 4, 7]].set_index('class') + grouped = df.groupby('class') + tm.assert_frame_equal(grouped.last(), exp) + tm.assert_frame_equal(grouped.agg('last'), exp) + tm.assert_frame_equal(grouped.agg({'time': 'last'}), exp) + tm.assert_series_equal(grouped.time.last(), exp['time']) + tm.assert_series_equal(grouped.time.agg('last'), exp['time']) + + # count + exp = pd.Series([2, 2, 2, 2], + index=Index(list('ABCD'), name='class'), + name='time') + tm.assert_series_equal(grouped.time.agg(len), exp) + tm.assert_series_equal(grouped.time.size(), exp) + + exp = pd.Series([0, 1, 1, 2], + index=Index(list('ABCD'), name='class'), + name='time') + tm.assert_series_equal(grouped.time.count(), exp) + + +def test_agg_cast_results_dtypes(): + # similar to GH12821 + # xref #11444 + u = [datetime(2015, x + 1, 1) for x in range(12)] + v = list('aaabbbbbbccd') + df = pd.DataFrame({'X': v, 'Y': u}) + + result = df.groupby('X')['Y'].agg(len) + expected = df.groupby('X')['Y'].count() + tm.assert_series_equal(result, expected) + + +def test_aggregate_float64_no_int64(): + # see gh-11199 + df = 
DataFrame({"a": [1, 2, 3, 4, 5], + "b": [1, 2, 2, 4, 5], + "c": [1, 2, 3, 4, 5]}) + + expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5]) + expected.index.name = "b" + + result = df.groupby("b")[["a"]].mean() + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]}, + index=[1, 2, 4, 5]) + expected.index.name = "b" + + result = df.groupby("b")[["a", "c"]].mean() + tm.assert_frame_equal(result, expected) + + +def test_aggregate_api_consistency(): + # GH 9052 + # make sure that the aggregates via dict + # are consistent + df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': np.random.randn(8) + 1.0, + 'D': np.arange(8)}) + + grouped = df.groupby(['A', 'B']) + c_mean = grouped['C'].mean() + c_sum = grouped['C'].sum() + d_mean = grouped['D'].mean() + d_sum = grouped['D'].sum() + + result = grouped['D'].agg(['sum', 'mean']) + expected = pd.concat([d_sum, d_mean], axis=1) + expected.columns = ['sum', 'mean'] + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg([np.sum, np.mean]) + expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1) + expected.columns = MultiIndex.from_product([['C', 'D'], + ['sum', 'mean']]) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped[['D', 'C']].agg([np.sum, np.mean]) + expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1) + expected.columns = MultiIndex.from_product([['D', 'C'], + ['sum', 'mean']]) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg({'C': 'mean', 'D': 'sum'}) + expected = pd.concat([d_sum, c_mean], axis=1) + tm.assert_frame_equal(result, expected, check_like=True) + + result = grouped.agg({'C': ['mean', 'sum'], + 'D': ['mean', 'sum']}) + expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1) + expected.columns = MultiIndex.from_product([['C', 'D'], + 
['mean', 'sum']]) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = grouped[['D', 'C']].agg({'r': np.sum, + 'r2': np.mean}) + expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1) + expected.columns = MultiIndex.from_product([['r', 'r2'], + ['D', 'C']]) + tm.assert_frame_equal(result, expected, check_like=True) + + +def test_agg_dict_renaming_deprecation(): + # 15931 + df = pd.DataFrame({'A': [1, 1, 1, 2, 2], + 'B': range(5), + 'C': range(5)}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, + 'C': {'bar': ['count', 'min']}}) + assert "using a dict with renaming" in str(w[0].message) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) + + with tm.assert_produces_warning(FutureWarning) as w: + df.groupby('A').B.agg({'foo': 'count'}) + assert "using a dict on a Series for aggregation" in str(w[0].message) + + +def test_agg_compat(): + # GH 12334 + df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': np.random.randn(8) + 1.0, + 'D': np.arange(8)}) + + g = df.groupby(['A', 'B']) + + expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1) + expected.columns = MultiIndex.from_tuples([('C', 'sum'), + ('C', 'std')]) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = g['D'].agg({'C': ['sum', 'std']}) + tm.assert_frame_equal(result, expected, check_like=True) + + expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1) + expected.columns = ['C', 'D'] + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = g['D'].agg({'C': 'sum', 'D': 'std'}) + tm.assert_frame_equal(result, expected, check_like=True) + + +def test_agg_nested_dicts(): + # API change for disallowing these types of nested dicts + df = 
DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': np.random.randn(8) + 1.0, + 'D': np.arange(8)}) + + g = df.groupby(['A', 'B']) + + msg = r'cannot perform renaming for r[1-2] with a nested dictionary' + with tm.assert_raises_regex(SpecificationError, msg): + g.aggregate({'r1': {'C': ['mean', 'sum']}, + 'r2': {'D': ['mean', 'sum']}}) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = g.agg({'C': {'ra': ['mean', 'std']}, + 'D': {'rb': ['mean', 'std']}}) + expected = pd.concat([g['C'].mean(), g['C'].std(), + g['D'].mean(), g['D'].std()], + axis=1) + expected.columns = pd.MultiIndex.from_tuples( + [('ra', 'mean'), ('ra', 'std'), + ('rb', 'mean'), ('rb', 'std')]) + tm.assert_frame_equal(result, expected, check_like=True) + + # same name as the original column + # GH9052 + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + expected = g['D'].agg({'result1': np.sum, 'result2': np.mean}) + expected = expected.rename(columns={'result1': 'D'}) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = g['D'].agg({'D': np.sum, 'result2': np.mean}) + tm.assert_frame_equal(result, expected, check_like=True) + + +def test_agg_item_by_item_raise_typeerror(): + from numpy.random import randint + + df = DataFrame(randint(10, size=(20, 10))) + + def raiseException(df): + pprint_thing('----------------------------------------') + pprint_thing(df.to_string()) + raise TypeError('test') + + with tm.assert_raises_regex(TypeError, 'test'): + df.groupby(0).agg(raiseException) + + +def test_series_agg_multikey(): + ts = tm.makeTimeSeries() + grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) + + result = grouped.agg(np.sum) + expected = grouped.sum() + tm.assert_series_equal(result, expected) + + +def test_series_agg_multi_pure_python(): + data = DataFrame( + {'A': ['foo', 'foo', 'foo', 'foo', 
'bar', 'bar', 'bar', 'bar', + 'foo', 'foo', 'foo'], + 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', + 'two', 'two', 'one'], + 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', + 'dull', 'shiny', 'shiny', 'shiny'], + 'D': np.random.randn(11), + 'E': np.random.randn(11), + 'F': np.random.randn(11)}) + + def bad(x): + assert (len(x.base) > 0) + return 'foo' + + result = data.groupby(['A', 'B']).agg(bad) + expected = data.groupby(['A', 'B']).agg(lambda x: 'foo') + tm.assert_frame_equal(result, expected) + + +def test_agg_consistency(): + # agg with ([]) and () not consistent + # GH 6715 + def P1(a): + try: + return np.percentile(a.dropna(), q=1) + except Exception: + return np.nan + + import datetime as dt + df = DataFrame({'col1': [1, 2, 3, 4], + 'col2': [10, 25, 26, 31], + 'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10), + dt.date(2013, 2, 11), dt.date(2013, 2, 11)]}) + + g = df.groupby('date') + + expected = g.agg([P1]) + expected.columns = expected.columns.levels[0] + + result = g.agg(P1) + tm.assert_frame_equal(result, expected) + + +def test_agg_callables(): + # GH 7929 + df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64) + + class fn_class(object): + + def __call__(self, x): + return sum(x) + + equiv_callables = [sum, + np.sum, + lambda x: sum(x), + lambda x: x.sum(), + partial(sum), + fn_class(), ] + + expected = df.groupby("foo").agg(sum) + for ecall in equiv_callables: + result = df.groupby('foo').agg(ecall) + tm.assert_frame_equal(result, expected) + + +def test_agg_over_numpy_arrays(): + # GH 3788 + df = pd.DataFrame([[1, np.array([10, 20, 30])], + [1, np.array([40, 50, 60])], + [2, np.array([20, 30, 40])]], + columns=['category', 'arraydata']) + result = df.groupby('category').agg(sum) + + expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]] + expected_index = pd.Index([1, 2], name='category') + expected_column = ['arraydata'] + expected = pd.DataFrame(expected_data, + index=expected_index, + 
columns=expected_column) + + tm.assert_frame_equal(result, expected) + + +def test_agg_timezone_round_trip(): + # GH 15426 + ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific') + df = pd.DataFrame({'a': 1, + 'b': [ts + timedelta(minutes=nn) for nn in range(10)]}) + + result1 = df.groupby('a')['b'].agg(np.min).iloc[0] + result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0] + result3 = df.groupby('a')['b'].min().iloc[0] + + assert result1 == ts + assert result2 == ts + assert result3 == ts + + dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific') + for i in range(1, 5)] + df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates}) + grouped = df.groupby('A') + + ts = df['B'].iloc[0] + assert ts == grouped.nth(0)['B'].iloc[0] + assert ts == grouped.head(1)['B'].iloc[0] + assert ts == grouped.first()['B'].iloc[0] + assert ts == grouped.apply(lambda x: x.iloc[0])[0] + + ts = df['B'].iloc[2] + assert ts == grouped.last()['B'].iloc[0] + assert ts == grouped.apply(lambda x: x.iloc[-1])[0] + + +def test_sum_uint64_overflow(): + # see gh-14758 + # Convert to uint64 and don't overflow + df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object) + df = df + 9223372036854775807 + + index = pd.Index([9223372036854775808, + 9223372036854775810, + 9223372036854775812], + dtype=np.uint64) + expected = pd.DataFrame({1: [9223372036854775809, + 9223372036854775811, + 9223372036854775813]}, + index=index) + + expected.index.name = 0 + result = df.groupby(0).sum() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("structure, expected", [ + (tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})), + (list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})), + (lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1), + (3, 4): (3, 4, 4)}})), + (lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1], + (3, 4): [3, 4, 4]}})) +]) +def test_agg_structs_dataframe(structure, expected): + df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 
3], + 'B': [1, 1, 1, 4, 4, 4], + 'C': [1, 1, 1, 3, 4, 4]}) + + result = df.groupby(['A', 'B']).aggregate(structure) + expected.index.names = ['A', 'B'] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("structure, expected", [ + (tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')), + (list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')), + (lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], + index=[1, 3], name='C')), + (lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], + index=[1, 3], name='C')) +]) +def test_agg_structs_series(structure, expected): + # Issue #18079 + df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3], + 'B': [1, 1, 1, 4, 4, 4], + 'C': [1, 1, 1, 3, 4, 4]}) + + result = df.groupby('A')['C'].aggregate(structure) + expected.index.name = 'A' + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") +def test_agg_category_nansum(): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py deleted file mode 100644 index cca21fddd116e..0000000000000 --- a/pandas/tests/groupby/test_aggregate.py +++ /dev/null @@ -1,961 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -we test .agg behavior / note that .apply is tested -generally in test_groupby.py -""" - -from __future__ import print_function - -import pytest - -from datetime import datetime, timedelta -from functools import partial - -import numpy as np -from numpy import nan -import pandas as pd - -from pandas import (date_range, MultiIndex, DataFrame, - Series, Index, bdate_range, concat) -from pandas.util.testing import 
assert_frame_equal, assert_series_equal -from pandas.core.groupby import SpecificationError, DataError -from pandas.compat import OrderedDict -from pandas.io.formats.printing import pprint_thing -import pandas.util.testing as tm - - -class TestGroupByAggregate(object): - - def setup_method(self, method): - self.ts = tm.makeTimeSeries() - - self.seriesd = tm.getSeriesData() - self.tsd = tm.getTimeSeriesData() - self.frame = DataFrame(self.seriesd) - self.tsframe = DataFrame(self.tsd) - - self.df = DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - - self.df_mixed_floats = DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.array( - np.random.randn(8), dtype='float32')}) - - index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', - 'three']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - self.mframe = DataFrame(np.random.randn(10, 3), index=index, - columns=['A', 'B', 'C']) - - self.three_group = DataFrame( - {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar', - 'foo', 'foo', 'foo'], - 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', - 'two', 'two', 'one'], - 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', - 'dull', 'shiny', 'shiny', 'shiny'], - 'D': np.random.randn(11), - 'E': np.random.randn(11), - 'F': np.random.randn(11)}) - - def test_agg_api(self): - - # GH 6337 - # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error - # different api for agg when passed custom function with mixed frame - - df = DataFrame({'data1': np.random.randn(5), - 'data2': np.random.randn(5), - 'key1': ['a', 'a', 'b', 'b', 'a'], - 'key2': ['one', 'two', 
'one', 'two', 'one']}) - grouped = df.groupby('key1') - - def peak_to_peak(arr): - return arr.max() - arr.min() - - expected = grouped.agg([peak_to_peak]) - expected.columns = ['data1', 'data2'] - result = grouped.agg(peak_to_peak) - assert_frame_equal(result, expected) - - def test_agg_regression1(self): - grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month]) - result = grouped.agg(np.mean) - expected = grouped.mean() - assert_frame_equal(result, expected) - - def test_agg_datetimes_mixed(self): - data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]] - - df1 = DataFrame({'key': [x[0] for x in data], - 'date': [x[1] for x in data], - 'value': [x[2] for x in data]}) - - data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] - else None, row[2]] for row in data] - - df2 = DataFrame({'key': [x[0] for x in data], - 'date': [x[1] for x in data], - 'value': [x[2] for x in data]}) - - df1['weights'] = df1['value'] / df1['value'].sum() - gb1 = df1.groupby('date').aggregate(np.sum) - - df2['weights'] = df1['value'] / df1['value'].sum() - gb2 = df2.groupby('date').aggregate(np.sum) - - assert (len(gb1) == len(gb2)) - - def test_agg_period_index(self): - from pandas import period_range, PeriodIndex - prng = period_range('2012-1-1', freq='M', periods=3) - df = DataFrame(np.random.randn(3, 2), index=prng) - rs = df.groupby(level=0).sum() - assert isinstance(rs.index, PeriodIndex) - - # GH 3579 - index = period_range(start='1999-01', periods=5, freq='M') - s1 = Series(np.random.rand(len(index)), index=index) - s2 = Series(np.random.rand(len(index)), index=index) - series = [('s1', s1), ('s2', s2)] - df = DataFrame.from_items(series) - grouped = df.groupby(df.index.month) - list(grouped) - - def test_agg_dict_parameter_cast_result_dtypes(self): - # GH 12821 - - df = DataFrame( - {'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'], - 'time': date_range('1/1/2011', periods=8, freq='H')}) - df.loc[[0, 1, 2, 5], 'time'] = None - - # 
test for `first` function - exp = df.loc[[0, 3, 4, 6]].set_index('class') - grouped = df.groupby('class') - assert_frame_equal(grouped.first(), exp) - assert_frame_equal(grouped.agg('first'), exp) - assert_frame_equal(grouped.agg({'time': 'first'}), exp) - assert_series_equal(grouped.time.first(), exp['time']) - assert_series_equal(grouped.time.agg('first'), exp['time']) - - # test for `last` function - exp = df.loc[[0, 3, 4, 7]].set_index('class') - grouped = df.groupby('class') - assert_frame_equal(grouped.last(), exp) - assert_frame_equal(grouped.agg('last'), exp) - assert_frame_equal(grouped.agg({'time': 'last'}), exp) - assert_series_equal(grouped.time.last(), exp['time']) - assert_series_equal(grouped.time.agg('last'), exp['time']) - - # count - exp = pd.Series([2, 2, 2, 2], - index=Index(list('ABCD'), name='class'), - name='time') - assert_series_equal(grouped.time.agg(len), exp) - assert_series_equal(grouped.time.size(), exp) - - exp = pd.Series([0, 1, 1, 2], - index=Index(list('ABCD'), name='class'), - name='time') - assert_series_equal(grouped.time.count(), exp) - - def test_agg_cast_results_dtypes(self): - # similar to GH12821 - # xref #11444 - u = [datetime(2015, x + 1, 1) for x in range(12)] - v = list('aaabbbbbbccd') - df = pd.DataFrame({'X': v, 'Y': u}) - - result = df.groupby('X')['Y'].agg(len) - expected = df.groupby('X')['Y'].count() - assert_series_equal(result, expected) - - def test_agg_must_agg(self): - grouped = self.df.groupby('A')['C'] - pytest.raises(Exception, grouped.agg, lambda x: x.describe()) - pytest.raises(Exception, grouped.agg, lambda x: x.index[:2]) - - def test_agg_ser_multi_key(self): - # TODO(wesm): unused - ser = self.df.C # noqa - - f = lambda x: x.sum() - results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f) - expected = self.df.groupby(['A', 'B']).sum()['C'] - assert_series_equal(results, expected) - - def test_agg_apply_corner(self): - # nothing to group, all NA - grouped = self.ts.groupby(self.ts * np.nan) - 
assert self.ts.dtype == np.float64 - - # groupby float64 values results in Float64Index - exp = Series([], dtype=np.float64, index=pd.Index( - [], dtype=np.float64)) - assert_series_equal(grouped.sum(), exp) - assert_series_equal(grouped.agg(np.sum), exp) - assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False) - - # DataFrame - grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan) - exp_df = DataFrame(columns=self.tsframe.columns, dtype=float, - index=pd.Index([], dtype=np.float64)) - assert_frame_equal(grouped.sum(), exp_df, check_names=False) - assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) - assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], - check_names=False) - - def test_agg_grouping_is_list_tuple(self): - from pandas.core.groupby import Grouping - - df = tm.makeTimeDataFrame() - - grouped = df.groupby(lambda x: x.year) - grouper = grouped.grouper.groupings[0].grouper - grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper)) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper)) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - def test_aggregate_float64_no_int64(self): - # see gh-11199 - df = DataFrame({"a": [1, 2, 3, 4, 5], - "b": [1, 2, 2, 4, 5], - "c": [1, 2, 3, 4, 5]}) - - expected = DataFrame({"a": [1, 2.5, 4, 5]}, - index=[1, 2, 4, 5]) - expected.index.name = "b" - - result = df.groupby("b")[["a"]].mean() - tm.assert_frame_equal(result, expected) - - expected = DataFrame({"a": [1, 2.5, 4, 5], - "c": [1, 2.5, 4, 5]}, - index=[1, 2, 4, 5]) - expected.index.name = "b" - - result = df.groupby("b")[["a", "c"]].mean() - tm.assert_frame_equal(result, expected) - - def test_aggregate_api_consistency(self): - # GH 9052 - # make sure that the aggregates via dict - # are consistent - - df = DataFrame({'A': ['foo', 
'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': np.random.randn(8) + 1.0, - 'D': np.arange(8)}) - - grouped = df.groupby(['A', 'B']) - c_mean = grouped['C'].mean() - c_sum = grouped['C'].sum() - d_mean = grouped['D'].mean() - d_sum = grouped['D'].sum() - - result = grouped['D'].agg(['sum', 'mean']) - expected = pd.concat([d_sum, d_mean], - axis=1) - expected.columns = ['sum', 'mean'] - assert_frame_equal(result, expected, check_like=True) - - result = grouped.agg([np.sum, np.mean]) - expected = pd.concat([c_sum, - c_mean, - d_sum, - d_mean], - axis=1) - expected.columns = MultiIndex.from_product([['C', 'D'], - ['sum', 'mean']]) - assert_frame_equal(result, expected, check_like=True) - - result = grouped[['D', 'C']].agg([np.sum, np.mean]) - expected = pd.concat([d_sum, - d_mean, - c_sum, - c_mean], - axis=1) - expected.columns = MultiIndex.from_product([['D', 'C'], - ['sum', 'mean']]) - assert_frame_equal(result, expected, check_like=True) - - result = grouped.agg({'C': 'mean', 'D': 'sum'}) - expected = pd.concat([d_sum, - c_mean], - axis=1) - assert_frame_equal(result, expected, check_like=True) - - result = grouped.agg({'C': ['mean', 'sum'], - 'D': ['mean', 'sum']}) - expected = pd.concat([c_mean, - c_sum, - d_mean, - d_sum], - axis=1) - expected.columns = MultiIndex.from_product([['C', 'D'], - ['mean', 'sum']]) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = grouped[['D', 'C']].agg({'r': np.sum, - 'r2': np.mean}) - expected = pd.concat([d_sum, - c_sum, - d_mean, - c_mean], - axis=1) - expected.columns = MultiIndex.from_product([['r', 'r2'], - ['D', 'C']]) - assert_frame_equal(result, expected, check_like=True) - - def test_agg_dict_renaming_deprecation(self): - # 15931 - df = pd.DataFrame({'A': [1, 1, 1, 2, 2], - 'B': range(5), - 'C': range(5)}) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False) as w: - 
df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, - 'C': {'bar': ['count', 'min']}}) - assert "using a dict with renaming" in str(w[0].message) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) - - with tm.assert_produces_warning(FutureWarning) as w: - df.groupby('A').B.agg({'foo': 'count'}) - assert "using a dict on a Series for aggregation" in str( - w[0].message) - - def test_agg_compat(self): - - # GH 12334 - - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': np.random.randn(8) + 1.0, - 'D': np.arange(8)}) - - g = df.groupby(['A', 'B']) - - expected = pd.concat([g['D'].sum(), - g['D'].std()], - axis=1) - expected.columns = MultiIndex.from_tuples([('C', 'sum'), - ('C', 'std')]) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = g['D'].agg({'C': ['sum', 'std']}) - assert_frame_equal(result, expected, check_like=True) - - expected = pd.concat([g['D'].sum(), - g['D'].std()], - axis=1) - expected.columns = ['C', 'D'] - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = g['D'].agg({'C': 'sum', 'D': 'std'}) - assert_frame_equal(result, expected, check_like=True) - - def test_agg_nested_dicts(self): - - # API change for disallowing these types of nested dicts - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': np.random.randn(8) + 1.0, - 'D': np.arange(8)}) - - g = df.groupby(['A', 'B']) - - def f(): - g.aggregate({'r1': {'C': ['mean', 'sum']}, - 'r2': {'D': ['mean', 'sum']}}) - - pytest.raises(SpecificationError, f) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = g.agg({'C': {'ra': ['mean', 'std']}, - 'D': {'rb': ['mean', 'std']}}) - expected = pd.concat([g['C'].mean(), 
g['C'].std(), g['D'].mean(), - g['D'].std()], axis=1) - expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( - 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) - assert_frame_equal(result, expected, check_like=True) - - # same name as the original column - # GH9052 - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - expected = g['D'].agg({'result1': np.sum, 'result2': np.mean}) - expected = expected.rename(columns={'result1': 'D'}) - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = g['D'].agg({'D': np.sum, 'result2': np.mean}) - assert_frame_equal(result, expected, check_like=True) - - def test_agg_python_multiindex(self): - grouped = self.mframe.groupby(['A', 'B']) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - def test_aggregate_str_func(self): - def _check_results(grouped): - # single series - result = grouped['A'].agg('std') - expected = grouped['A'].std() - assert_series_equal(result, expected) - - # group frame by function name - result = grouped.aggregate('var') - expected = grouped.var() - assert_frame_equal(result, expected) - - # group frame by function dict - result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'], - ['C', 'mean'], ['D', 'sem']])) - expected = DataFrame(OrderedDict([['A', grouped['A'].var( - )], ['B', grouped['B'].std()], ['C', grouped['C'].mean()], - ['D', grouped['D'].sem()]])) - assert_frame_equal(result, expected) - - by_weekday = self.tsframe.groupby(lambda x: x.weekday()) - _check_results(by_weekday) - - by_mwkday = self.tsframe.groupby([lambda x: x.month, - lambda x: x.weekday()]) - _check_results(by_mwkday) - - def test_aggregate_item_by_item(self): - - df = self.df.copy() - df['E'] = ['a'] * len(self.df) - grouped = self.df.groupby('A') - - # API change in 0.11 - # def aggfun(ser): - # return len(ser + 'a') - # result = grouped.agg(aggfun) - # assert len(result.columns) == 1 - - aggfun = 
lambda ser: ser.size - result = grouped.agg(aggfun) - foo = (self.df.A == 'foo').sum() - bar = (self.df.A == 'bar').sum() - K = len(result.columns) - - # GH5782 - # odd comparisons can result here, so cast to make easy - exp = pd.Series(np.array([foo] * K), index=list('BCD'), - dtype=np.float64, name='foo') - tm.assert_series_equal(result.xs('foo'), exp) - - exp = pd.Series(np.array([bar] * K), index=list('BCD'), - dtype=np.float64, name='bar') - tm.assert_almost_equal(result.xs('bar'), exp) - - def aggfun(ser): - return ser.size - - result = DataFrame().groupby(self.df.A).agg(aggfun) - assert isinstance(result, DataFrame) - assert len(result) == 0 - - def test_agg_item_by_item_raise_typeerror(self): - from numpy.random import randint - - df = DataFrame(randint(10, size=(20, 10))) - - def raiseException(df): - pprint_thing('----------------------------------------') - pprint_thing(df.to_string()) - raise TypeError - - pytest.raises(TypeError, df.groupby(0).agg, raiseException) - - def test_series_agg_multikey(self): - ts = tm.makeTimeSeries() - grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) - - result = grouped.agg(np.sum) - expected = grouped.sum() - assert_series_equal(result, expected) - - def test_series_agg_multi_pure_python(self): - data = DataFrame( - {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar', - 'foo', 'foo', 'foo'], - 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', - 'two', 'two', 'one'], - 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', - 'dull', 'shiny', 'shiny', 'shiny'], - 'D': np.random.randn(11), - 'E': np.random.randn(11), - 'F': np.random.randn(11)}) - - def bad(x): - assert (len(x.base) > 0) - return 'foo' - - result = data.groupby(['A', 'B']).agg(bad) - expected = data.groupby(['A', 'B']).agg(lambda x: 'foo') - assert_frame_equal(result, expected) - - def test_cythonized_aggers(self): - data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan], - 'B': ['A', 'B'] * 6, - 'C': 
np.random.randn(12)} - df = DataFrame(data) - df.loc[2:10:2, 'C'] = nan - - def _testit(name): - - op = lambda x: getattr(x, name)() - - # single column - grouped = df.drop(['B'], axis=1).groupby('A') - exp = {} - for cat, group in grouped: - exp[cat] = op(group['C']) - exp = DataFrame({'C': exp}) - exp.index.name = 'A' - result = op(grouped) - assert_frame_equal(result, exp) - - # multiple columns - grouped = df.groupby(['A', 'B']) - expd = {} - for (cat1, cat2), group in grouped: - expd.setdefault(cat1, {})[cat2] = op(group['C']) - exp = DataFrame(expd).T.stack(dropna=False) - exp.index.names = ['A', 'B'] - exp.name = 'C' - - result = op(grouped)['C'] - if name in ['sum', 'prod']: - assert_series_equal(result, exp) - - _testit('count') - _testit('sum') - _testit('std') - _testit('var') - _testit('sem') - _testit('mean') - _testit('median') - _testit('prod') - _testit('min') - _testit('max') - - def test_cython_agg_boolean(self): - frame = DataFrame({'a': np.random.randint(0, 5, 50), - 'b': np.random.randint(0, 2, 50).astype('bool')}) - result = frame.groupby('a')['b'].mean() - expected = frame.groupby('a')['b'].agg(np.mean) - - assert_series_equal(result, expected) - - def test_cython_agg_nothing_to_agg(self): - frame = DataFrame({'a': np.random.randint(0, 5, 50), - 'b': ['foo', 'bar'] * 25}) - pytest.raises(DataError, frame.groupby('a')['b'].mean) - - frame = DataFrame({'a': np.random.randint(0, 5, 50), - 'b': ['foo', 'bar'] * 25}) - pytest.raises(DataError, frame[['b']].groupby(frame['a']).mean) - - def test_cython_agg_nothing_to_agg_with_dates(self): - frame = DataFrame({'a': np.random.randint(0, 5, 50), - 'b': ['foo', 'bar'] * 25, - 'dates': pd.date_range('now', periods=50, - freq='T')}) - with tm.assert_raises_regex(DataError, - "No numeric types to aggregate"): - frame.groupby('b').dates.mean() - - def test_cython_agg_frame_columns(self): - # #2113 - df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]}) - - df.groupby(level=0, axis='columns').mean() - 
df.groupby(level=0, axis='columns').mean() - df.groupby(level=0, axis='columns').mean() - df.groupby(level=0, axis='columns').mean() - - def test_cython_agg_return_dict(self): - # GH 16741 - ts = self.df.groupby('A')['B'].agg( - lambda x: x.value_counts().to_dict()) - expected = Series([{'two': 1, 'one': 1, 'three': 1}, - {'two': 2, 'one': 2, 'three': 1}], - index=Index(['bar', 'foo'], name='A'), - name='B') - assert_series_equal(ts, expected) - - def test_cython_fail_agg(self): - dr = bdate_range('1/1/2000', periods=50) - ts = Series(['A', 'B', 'C', 'D', 'E'] * 10, index=dr) - - grouped = ts.groupby(lambda x: x.month) - summed = grouped.sum() - expected = grouped.agg(np.sum) - assert_series_equal(summed, expected) - - def test_agg_consistency(self): - # agg with ([]) and () not consistent - # GH 6715 - - def P1(a): - try: - return np.percentile(a.dropna(), q=1) - except Exception: - return np.nan - - import datetime as dt - df = DataFrame({'col1': [1, 2, 3, 4], - 'col2': [10, 25, 26, 31], - 'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10), - dt.date(2013, 2, 11), dt.date(2013, 2, 11)]}) - - g = df.groupby('date') - - expected = g.agg([P1]) - expected.columns = expected.columns.levels[0] - - result = g.agg(P1) - assert_frame_equal(result, expected) - - def test_wrap_agg_out(self): - grouped = self.three_group.groupby(['A', 'B']) - - def func(ser): - if ser.dtype == np.object: - raise TypeError - else: - return ser.sum() - - result = grouped.aggregate(func) - exp_grouped = self.three_group.loc[:, self.three_group.columns != 'C'] - expected = exp_grouped.groupby(['A', 'B']).aggregate(func) - assert_frame_equal(result, expected) - - def test_agg_multiple_functions_maintain_order(self): - # GH #610 - funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)] - result = self.df.groupby('A')['C'].agg(funcs) - exp_cols = Index(['mean', 'max', 'min']) - - tm.assert_index_equal(result.columns, exp_cols) - - def test_multiple_functions_tuples_and_non_tuples(self): - # 
#1359 - - funcs = [('foo', 'mean'), 'std'] - ex_funcs = [('foo', 'mean'), ('std', 'std')] - - result = self.df.groupby('A')['C'].agg(funcs) - expected = self.df.groupby('A')['C'].agg(ex_funcs) - assert_frame_equal(result, expected) - - result = self.df.groupby('A').agg(funcs) - expected = self.df.groupby('A').agg(ex_funcs) - assert_frame_equal(result, expected) - - def test_agg_multiple_functions_too_many_lambdas(self): - grouped = self.df.groupby('A') - funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] - - pytest.raises(SpecificationError, grouped.agg, funcs) - - def test_more_flexible_frame_multi_function(self): - - grouped = self.df.groupby('A') - - exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) - exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) - - expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) - expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) - - d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) - result = grouped.aggregate(d) - - assert_frame_equal(result, expected) - - # be careful - result = grouped.aggregate(OrderedDict([['C', np.mean], - ['D', [np.mean, np.std]]])) - expected = grouped.aggregate(OrderedDict([['C', np.mean], - ['D', [np.mean, np.std]]])) - assert_frame_equal(result, expected) - - def foo(x): - return np.mean(x) - - def bar(x): - return np.std(x, ddof=1) - - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - d = OrderedDict([['C', np.mean], ['D', OrderedDict( - [['foo', np.mean], ['bar', np.std]])]]) - result = grouped.aggregate(d) - - d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) - expected = grouped.aggregate(d) - - assert_frame_equal(result, expected) - - def test_multi_function_flexible_mix(self): - # GH #1268 - grouped = self.df.groupby('A') - - d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ - 'bar', 'std' - ]])], ['D', 'sum']]) - - # this uses 
column selection & renaming - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = grouped.aggregate(d) - - d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ - 'bar', 'std' - ]])], ['D', ['sum']]]) - - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result2 = grouped.aggregate(d2) - - d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ - 'bar', 'std' - ]])], ['D', {'sum': 'sum'}]]) - - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - expected = grouped.aggregate(d3) - - assert_frame_equal(result, expected) - assert_frame_equal(result2, expected) - - def test_agg_callables(self): - # GH 7929 - df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64) - - class fn_class(object): - - def __call__(self, x): - return sum(x) - - equiv_callables = [sum, np.sum, lambda x: sum(x), lambda x: x.sum(), - partial(sum), fn_class()] - - expected = df.groupby("foo").agg(sum) - for ecall in equiv_callables: - result = df.groupby('foo').agg(ecall) - assert_frame_equal(result, expected) - - def test__cython_agg_general(self): - ops = [('mean', np.mean), - ('median', np.median), - ('var', np.var), - ('add', np.sum), - ('prod', np.prod), - ('min', np.min), - ('max', np.max), - ('first', lambda x: x.iloc[0]), - ('last', lambda x: x.iloc[-1]), ] - df = DataFrame(np.random.randn(1000)) - labels = np.random.randint(0, 50, size=1000).astype(float) - - for op, targop in ops: - result = df.groupby(labels)._cython_agg_general(op) - expected = df.groupby(labels).agg(targop) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op, ) - raise - - @pytest.mark.parametrize('op, targop', [ - ('mean', np.mean), - ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), - ('var', lambda x: np.var(x, ddof=1)), - ('min', np.min), - ('max', np.max), ] - ) - 
def test_cython_agg_empty_buckets(self, op, targop): - df = pd.DataFrame([11, 12, 13]) - grps = range(0, 55, 5) - - # calling _cython_agg_general directly, instead of via the user API - # which sets different values for min_count, so do that here. - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op,) - raise - - def test_cython_agg_empty_buckets_nanops(self): - # GH-18869 can't call nanops on empty groups, so hardcode expected - # for these - df = pd.DataFrame([11, 12, 13], columns=['a']) - grps = range(0, 25, 5) - # add / sum - result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') - intervals = pd.interval_range(0, 20, freq=5) - expected = pd.DataFrame( - {"a": [0, 0, 36, 0]}, - index=pd.CategoricalIndex(intervals, name='a', ordered=True)) - tm.assert_frame_equal(result, expected) - - # prod - result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') - expected = pd.DataFrame( - {"a": [1, 1, 1716, 1]}, - index=pd.CategoricalIndex(intervals, name='a', ordered=True)) - tm.assert_frame_equal(result, expected) - - @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") - def test_agg_category_nansum(self): - categories = ['a', 'b', 'c'] - df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], - categories=categories), - 'B': [1, 2, 3]}) - result = df.groupby("A").B.agg(np.nansum) - expected = pd.Series([3, 3, 0], - index=pd.CategoricalIndex(['a', 'b', 'c'], - categories=categories, - name='A'), - name='B') - tm.assert_series_equal(result, expected) - - def test_agg_over_numpy_arrays(self): - # GH 3788 - df = pd.DataFrame([[1, np.array([10, 20, 30])], - [1, np.array([40, 50, 60])], - [2, np.array([20, 30, 40])]], - columns=['category', 'arraydata']) - result = df.groupby('category').agg(sum) - - expected_data = 
[[np.array([50, 70, 90])], [np.array([20, 30, 40])]] - expected_index = pd.Index([1, 2], name='category') - expected_column = ['arraydata'] - expected = pd.DataFrame(expected_data, - index=expected_index, - columns=expected_column) - - assert_frame_equal(result, expected) - - def test_agg_timezone_round_trip(self): - # GH 15426 - ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific') - df = pd.DataFrame({'a': 1, 'b': [ts + timedelta(minutes=nn) - for nn in range(10)]}) - - result1 = df.groupby('a')['b'].agg(np.min).iloc[0] - result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0] - result3 = df.groupby('a')['b'].min().iloc[0] - - assert result1 == ts - assert result2 == ts - assert result3 == ts - - dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific') - for i in range(1, 5)] - df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates}) - grouped = df.groupby('A') - - ts = df['B'].iloc[0] - assert ts == grouped.nth(0)['B'].iloc[0] - assert ts == grouped.head(1)['B'].iloc[0] - assert ts == grouped.first()['B'].iloc[0] - assert ts == grouped.apply(lambda x: x.iloc[0])[0] - - ts = df['B'].iloc[2] - assert ts == grouped.last()['B'].iloc[0] - assert ts == grouped.apply(lambda x: x.iloc[-1])[0] - - def test_sum_uint64_overflow(self): - # see gh-14758 - - # Convert to uint64 and don't overflow - df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], - dtype=object) + 9223372036854775807 - - index = pd.Index([9223372036854775808, 9223372036854775810, - 9223372036854775812], dtype=np.uint64) - expected = pd.DataFrame({1: [9223372036854775809, - 9223372036854775811, - 9223372036854775813]}, index=index) - - expected.index.name = 0 - result = df.groupby(0).sum() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("structure, expected", [ - (tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})), - (list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})), - (lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1), - (3, 4): 
(3, 4, 4)}})), - (lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1], - (3, 4): [3, 4, 4]}})) - ]) - def test_agg_structs_dataframe(self, structure, expected): - df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3], - 'B': [1, 1, 1, 4, 4, 4], 'C': [1, 1, 1, 3, 4, 4]}) - - result = df.groupby(['A', 'B']).aggregate(structure) - expected.index.names = ['A', 'B'] - assert_frame_equal(result, expected) - - @pytest.mark.parametrize("structure, expected", [ - (tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')), - (list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')), - (lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], - index=[1, 3], name='C')), - (lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], - index=[1, 3], name='C')) - ]) - def test_agg_structs_series(self, structure, expected): - # Issue #18079 - df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3], - 'B': [1, 1, 1, 4, 4, 4], 'C': [1, 1, 1, 3, 4, 4]}) - - result = df.groupby('A')['C'].aggregate(structure) - expected.index.name = 'A' - assert_series_equal(result, expected)
closes #18490 Module currently has tests in 2 classes (`TestGroupByAggregate` and `TestGroupByAggregateCython`). The remaining tests are not in a class. Also made the following changes: ### TestGroupByAggregate class #### test_agg_must_agg * replaced `pytest.raises` with `tm.assert_raises_regex` #### test_agg_apply_corner * made it more readable #### test_agg_multiple_functions_too_many_lambdas * replaced `pytest.raises` with `tm.assert_raises_regex` #### test_multi_function_flexible_mix * made it more readable ### TestGroupByAggregateCython class _all test methods that contain cython in the test name_ #### test_cython_agg_nothing_to_agg * replaced `pytest.raises` with `tm.assert_raises_regex` #### test_cython_agg_return_dict * replaced `self.df` with df initialized inside function ### All Others #### test_agg_dict_renaming_deprecation * made it more readable #### test_agg_nested_dicts * replaced `pytest.raises` with `tm.assert_raises_regex` * made it more readable #### test_agg_item_by_item_raise_typeerror * replaced `pytest.raises` with `tm.assert_raises_regex` #### test_agg_structs_series * made it more readable
https://api.github.com/repos/pandas-dev/pandas/pulls/18931
2017-12-24T19:13:09Z
2017-12-30T12:31:36Z
2017-12-30T12:31:36Z
2018-01-05T14:24:09Z
BUG: Stack/unstack do not return subclassed objects (GH15563)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index dc305f36f32ec..ec106ff2b2f61 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -449,6 +449,8 @@ Reshaping - Bug in :func:`cut` which fails when using readonly arrays (:issue:`18773`) - Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`) - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) +- Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) +- Numeric ^^^^^^^ diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index b648c426a877f..28e9694681912 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -80,8 +80,7 @@ def melt(frame, id_vars=None, value_vars=None, var_name=None, mdata[col] = np.asanyarray(frame.columns ._get_level_values(i)).repeat(N) - from pandas import DataFrame - return DataFrame(mdata, columns=mcolumns) + return frame._constructor(mdata, columns=mcolumns) def lreshape(data, groups, dropna=True, label=None): @@ -152,8 +151,7 @@ def lreshape(data, groups, dropna=True, label=None): if not mask.all(): mdata = {k: v[mask] for k, v in compat.iteritems(mdata)} - from pandas import DataFrame - return DataFrame(mdata, columns=id_cols + pivot_cols) + return data._constructor(mdata, columns=id_cols + pivot_cols) def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d6aed064e49f8..7a34044f70c34 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -37,8 +37,23 @@ class _Unstacker(object): Parameters ---------- + values : ndarray + Values of DataFrame to "Unstack" + index : object + Pandas ``Index`` 
level : int or str, default last level Level to "unstack". Accepts a name for the level. + value_columns : Index, optional + Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame + fill_value : scalar, optional + Default value to fill in missing values if subgroups do not have the + same set of labels. By default, missing values will be replaced with + the default fill value for that data type, NaN for float, NaT for + datetimelike, etc. For integer types, by default data will converted to + float and missing values will be set to NaN. + constructor : object + Pandas ``DataFrame`` or subclass used to create unstacked + response. If None, DataFrame or SparseDataFrame will be used. Examples -------- @@ -69,7 +84,7 @@ class _Unstacker(object): """ def __init__(self, values, index, level=-1, value_columns=None, - fill_value=None): + fill_value=None, constructor=None): self.is_categorical = None self.is_sparse = is_sparse(values) @@ -86,6 +101,14 @@ def __init__(self, values, index, level=-1, value_columns=None, self.value_columns = value_columns self.fill_value = fill_value + if constructor is None: + if self.is_sparse: + self.constructor = SparseDataFrame + else: + self.constructor = DataFrame + else: + self.constructor = constructor + if value_columns is None and values.shape[1] != 1: # pragma: no cover raise ValueError('must pass column labels for multi-column data') @@ -173,8 +196,7 @@ def get_result(self): ordered=ordered) for i in range(values.shape[-1])] - klass = SparseDataFrame if self.is_sparse else DataFrame - return klass(values, index=index, columns=columns) + return self.constructor(values, index=index, columns=columns) def get_new_values(self): values = self.values @@ -374,8 +396,9 @@ def pivot(self, index=None, columns=None, values=None): index = self.index else: index = self[index] - indexed = Series(self[values].values, - index=MultiIndex.from_arrays([index, self[columns]])) + indexed = self._constructor_sliced( + self[values].values, + 
index=MultiIndex.from_arrays([index, self[columns]])) return indexed.unstack(columns) @@ -461,7 +484,8 @@ def unstack(obj, level, fill_value=None): return obj.T.stack(dropna=False) else: unstacker = _Unstacker(obj.values, obj.index, level=level, - fill_value=fill_value) + fill_value=fill_value, + constructor=obj._constructor_expanddim) return unstacker.get_result() @@ -470,12 +494,12 @@ def _unstack_frame(obj, level, fill_value=None): unstacker = partial(_Unstacker, index=obj.index, level=level, fill_value=fill_value) blocks = obj._data.unstack(unstacker) - klass = type(obj) - return klass(blocks) + return obj._constructor(blocks) else: unstacker = _Unstacker(obj.values, obj.index, level=level, value_columns=obj.columns, - fill_value=fill_value) + fill_value=fill_value, + constructor=obj._constructor) return unstacker.get_result() @@ -528,8 +552,7 @@ def factorize(index): new_values = new_values[mask] new_index = new_index[mask] - klass = type(frame)._constructor_sliced - return klass(new_values, index=new_index) + return frame._constructor_sliced(new_values, index=new_index) def stack_multiple(frame, level, dropna=True): @@ -676,7 +699,7 @@ def _convert_level_number(level_num, columns): new_index = MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) - result = DataFrame(new_data, index=new_index, columns=new_columns) + result = frame._constructor(new_data, index=new_index, columns=new_columns) # more efficient way to go about this? can do the whole masking biz but # will only save a small amount of time... 
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 52c591e4dcbb0..c52b512c2930a 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -5,7 +5,7 @@ from warnings import catch_warnings import numpy as np -from pandas import DataFrame, Series, MultiIndex, Panel +from pandas import DataFrame, Series, MultiIndex, Panel, Index import pandas as pd import pandas.util.testing as tm @@ -247,3 +247,270 @@ def test_subclass_sparse_transpose(self): [2, 5], [3, 6]]) tm.assert_sp_frame_equal(ossdf.T, essdf) + + def test_subclass_stack(self): + # GH 15564 + df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=['a', 'b', 'c'], + columns=['X', 'Y', 'Z']) + + res = df.stack() + exp = tm.SubclassedSeries( + [1, 2, 3, 4, 5, 6, 7, 8, 9], + index=[list('aaabbbccc'), list('XYZXYZXYZ')]) + + tm.assert_series_equal(res, exp) + + def test_subclass_stack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame([ + [10, 11, 12, 13], + [20, 21, 22, 23], + [30, 31, 32, 33], + [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list('AABB'), list('cdcd'))), + names=['aaa', 'ccc']), + columns=MultiIndex.from_tuples( + list(zip(list('WWXX'), list('yzyz'))), + names=['www', 'yyy'])) + + exp = tm.SubclassedDataFrame([ + [10, 12], + [11, 13], + [20, 22], + [21, 23], + [30, 32], + [31, 33], + [40, 42], + [41, 43]], + index=MultiIndex.from_tuples(list(zip( + list('AAAABBBB'), list('ccddccdd'), list('yzyzyzyz'))), + names=['aaa', 'ccc', 'yyy']), + columns=Index(['W', 'X'], name='www')) + + res = df.stack() + tm.assert_frame_equal(res, exp) + + res = df.stack('yyy') + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame([ + [10, 11], + [12, 13], + [20, 21], + [22, 23], + [30, 31], + [32, 33], + [40, 41], + [42, 43]], + index=MultiIndex.from_tuples(list(zip( + list('AAAABBBB'), list('ccddccdd'), list('WXWXWXWX'))), + names=['aaa', 'ccc', 'www']), + columns=Index(['y', 'z'], name='yyy')) 
+ + res = df.stack('www') + tm.assert_frame_equal(res, exp) + + def test_subclass_stack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame([ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0]], + index=MultiIndex.from_tuples( + list(zip(list('AABB'), list('cdcd'))), + names=['aaa', 'ccc']), + columns=MultiIndex.from_tuples( + list(zip(list('WWXX'), list('yzyz'))), + names=['www', 'yyy'])) + + exp = tm.SubclassedDataFrame([ + [10, 12.0], + [11, 13.0], + [20, 22.0], + [21, 23.0], + [30, 32.0], + [31, 33.0], + [40, 42.0], + [41, 43.0]], + index=MultiIndex.from_tuples(list(zip( + list('AAAABBBB'), list('ccddccdd'), list('yzyzyzyz'))), + names=['aaa', 'ccc', 'yyy']), + columns=Index(['W', 'X'], name='www')) + + res = df.stack() + tm.assert_frame_equal(res, exp) + + res = df.stack('yyy') + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame([ + [10.0, 11.0], + [12.0, 13.0], + [20.0, 21.0], + [22.0, 23.0], + [30.0, 31.0], + [32.0, 33.0], + [40.0, 41.0], + [42.0, 43.0]], + index=MultiIndex.from_tuples(list(zip( + list('AAAABBBB'), list('ccddccdd'), list('WXWXWXWX'))), + names=['aaa', 'ccc', 'www']), + columns=Index(['y', 'z'], name='yyy')) + + res = df.stack('www') + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack(self): + # GH 15564 + df = tm.SubclassedDataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=['a', 'b', 'c'], + columns=['X', 'Y', 'Z']) + + res = df.unstack() + exp = tm.SubclassedSeries( + [1, 4, 7, 2, 5, 8, 3, 6, 9], + index=[list('XXXYYYZZZ'), list('abcabcabc')]) + + tm.assert_series_equal(res, exp) + + def test_subclass_unstack_multi(self): + # GH 15564 + df = tm.SubclassedDataFrame([ + [10, 11, 12, 13], + [20, 21, 22, 23], + [30, 31, 32, 33], + [40, 41, 42, 43]], + index=MultiIndex.from_tuples( + list(zip(list('AABB'), list('cdcd'))), + names=['aaa', 'ccc']), + columns=MultiIndex.from_tuples( + list(zip(list('WWXX'), list('yzyz'))), + names=['www', 'yyy'])) + + exp = 
tm.SubclassedDataFrame([ + [10, 20, 11, 21, 12, 22, 13, 23], + [30, 40, 31, 41, 32, 42, 33, 43]], + index=Index(['A', 'B'], name='aaa'), + columns=MultiIndex.from_tuples(list(zip( + list('WWWWXXXX'), list('yyzzyyzz'), list('cdcdcdcd'))), + names=['www', 'yyy', 'ccc'])) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack('ccc') + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame([ + [10, 30, 11, 31, 12, 32, 13, 33], + [20, 40, 21, 41, 22, 42, 23, 43]], + index=Index(['c', 'd'], name='ccc'), + columns=MultiIndex.from_tuples(list(zip( + list('WWWWXXXX'), list('yyzzyyzz'), list('ABABABAB'))), + names=['www', 'yyy', 'aaa'])) + + res = df.unstack('aaa') + tm.assert_frame_equal(res, exp) + + def test_subclass_unstack_multi_mixed(self): + # GH 15564 + df = tm.SubclassedDataFrame([ + [10, 11, 12.0, 13.0], + [20, 21, 22.0, 23.0], + [30, 31, 32.0, 33.0], + [40, 41, 42.0, 43.0]], + index=MultiIndex.from_tuples( + list(zip(list('AABB'), list('cdcd'))), + names=['aaa', 'ccc']), + columns=MultiIndex.from_tuples( + list(zip(list('WWXX'), list('yzyz'))), + names=['www', 'yyy'])) + + exp = tm.SubclassedDataFrame([ + [10, 20, 11, 21, 12.0, 22.0, 13.0, 23.0], + [30, 40, 31, 41, 32.0, 42.0, 33.0, 43.0]], + index=Index(['A', 'B'], name='aaa'), + columns=MultiIndex.from_tuples(list(zip( + list('WWWWXXXX'), list('yyzzyyzz'), list('cdcdcdcd'))), + names=['www', 'yyy', 'ccc'])) + + res = df.unstack() + tm.assert_frame_equal(res, exp) + + res = df.unstack('ccc') + tm.assert_frame_equal(res, exp) + + exp = tm.SubclassedDataFrame([ + [10, 30, 11, 31, 12.0, 32.0, 13.0, 33.0], + [20, 40, 21, 41, 22.0, 42.0, 23.0, 43.0]], + index=Index(['c', 'd'], name='ccc'), + columns=MultiIndex.from_tuples(list(zip( + list('WWWWXXXX'), list('yyzzyyzz'), list('ABABABAB'))), + names=['www', 'yyy', 'aaa'])) + + res = df.unstack('aaa') + tm.assert_frame_equal(res, exp) + + def test_subclass_pivot(self): + # GH 15564 + df = tm.SubclassedDataFrame({ + 'index': ['A', 'B', 'C', 
'C', 'B', 'A'], + 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'], + 'values': [1., 2., 3., 3., 2., 1.]}) + + pivoted = df.pivot( + index='index', columns='columns', values='values') + + expected = tm.SubclassedDataFrame({ + 'One': {'A': 1., 'B': 2., 'C': 3.}, + 'Two': {'A': 1., 'B': 2., 'C': 3.}}) + + expected.index.name, expected.columns.name = 'index', 'columns' + + tm.assert_frame_equal(pivoted, expected) + + def test_subclassed_melt(self): + # GH 15564 + cheese = tm.SubclassedDataFrame({ + 'first': ['John', 'Mary'], + 'last': ['Doe', 'Bo'], + 'height': [5.5, 6.0], + 'weight': [130, 150]}) + + melted = pd.melt(cheese, id_vars=['first', 'last']) + + expected = tm.SubclassedDataFrame([ + ['John', 'Doe', 'height', 5.5], + ['Mary', 'Bo', 'height', 6.0], + ['John', 'Doe', 'weight', 130], + ['Mary', 'Bo', 'weight', 150]], + columns=['first', 'last', 'variable', 'value']) + + tm.assert_frame_equal(melted, expected) + + def test_subclassed_wide_to_long(self): + # GH 9762 + + np.random.seed(123) + x = np.random.randn(3) + df = tm.SubclassedDataFrame({ + "A1970": {0: "a", 1: "b", 2: "c"}, + "A1980": {0: "d", 1: "e", 2: "f"}, + "B1970": {0: 2.5, 1: 1.2, 2: .7}, + "B1980": {0: 3.2, 1: 1.3, 2: .1}, + "X": dict(zip(range(3), x))}) + + df["id"] = df.index + exp_data = {"X": x.tolist() + x.tolist(), + "A": ['a', 'b', 'c', 'd', 'e', 'f'], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2]} + expected = tm.SubclassedDataFrame(exp_data) + expected = expected.set_index(['id', 'year'])[["X", "A", "B"]] + long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") + + tm.assert_frame_equal(long_frame, expected) diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py index 37c8d7343f7f1..60afaa3b821e1 100644 --- a/pandas/tests/series/test_subclass.py +++ b/pandas/tests/series/test_subclass.py @@ -13,24 +13,31 @@ def test_indexing_sliced(self): res = s.loc[['a', 'b']] exp = 
tm.SubclassedSeries([1, 2], index=list('ab')) tm.assert_series_equal(res, exp) - assert isinstance(res, tm.SubclassedSeries) res = s.iloc[[2, 3]] exp = tm.SubclassedSeries([3, 4], index=list('cd')) tm.assert_series_equal(res, exp) - assert isinstance(res, tm.SubclassedSeries) res = s.loc[['a', 'b']] exp = tm.SubclassedSeries([1, 2], index=list('ab')) tm.assert_series_equal(res, exp) - assert isinstance(res, tm.SubclassedSeries) def test_to_frame(self): s = tm.SubclassedSeries([1, 2, 3, 4], index=list('abcd'), name='xxx') res = s.to_frame() exp = tm.SubclassedDataFrame({'xxx': [1, 2, 3, 4]}, index=list('abcd')) tm.assert_frame_equal(res, exp) - assert isinstance(res, tm.SubclassedDataFrame) + + def test_subclass_unstack(self): + # GH 15564 + s = tm.SubclassedSeries( + [1, 2, 3, 4], index=[list('aabb'), list('xyxy')]) + + res = s.unstack() + exp = tm.SubclassedDataFrame( + {'x': [1, 3], 'y': [2, 4]}, index=['a', 'b']) + + tm.assert_frame_equal(res, exp) class TestSparseSeriesSubclassing(object):
- [x] closes #15563 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Basically picked from #15655 to support stack/unstack in subclassed DataFrames and Series.
https://api.github.com/repos/pandas-dev/pandas/pulls/18929
2017-12-24T14:07:49Z
2018-01-12T11:49:01Z
2018-01-12T11:49:01Z
2018-01-12T11:49:08Z
CLN: Remove Timestamp.offset
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f300deddebeb..6be58dff0eecb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -238,6 +238,7 @@ Removal of prior version deprecations/changes - :func:`read_csv` has dropped the ``as_recarray`` parameter (:issue:`13373`) - :func:`read_csv` has dropped the ``buffer_lines`` parameter (:issue:`13360`) - :func:`read_csv` has dropped the ``compact_ints`` and ``use_unsigned`` parameters (:issue:`13323`) +- The ``Timestamp`` class has dropped the ``offset`` attribute in favor of ``freq`` (:issue:`13593`) .. _whatsnew_0230.performance: diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 086657e8c97b4..683be4c9aa3a8 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -396,7 +396,7 @@ class NaTType(_NaT): """) fromordinal = _make_error_func('fromordinal', # noqa:E128 """ - Timestamp.fromordinal(ordinal, freq=None, tz=None, offset=None) + Timestamp.fromordinal(ordinal, freq=None, tz=None) passed an ordinal, translate and convert to a ts note: by definition there cannot be any tz info on the ordinal itself @@ -409,8 +409,6 @@ class NaTType(_NaT): Offset which Timestamp will have tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. 
- offset : str, DateOffset - Deprecated, use freq """) # _nat_methods diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 67045cde8661f..1792f852c9e1e 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -435,9 +435,9 @@ class Timestamp(_Timestamp): """ @classmethod - def fromordinal(cls, ordinal, freq=None, tz=None, offset=None): + def fromordinal(cls, ordinal, freq=None, tz=None): """ - Timestamp.fromordinal(ordinal, freq=None, tz=None, offset=None) + Timestamp.fromordinal(ordinal, freq=None, tz=None) passed an ordinal, translate and convert to a ts note: by definition there cannot be any tz info on the ordinal itself @@ -450,11 +450,9 @@ class Timestamp(_Timestamp): Offset which Timestamp will have tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time which Timestamp will have. - offset : str, DateOffset - Deprecated, use freq """ return cls(datetime.fromordinal(ordinal), - freq=freq, tz=tz, offset=offset) + freq=freq, tz=tz) @classmethod def now(cls, tz=None): @@ -529,8 +527,7 @@ class Timestamp(_Timestamp): object freq=None, tz=None, unit=None, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, - tzinfo=None, - object offset=None): + tzinfo=None): # The parameter list folds together legacy parameter names (the first # four) and positional and keyword parameter names from pydatetime. # @@ -554,15 +551,6 @@ class Timestamp(_Timestamp): cdef _TSObject ts - if offset is not None: - # deprecate offset kwd in 0.19.0, GH13593 - if freq is not None: - msg = "Can only specify freq or offset, not both" - raise TypeError(msg) - warnings.warn("offset is deprecated. 
Use freq instead", - FutureWarning) - freq = offset - if tzinfo is not None: if not PyTZInfo_Check(tzinfo): # tzinfo must be a datetime.tzinfo object, GH#17690 @@ -676,12 +664,6 @@ class Timestamp(_Timestamp): """ return self.tzinfo - @property - def offset(self): - warnings.warn(".offset is deprecated. Use .freq instead", - FutureWarning) - return self.freq - def __setstate__(self, state): self.value = state[0] self.freq = state[1] diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 7194849f19ebb..69ce7a42851a1 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -170,8 +170,9 @@ def test_NaT_docstrings(): ts_missing = [x for x in ts_names if x not in nat_names and not x.startswith('_')] ts_missing.sort() - ts_expected = ['freqstr', 'normalize', 'offset', - 'to_julian_date', 'to_period', 'tz'] + ts_expected = ['freqstr', 'normalize', + 'to_julian_date', + 'to_period', 'tz'] assert ts_missing == ts_expected ts_overlap = [x for x in nat_names if x in ts_names and diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index 19c09701f6106..4f4f2648d3834 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -307,36 +307,6 @@ def test_constructor_fromordinal(self): ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern') assert ts.to_pydatetime() == dt_tz - def test_constructor_offset_depr(self): - # see gh-12160 - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - ts = Timestamp('2011-01-01', offset='D') - assert ts.freq == 'D' - - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - assert ts.offset == 'D' - - msg = "Can only specify freq or offset, not both" - with tm.assert_raises_regex(TypeError, msg): - Timestamp('2011-01-01', offset='D', freq='D') - - def test_constructor_offset_depr_fromordinal(self): - # GH 12160 - base = datetime(2000, 1, 1) - - with 
tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - ts = Timestamp.fromordinal(base.toordinal(), offset='D') - assert Timestamp('2000-01-01') == ts - assert ts.freq == 'D' - assert base.toordinal() == ts.toordinal() - - msg = "Can only specify freq or offset, not both" - with tm.assert_raises_regex(TypeError, msg): - Timestamp.fromordinal(base.toordinal(), offset='D', freq='D') - class TestTimestamp(object):
Deprecated in v0.19.0 xref #13593
https://api.github.com/repos/pandas-dev/pandas/pulls/18927
2017-12-24T10:01:47Z
2017-12-26T22:13:27Z
2017-12-26T22:13:27Z
2017-12-27T07:24:37Z
CLN: ASV offset
diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py index 849776bf9a591..034e861e7fc01 100644 --- a/asv_bench/benchmarks/offset.py +++ b/asv_bench/benchmarks/offset.py @@ -2,51 +2,58 @@ from datetime import datetime import numpy as np - import pandas as pd -from pandas import date_range - try: - import pandas.tseries.holiday + import pandas.tseries.holiday # noqa except ImportError: pass hcal = pd.tseries.holiday.USFederalHolidayCalendar() +# These offests currently raise a NotImplimentedError with .apply_index() +non_apply = [pd.offsets.Day(), + pd.offsets.BYearEnd(), + pd.offsets.BYearBegin(), + pd.offsets.BQuarterEnd(), + pd.offsets.BQuarterBegin(), + pd.offsets.BMonthEnd(), + pd.offsets.BMonthBegin(), + pd.offsets.CustomBusinessDay(), + pd.offsets.CustomBusinessDay(calendar=hcal), + pd.offsets.CustomBusinessMonthBegin(calendar=hcal), + pd.offsets.CustomBusinessMonthEnd(calendar=hcal), + pd.offsets.CustomBusinessMonthEnd(calendar=hcal)] +other_offsets = [pd.offsets.YearEnd(), pd.offsets.YearBegin(), + pd.offsets.QuarterEnd(), pd.offsets.QuarterBegin(), + pd.offsets.MonthEnd(), pd.offsets.MonthBegin(), + pd.offsets.DateOffset(months=2, days=2), + pd.offsets.BusinessDay(), pd.offsets.SemiMonthEnd(), + pd.offsets.SemiMonthBegin()] +offsets = non_apply + other_offsets class ApplyIndex(object): - goal_time = 0.2 - params = [pd.offsets.YearEnd(), pd.offsets.YearBegin(), - pd.offsets.BYearEnd(), pd.offsets.BYearBegin(), - pd.offsets.QuarterEnd(), pd.offsets.QuarterBegin(), - pd.offsets.BQuarterEnd(), pd.offsets.BQuarterBegin(), - pd.offsets.MonthEnd(), pd.offsets.MonthBegin(), - pd.offsets.BMonthEnd(), pd.offsets.BMonthBegin()] - - def setup(self, param): - self.offset = param + goal_time = 0.2 - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') - self.ser = pd.Series(self.rng) + params = other_offsets + param_names = ['offset'] - def time_apply_index(self, param): - self.rng + self.offset + def setup(self, 
offset): + N = 10000 + self.rng = pd.date_range(start='1/1/2000', periods=N, freq='T') - def time_apply_series(self, param): - self.ser + self.offset + def time_apply_index(self, offset): + offset.apply_index(self.rng) class OnOffset(object): + goal_time = 0.2 - params = [pd.offsets.QuarterBegin(), pd.offsets.QuarterEnd(), - pd.offsets.BQuarterBegin(), pd.offsets.BQuarterEnd()] + params = offsets param_names = ['offset'] def setup(self, offset): - self.offset = offset self.dates = [datetime(2016, m, d) for m in [10, 11, 12] for d in [1, 2, 3, 28, 29, 30, 31] @@ -54,205 +61,62 @@ def setup(self, offset): def time_on_offset(self, offset): for date in self.dates: - self.offset.onOffset(date) - - -class DatetimeIndexArithmetic(object): - goal_time = 0.2 - - def setup(self): - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') - self.day_offset = pd.offsets.Day() - self.relativedelta_offset = pd.offsets.DateOffset(months=2, days=2) - self.busday_offset = pd.offsets.BusinessDay() - - def time_add_offset_delta(self): - self.rng + self.day_offset - - def time_add_offset_fast(self): - self.rng + self.relativedelta_offset - - def time_add_offset_slow(self): - self.rng + self.busday_offset - - -class SeriesArithmetic(object): - goal_time = 0.2 + offset.onOffset(date) - def setup(self): - self.N = 100000 - rng = date_range(start='20140101', freq='T', periods=self.N) - self.ser = pd.Series(rng) - self.day_offset = pd.offsets.Day() - self.relativedelta_offset = pd.offsets.DateOffset(months=2, days=2) - self.busday_offset = pd.offsets.BusinessDay() - def time_add_offset_delta(self): - self.ser + self.day_offset +class OffsetSeriesArithmetic(object): - def time_add_offset_fast(self): - self.ser + self.relativedelta_offset - - def time_add_offset_slow(self): - self.ser + self.busday_offset - - -class YearBegin(object): goal_time = 0.2 + params = offsets + param_names = ['offset'] - def setup(self): - self.date = datetime(2011, 1, 1) - self.year = 
pd.offsets.YearBegin() + def setup(self, offset): + N = 1000 + rng = pd.date_range(start='1/1/2000', periods=N, freq='T') + self.data = pd.Series(rng) - def time_timeseries_year_apply(self): - self.year.apply(self.date) + def time_add_offset(self, offset): + self.data + offset - def time_timeseries_year_incr(self): - self.date + self.year +class OffsetDatetimeIndexArithmetic(object): -class Day(object): goal_time = 0.2 + params = offsets + param_names = ['offset'] - def setup(self): - self.date = datetime(2011, 1, 1) - self.day = pd.offsets.Day() + def setup(self, offset): + N = 1000 + self.data = pd.date_range(start='1/1/2000', periods=N, freq='T') - def time_timeseries_day_apply(self): - self.day.apply(self.date) + def time_add_offset(self, offset): + self.data + offset - def time_timeseries_day_incr(self): - self.date + self.day +class OffestDatetimeArithmetic(object): -class CBDay(object): goal_time = 0.2 + params = offsets + param_names = ['offset'] - def setup(self): + def setup(self, offset): self.date = datetime(2011, 1, 1) self.dt64 = np.datetime64('2011-01-01 09:00Z') - self.cday = pd.offsets.CustomBusinessDay() - - def time_custom_bday_decr(self): - self.date - self.cday - - def time_custom_bday_incr(self): - self.date + self.cday - - def time_custom_bday_apply(self): - self.cday.apply(self.date) - - def time_custom_bday_apply_dt64(self): - self.cday.apply(self.dt64) - - -class CBDayHolidays(object): - goal_time = 0.2 - - def setup(self): - self.date = datetime(2011, 1, 1) - self.cdayh = pd.offsets.CustomBusinessDay(calendar=hcal) - - def time_custom_bday_cal_incr(self): - self.date + 1 * self.cdayh - - def time_custom_bday_cal_decr(self): - self.date - 1 * self.cdayh - - def time_custom_bday_cal_incr_n(self): - self.date + 10 * self.cdayh - - def time_custom_bday_cal_incr_neg_n(self): - self.date - 10 * self.cdayh - - -class CBMonthBegin(object): - goal_time = 0.2 - - def setup(self): - self.date = datetime(2011, 1, 1) - self.cmb = 
pd.offsets.CustomBusinessMonthBegin(calendar=hcal) - - def time_custom_bmonthbegin_decr_n(self): - self.date - (10 * self.cmb) - - def time_custom_bmonthbegin_incr_n(self): - self.date + (10 * self.cmb) - - -class CBMonthEnd(object): - goal_time = 0.2 - - def setup(self): - self.date = datetime(2011, 1, 1) - self.cme = pd.offsets.CustomBusinessMonthEnd(calendar=hcal) - - def time_custom_bmonthend_incr(self): - self.date + self.cme - - def time_custom_bmonthend_incr_n(self): - self.date + (10 * self.cme) - - def time_custom_bmonthend_decr_n(self): - self.date - (10 * self.cme) - - -class SemiMonthOffset(object): - goal_time = 0.2 - - def setup(self): - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') - # date is not on an offset which will be slowest case - self.date = datetime(2011, 1, 2) - self.semi_month_end = pd.offsets.SemiMonthEnd() - self.semi_month_begin = pd.offsets.SemiMonthBegin() - - def time_end_apply(self): - self.semi_month_end.apply(self.date) - - def time_end_incr(self): - self.date + self.semi_month_end - - def time_end_incr_n(self): - self.date + 10 * self.semi_month_end - - def time_end_decr(self): - self.date - self.semi_month_end - - def time_end_decr_n(self): - self.date - 10 * self.semi_month_end - - def time_end_apply_index(self): - self.semi_month_end.apply_index(self.rng) - - def time_end_incr_rng(self): - self.rng + self.semi_month_end - - def time_end_decr_rng(self): - self.rng - self.semi_month_end - - def time_begin_apply(self): - self.semi_month_begin.apply(self.date) - - def time_begin_incr(self): - self.date + self.semi_month_begin - def time_begin_incr_n(self): - self.date + 10 * self.semi_month_begin + def time_apply(self, offset): + offset.apply(self.date) - def time_begin_decr(self): - self.date - self.semi_month_begin + def time_apply_np_dt64(self, offset): + offset.apply(self.dt64) - def time_begin_decr_n(self): - self.date - 10 * self.semi_month_begin + def time_add(self, offset): + 
self.date + offset - def time_begin_apply_index(self): - self.semi_month_begin.apply_index(self.rng) + def time_add_10(self, offset): + self.date + (10 * offset) - def time_begin_incr_rng(self): - self.rng + self.semi_month_begin + def time_subtract(self, offset): + self.date - offset - def time_begin_decr_rng(self): - self.rng - self.semi_month_begin + def time_subtract_10(self, offset): + self.date - (10 * offset) diff --git a/ci/lint.sh b/ci/lint.sh index d678cd1ce5d70..5380c91831cec 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -24,7 +24,7 @@ if [ "$LINT" ]; then echo "Linting setup.py DONE" echo "Linting asv_bench/benchmarks/" - flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ijoprs]*.py --ignore=F811 + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ips]*.py --ignore=F811 if [ $? -ne "0" ]; then RET=1 fi
- Consolidated all the offsets to a list that was reused for all the benchmarks. - The `ApplyIndex` benchmark seemed to be benchmarking arithmetic, so I changed it to specifically use the `apply_index()` method ``` $ asv dev -b ^offset · Discovering benchmarks · Running 10 total benchmarks (1 commits * 1 environments * 10 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 10.00%] ··· Running offset.ApplyIndex.time_apply_index ok [ 10.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <YearEnd: month=12> 173ms <YearBegin: month=1> 6.94ms <QuarterEnd: startingMonth=3> 194ms <QuarterBegin: startingMonth=3> 7.15ms <MonthEnd> 1.28ms <MonthBegin> 1.19ms <DateOffset: kwds={'months': 2, 'days': 2}> 2.45ms <BusinessDay> 136ms <SemiMonthEnd: day_of_month=15> 138ms <SemiMonthBegin: day_of_month=15> 140ms ============================================= ======== [ 20.00%] ··· Running offset.OffestDatetimeArithmetic.time_add ok [ 20.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 106μs <BusinessYearEnd: month=12> 202μs <BusinessYearBegin: month=1> 123μs <BusinessQuarterEnd: startingMonth=3> 105μs <BusinessQuarterBegin: startingMonth=3> 124μs <BusinessMonthEnd> 121μs <BusinessMonthBegin> 138μs <CustomBusinessDay> 89.2μs <CustomBusinessDay> 89.6μs <CustomBusinessMonthBegin> 415μs <CustomBusinessMonthEnd> 442μs <CustomBusinessMonthEnd> 422μs <YearEnd: month=12> 63.0μs <YearBegin: month=1> 61.7μs <QuarterEnd: startingMonth=3> 105μs <QuarterBegin: startingMonth=3> 150μs <MonthEnd> 176μs <MonthBegin> 121μs <DateOffset: kwds={'months': 2, 'days': 2}> 69.2μs <BusinessDay> 84.9μs <SemiMonthEnd: day_of_month=15> 179μs <SemiMonthBegin: day_of_month=15> 177μs ============================================= 
======== [ 30.00%] ··· Running offset.OffestDatetimeArithmetic.time_add_10 ok [ 30.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 121μs <BusinessYearEnd: month=12> 161μs <BusinessYearBegin: month=1> 162μs <BusinessQuarterEnd: startingMonth=3> 207μs <BusinessQuarterBegin: startingMonth=3> 137μs <BusinessMonthEnd> 141μs <BusinessMonthBegin> 156μs <CustomBusinessDay> 111μs <CustomBusinessDay> 121μs <CustomBusinessMonthBegin> 458μs <CustomBusinessMonthEnd> 464μs <CustomBusinessMonthEnd> 461μs <YearEnd: month=12> 129μs <YearBegin: month=1> 99.1μs <QuarterEnd: startingMonth=3> 115μs <QuarterBegin: startingMonth=3> 219μs <MonthEnd> 192μs <MonthBegin> 148μs <DateOffset: kwds={'months': 2, 'days': 2}> 280μs <BusinessDay> 101μs <SemiMonthEnd: day_of_month=15> 187μs <SemiMonthBegin: day_of_month=15> 185μs ============================================= ======== [ 40.00%] ··· Running offset.OffestDatetimeArithmetic.time_apply ok [ 40.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 98.1μs <BusinessYearEnd: month=12> 192μs <BusinessYearBegin: month=1> 116μs <BusinessQuarterEnd: startingMonth=3> 96.5μs <BusinessQuarterBegin: startingMonth=3> 116μs <BusinessMonthEnd> 118μs <BusinessMonthBegin> 129μs <CustomBusinessDay> 79.8μs <CustomBusinessDay> 81.3μs <CustomBusinessMonthBegin> 526μs <CustomBusinessMonthEnd> 416μs <CustomBusinessMonthEnd> 414μs <YearEnd: month=12> 53.1μs <YearBegin: month=1> 51.8μs <QuarterEnd: startingMonth=3> 95.0μs <QuarterBegin: startingMonth=3> 137μs <MonthEnd> 165μs <MonthBegin> 113μs <DateOffset: kwds={'months': 2, 'days': 2}> 61.1μs <BusinessDay> 76.8μs <SemiMonthEnd: day_of_month=15> 170μs <SemiMonthBegin: day_of_month=15> 168μs ============================================= ======== [ 50.00%] ··· Running offset.OffestDatetimeArithmetic.time_apply_np_dt64 ok [ 50.00%] ···· 
============================================= ======== offset --------------------------------------------- -------- <Day> 104μs <BusinessYearEnd: month=12> 199μs <BusinessYearBegin: month=1> 121μs <BusinessQuarterEnd: startingMonth=3> 105μs <BusinessQuarterBegin: startingMonth=3> 123μs <BusinessMonthEnd> 121μs <BusinessMonthBegin> 137μs <CustomBusinessDay> 86.2μs <CustomBusinessDay> 88.0μs <CustomBusinessMonthBegin> 411μs <CustomBusinessMonthEnd> 420μs <CustomBusinessMonthEnd> 421μs <YearEnd: month=12> 60.4μs <YearBegin: month=1> 58.5μs <QuarterEnd: startingMonth=3> 103μs <QuarterBegin: startingMonth=3> 119μs <MonthEnd> 174μs <MonthBegin> 118μs <DateOffset: kwds={'months': 2, 'days': 2}> 69.0μs <BusinessDay> 82.6μs <SemiMonthEnd: day_of_month=15> 178μs <SemiMonthBegin: day_of_month=15> 173μs ============================================= ======== [ 60.00%] ··· Running offset.OffestDatetimeArithmetic.time_subtract ok [ 60.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 125μs <BusinessYearEnd: month=12> 165μs <BusinessYearBegin: month=1> 165μs <BusinessQuarterEnd: startingMonth=3> 117μs <BusinessQuarterBegin: startingMonth=3> 136μs <BusinessMonthEnd> 145μs <BusinessMonthBegin> 144μs <CustomBusinessDay> 114μs <CustomBusinessDay> 105μs <CustomBusinessMonthBegin> 380μs <CustomBusinessMonthEnd> 564μs <CustomBusinessMonthEnd> 569μs <YearEnd: month=12> 104μs <YearBegin: month=1> 104μs <QuarterEnd: startingMonth=3> 114μs <QuarterBegin: startingMonth=3> 130μs <MonthEnd> 209μs <MonthBegin> 140μs <DateOffset: kwds={'months': 2, 'days': 2}> 129μs <BusinessDay> 85.2μs <SemiMonthEnd: day_of_month=15> 191μs <SemiMonthBegin: day_of_month=15> 188μs ============================================= ======== [ 70.00%] ··· Running offset.OffestDatetimeArithmetic.time_subtract_10 ok [ 70.00%] ···· ============================================= ======= offset --------------------------------------------- 
------- <Day> 139μs <BusinessYearEnd: month=12> 198μs <BusinessYearBegin: month=1> 196μs <BusinessQuarterEnd: startingMonth=3> 126μs <BusinessQuarterBegin: startingMonth=3> 146μs <BusinessMonthEnd> 157μs <BusinessMonthBegin> 158μs <CustomBusinessDay> 144μs <CustomBusinessDay> 114μs <CustomBusinessMonthBegin> 386μs <CustomBusinessMonthEnd> 482μs <CustomBusinessMonthEnd> 478μs <YearEnd: month=12> 156μs <YearBegin: month=1> 136μs <QuarterEnd: startingMonth=3> 123μs <QuarterBegin: startingMonth=3> 140μs <MonthEnd> 210μs <MonthBegin> 154μs <DateOffset: kwds={'months': 2, 'days': 2}> 516μs <BusinessDay> 115μs <SemiMonthEnd: day_of_month=15> 197μs <SemiMonthBegin: day_of_month=15> 195μs ============================================= ======= [ 80.00%] ··· Running offset.OffsetDatetimeIndexArithmetic.time_add_offset ok [ 80.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 1.14ms <BusinessYearEnd: month=12> 1.97s <BusinessYearBegin: month=1> 1.08s <BusinessQuarterEnd: startingMonth=3> 920ms <BusinessQuarterBegin: startingMonth=3> 1.10s <BusinessMonthEnd> 1.07s <BusinessMonthBegin> 1.14s <CustomBusinessDay> 752ms <CustomBusinessDay> 754ms <CustomBusinessMonthBegin> 4.56s <CustomBusinessMonthEnd> 4.04s <CustomBusinessMonthEnd> 4.02s <YearEnd: month=12> 172ms <YearBegin: month=1> 7.10ms <QuarterEnd: startingMonth=3> 191ms <QuarterBegin: startingMonth=3> 7.13ms <MonthEnd> 1.48ms <MonthBegin> 1.40ms <DateOffset: kwds={'months': 2, 'days': 2}> 3.09ms <BusinessDay> 137ms <SemiMonthEnd: day_of_month=15> 141ms <SemiMonthBegin: day_of_month=15> 136ms ============================================= ======== [ 90.00%] ··· Running offset.OffsetSeriesArithmetic.time_add_offset ok [ 90.00%] ···· ============================================= ======= offset --------------------------------------------- ------- <Day> 416ms <BusinessYearEnd: month=12> 2.39s <BusinessYearBegin: month=1> 1.50s <BusinessQuarterEnd: 
startingMonth=3> 1.34s <BusinessQuarterBegin: startingMonth=3> 1.51s <BusinessMonthEnd> 1.49s <BusinessMonthBegin> 1.56s <CustomBusinessDay> 1.17s <CustomBusinessDay> 1.18s <CustomBusinessMonthBegin> 5.02s <CustomBusinessMonthEnd> 4.49s <CustomBusinessMonthEnd> 4.46s <YearEnd: month=12> 588ms <YearBegin: month=1> 426ms <QuarterEnd: startingMonth=3> 611ms <QuarterBegin: startingMonth=3> 425ms <MonthEnd> 422ms <MonthBegin> 420ms <DateOffset: kwds={'months': 2, 'days': 2}> 422ms <BusinessDay> 549ms <SemiMonthEnd: day_of_month=15> 559ms <SemiMonthBegin: day_of_month=15> 561ms ============================================= ======= [100.00%] ··· Running offset.OnOffset.time_on_offset ok [100.00%] ···· ============================================= ======== offset --------------------------------------------- -------- <Day> 28.1μs <BusinessYearEnd: month=12> 6.93ms <BusinessYearBegin: month=1> 5.21ms <BusinessQuarterEnd: startingMonth=3> 5.61ms <BusinessQuarterBegin: startingMonth=3> 4.61ms <BusinessMonthEnd> 5.33ms <BusinessMonthBegin> 34.6μs <CustomBusinessDay> 409μs <CustomBusinessDay> 408μs <CustomBusinessMonthBegin> 16.1ms <CustomBusinessMonthEnd> 16.3ms <CustomBusinessMonthEnd> 16.3ms <YearEnd: month=12> 34.7μs <YearBegin: month=1> 21.3μs <QuarterEnd: startingMonth=3> 252μs <QuarterBegin: startingMonth=3> 4.48ms <MonthEnd> 34.5μs <MonthBegin> 18.8μs <DateOffset: kwds={'months': 2, 'days': 2}> 21.4μs <BusinessDay> 21.0μs <SemiMonthEnd: day_of_month=15> 38.6μs <SemiMonthBegin: day_of_month=15> 22.6μs ============================================= ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18926
2017-12-24T08:26:34Z
2017-12-31T14:54:14Z
2017-12-31T14:54:14Z
2017-12-31T23:55:21Z
DOC: Clarify dispatch behavior of read_sql
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 26874a57c66f7..0d398ad3135a6 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -337,15 +337,22 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, """ Read SQL query or database table into a DataFrame. + This function is a convenience wrapper around ``read_sql_table`` and + ``read_sql_query`` (for backward compatibility). It will delegate + to the specific function depending on the provided input. A SQL query + will be routed to ``read_sql_query``, while a database table name will + be routed to ``read_sql_table``. Note that the delegated function might + have more specific notes about their functionality not listed here. + Parameters ---------- sql : string or SQLAlchemy Selectable (select or text object) - SQL query to be executed. - con : SQLAlchemy connectable(engine/connection) or database string URI + SQL query to be executed or a table name. + con : SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode) + Using SQLAlchemy makes it possible to use any DB supported by that - library. - If a DBAPI2 object, only sqlite3 is supported. + library. If a DBAPI2 object, only sqlite3 is supported. index_col : string or list of strings, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : boolean, default True @@ -377,14 +384,6 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, ------- DataFrame - Notes - ----- - This function is a convenience wrapper around ``read_sql_table`` and - ``read_sql_query`` (and for backward compatibility) and will delegate - to the specific function depending on the provided input (database - table name or SQL query). The delegated function might have more specific - notes about their functionality not listed here. - See also -------- read_sql_table : Read SQL database table into a DataFrame.
Wasn't particularly clear or prominent in the docs. Closes #18861.
https://api.github.com/repos/pandas-dev/pandas/pulls/18925
2017-12-24T06:45:31Z
2017-12-26T22:14:54Z
2017-12-26T22:14:54Z
2017-12-27T07:24:45Z
BUG: fix issue with concat creating SparseFrame if not all series are sparse.
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1890636bc8e1a..42f5e65bd6974 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -537,6 +537,7 @@ Reshaping - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) +- Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) - diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 3e54ce61cd5b2..ddecbe85087d8 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -19,7 +19,7 @@ _TD_DTYPE) from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCTimedeltaIndex, - ABCPeriodIndex, ABCRangeIndex) + ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame) def get_dtype_kinds(l): @@ -89,14 +89,16 @@ def _get_series_result_type(result, objs=None): def _get_frame_result_type(result, objs): """ return appropriate class of DataFrame-like concat - if any block is SparseBlock, return SparseDataFrame + if all blocks are SparseBlock, return SparseDataFrame otherwise, return 1st obj """ - if any(b.is_sparse for b in result.blocks): + + if result.blocks and all(b.is_sparse for b in result.blocks): from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: - return objs[0] + return next(obj for obj in objs if not isinstance(obj, + ABCSparseDataFrame)) def _concat_compat(to_concat, axis=0): diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 6fae09c43d2be..b032cb6f14d4c 100644 --- a/pandas/core/dtypes/generic.py +++ 
b/pandas/core/dtypes/generic.py @@ -43,6 +43,8 @@ def _check(cls, inst): ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", )) ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", )) +ABCSparseDataFrame = create_pandas_abc_type("ABCSparseDataFrame", "_subtyp", + ("sparse_frame", )) ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",)) ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp", ('sparse_series', diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index 58cb182e7d403..53f92b98f022e 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -18,6 +18,7 @@ class TestABCClasses(object): df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index) sparse_series = pd.Series([1, 2, 3]).to_sparse() sparse_array = pd.SparseArray(np.random.randn(10)) + sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]}) def test_abc_types(self): assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex) @@ -37,6 +38,7 @@ def test_abc_types(self): assert isinstance(self.df.to_panel(), gt.ABCPanel) assert isinstance(self.sparse_series, gt.ABCSparseSeries) assert isinstance(self.sparse_array, gt.ABCSparseArray) + assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame) assert isinstance(self.categorical, gt.ABCCategorical) assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod) diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 22925cceb30d1..c9d079421532f 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -454,6 +454,15 @@ def test_dataframe_dummies_preserve_categorical_dtype(self, dtype): tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize('sparse', [True, False]) + def test_get_dummies_dont_sparsify_all_columns(self, sparse): + # GH18914 + df = DataFrame.from_items([('GDP', [1, 2]), ('Nation', ['AB', 'CD'])]) + df = 
get_dummies(df, columns=['Nation'], sparse=sparse) + df2 = df.reindex(columns=['GDP']) + + tm.assert_frame_equal(df[['GDP']], df2) + class TestCategoricalReshape(object): diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py index 15639fbe156c6..70fd1da529d46 100644 --- a/pandas/tests/sparse/test_combine_concat.py +++ b/pandas/tests/sparse/test_combine_concat.py @@ -1,8 +1,10 @@ # pylint: disable-msg=E1101,W0612 +import pytest import numpy as np import pandas as pd import pandas.util.testing as tm +import itertools class TestSparseSeriesConcat(object): @@ -317,37 +319,52 @@ def test_concat_axis1(self): assert isinstance(res, pd.SparseDataFrame) tm.assert_frame_equal(res.to_dense(), exp) - def test_concat_sparse_dense(self): - sparse = self.dense1.to_sparse() - - res = pd.concat([sparse, self.dense2]) - exp = pd.concat([self.dense1, self.dense2]) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res.to_dense(), exp) - - res = pd.concat([self.dense2, sparse]) - exp = pd.concat([self.dense2, self.dense1]) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res.to_dense(), exp) - - sparse = self.dense1.to_sparse(fill_value=0) - - res = pd.concat([sparse, self.dense2]) - exp = pd.concat([self.dense1, self.dense2]) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res.to_dense(), exp) - - res = pd.concat([self.dense2, sparse]) - exp = pd.concat([self.dense2, self.dense1]) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res.to_dense(), exp) - - res = pd.concat([self.dense3, sparse], axis=1) - exp = pd.concat([self.dense3, self.dense1], axis=1) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res, exp) - - res = pd.concat([sparse, self.dense3], axis=1) - exp = pd.concat([self.dense1, self.dense3], axis=1) - assert isinstance(res, pd.SparseDataFrame) - tm.assert_frame_equal(res, exp) + 
@pytest.mark.parametrize('fill_value,sparse_idx,dense_idx', + itertools.product([None, 0, 1, np.nan], + [0, 1], + [1, 0])) + def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx): + frames = [self.dense1, self.dense2] + sparse_frame = [frames[dense_idx], + frames[sparse_idx].to_sparse(fill_value=fill_value)] + dense_frame = [frames[dense_idx], frames[sparse_idx]] + + # This will try both directions sparse + dense and dense + sparse + for _ in range(2): + res = pd.concat(sparse_frame) + exp = pd.concat(dense_frame) + + assert isinstance(res, pd.SparseDataFrame) + tm.assert_frame_equal(res.to_dense(), exp) + + sparse_frame = sparse_frame[::-1] + dense_frame = dense_frame[::-1] + + @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx', + itertools.product([None, 0, 1, np.nan], + [0, 1], + [1, 0])) + def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx): + # See GH16874, GH18914 and #18686 for why this should be a DataFrame + + frames = [self.dense1, self.dense3] + + sparse_frame = [frames[dense_idx], + frames[sparse_idx].to_sparse(fill_value=fill_value)] + dense_frame = [frames[dense_idx], frames[sparse_idx]] + + # This will try both directions sparse + dense and dense + sparse + for _ in range(2): + res = pd.concat(sparse_frame, axis=1) + exp = pd.concat(dense_frame, axis=1) + + for column in frames[dense_idx].columns: + if dense_idx == sparse_idx: + tm.assert_frame_equal(res[column], exp[column]) + else: + tm.assert_series_equal(res[column], exp[column]) + + tm.assert_frame_equal(res, exp) + + sparse_frame = sparse_frame[::-1] + dense_frame = dense_frame[::-1]
Ok so after trying a few things out. This seems to be the problem. When concatting multiple data frames together if any of the containing series are sparse then the entire data frame becomes sparse (or SparseDataFrame). This is in fact not what we want. We want a DataFrame that contains a SparseSeries and a Series. closes #18914, closes #18686 closes #16874 closes #18551 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18924
2017-12-24T02:41:15Z
2018-02-01T13:09:19Z
2018-02-01T13:09:18Z
2018-02-01T13:20:14Z
COMPAT-18589: Supporting axis in Series.rename
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 72f63a4da0f4d..a8d35602b9185 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -840,6 +840,7 @@ Reshaping - Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) - Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - Bug in :func:`DataFrame.join` which does an *outer* instead of a *left* join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) +- :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) Other ^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 35f866c9e7d58..297450417e3cf 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -863,6 +863,9 @@ def rename(self, *args, **kwargs): copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) level = kwargs.pop('level', None) + axis = kwargs.pop('axis', None) + if axis is not None: + axis = self._get_axis_number(axis) if kwargs: raise TypeError('rename() got an unexpected keyword ' diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 714e43a4af1f8..dce4e82cbdcf1 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -81,6 +81,14 @@ def test_rename_set_name_inplace(self): exp = np.array(['a', 'b', 'c'], dtype=np.object_) tm.assert_numpy_array_equal(s.index.values, exp) + def test_rename_axis_supported(self): + # Supporting axis for compatibility, detailed in GH-18589 + s = Series(range(5)) + s.rename({}, axis=0) + s.rename({}, axis='index') + with tm.assert_raises_regex(ValueError, 'No axis named 5'): + s.rename({}, axis=5) + def test_set_name_attribute(self): s = Series([1, 2, 3]) s2 = Series([1, 2, 3], name='bar')
- [x] closes #18589 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18923
2017-12-23T19:02:31Z
2018-02-14T11:12:08Z
2018-02-14T11:12:08Z
2018-02-14T12:05:58Z
DEPR: Added is_copy to NDFrame._deprecations
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e9dd82eb64834..f2dbb3ef4d32a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -115,7 +115,7 @@ class NDFrame(PandasObject, SelectionMixin): _internal_names_set = set(_internal_names) _accessors = frozenset([]) _deprecations = frozenset(['as_blocks', 'blocks', - 'consolidate', 'convert_objects']) + 'consolidate', 'convert_objects', 'is_copy']) _metadata = [] _is_copy = None
Should have been part of PR #18801
https://api.github.com/repos/pandas-dev/pandas/pulls/18922
2017-12-23T15:54:43Z
2017-12-23T19:42:27Z
2017-12-23T19:42:27Z
2017-12-23T19:42:31Z
Breaking changes for sum / prod of empty / all-NA
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 2d30e00142846..8617aa6c03e1f 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -3,12 +3,218 @@ v0.22.0 ------- -This is a major release from 0.21.1 and includes a number of API changes, -deprecations, new features, enhancements, and performance improvements along -with a large number of bug fixes. We recommend that all users upgrade to this -version. +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note (singular!). .. _whatsnew_0220.api_breaking: Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The +summary is that + +* The sum of an empty or all-*NA* ``Series`` is now ``0`` +* The product of an empty or all-*NA* ``Series`` is now ``1`` +* We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. + +Some background: In pandas 0.21, we fixed a long-standing inconsistency +in the return value of all-*NA* series depending on whether or not bottleneck +was installed. See :ref:`whatsnew_0210.api_breaking.bottleneck`. At the same +time, we changed the sum and prod of an empty ``Series`` to also be ``NaN``. + +Based on feedback, we've partially reverted those changes. + +Arithmetic Operations +^^^^^^^^^^^^^^^^^^^^^ + +The default sum for empty or all-*NA* ``Series`` is now ``0``. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [1]: pd.Series([]).sum() + Out[1]: nan + + In [2]: pd.Series([np.nan]).sum() + Out[2]: nan + +*pandas 0.22.0* + +.. 
ipython:: python + + pd.Series([]).sum() + pd.Series([np.nan]).sum() + +The default behavior is the same as pandas 0.20.3 with bottleneck installed. It +also matches the behavior of NumPy's ``np.nansum`` on empty and all-*NA* arrays. + +To have the sum of an empty series return ``NaN`` (the default behavior of +pandas 0.20.3 without bottleneck, or pandas 0.21.x), use the ``min_count`` +keyword. + +.. ipython:: python + + pd.Series([]).sum(min_count=1) + +Thanks to the ``skipna`` parameter, the ``.sum`` on an all-*NA* +series is conceptually the same as the ``.sum`` of an empty one with +``skipna=True`` (the default). + +.. ipython:: python + + pd.Series([np.nan]).sum(min_count=1) # skipna=True by default + +The ``min_count`` parameter refers to the minimum number of *non-null* values +required for a non-NA sum or product. + +:meth:`Series.prod` has been updated to behave the same as :meth:`Series.sum`, +returning ``1`` instead. + +.. ipython:: python + + pd.Series([]).prod() + pd.Series([np.nan]).prod() + pd.Series([]).prod(min_count=1) + +These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well. +Finally, a few less obvious places in pandas are affected by this change. + +Grouping by a Categorical +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Grouping by a ``Categorical`` and summing now returns ``0`` instead of +``NaN`` for categories with no observations. The product now returns ``1`` +instead of ``NaN``. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [8]: grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + + In [9]: pd.Series([1, 2]).groupby(grouper).sum() + Out[9]: + a 3.0 + b NaN + dtype: float64 + +*pandas 0.22* + +.. ipython:: python + + grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + pd.Series([1, 2]).groupby(grouper).sum() + +To restore the 0.21 behavior of returning ``NaN`` for unobserved groups, +use ``min_count>=1``. + +.. 
ipython:: python + + pd.Series([1, 2]).groupby(grouper).sum(min_count=1) + +Resample +^^^^^^^^ + +The sum and product of all-*NA* bins has changed from ``NaN`` to ``0`` for +sum and ``1`` for product. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [11]: s = pd.Series([1, 1, np.nan, np.nan], + ...: index=pd.date_range('2017', periods=4)) + ...: s + Out[11]: + 2017-01-01 1.0 + 2017-01-02 1.0 + 2017-01-03 NaN + 2017-01-04 NaN + Freq: D, dtype: float64 + + In [12]: s.resample('2d').sum() + Out[12]: + 2017-01-01 2.0 + 2017-01-03 NaN + Freq: 2D, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + s = pd.Series([1, 1, np.nan, np.nan], + index=pd.date_range('2017', periods=4)) + s.resample('2d').sum() + +To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``. + +.. ipython:: python + + s.resample('2d').sum(min_count=1) + +In particular, upsampling and taking the sum or product is affected, as +upsampling introduces missing values even if the original series was +entirely valid. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [14]: idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + + In [15]: pd.Series([1, 2], index=idx).resample('12H').sum() + Out[15]: + 2017-01-01 00:00:00 1.0 + 2017-01-01 12:00:00 NaN + 2017-01-02 00:00:00 2.0 + Freq: 12H, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + pd.Series([1, 2], index=idx).resample("12H").sum() + +Once again, the ``min_count`` keyword is available to restore the 0.21 behavior. + +.. ipython:: python + + pd.Series([1, 2], index=idx).resample("12H").sum(min_count=1) + +Rolling and Expanding +^^^^^^^^^^^^^^^^^^^^^ + +Rolling and expanding already have a ``min_periods`` keyword that behaves +similar to ``min_count``. The only case that changes is when doing a rolling +or expanding sum with ``min_periods=0``. Previously this returned ``NaN``, +when fewer than ``min_periods`` non-*NA* values were in the window. 
Now it +returns ``0``. + +*pandas 0.21.1* + +.. code-block:: ipython + + In [17]: s = pd.Series([np.nan, np.nan]) + + In [18]: s.rolling(2, min_periods=0).sum() + Out[18]: + 0 NaN + 1 NaN + dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + s = pd.Series([np.nan, np.nan]) + s.rolling(2, min_periods=0).sum() + +The default behavior of ``min_periods=None``, implying that ``min_periods`` +equals the window size, is unchanged. diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 16b7cbff44e03..14d47398ac1df 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -37,7 +37,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, - Py_ssize_t min_count=1): + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -101,7 +101,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, ndarray[int64_t] labels, - Py_ssize_t min_count=1): + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index ecce45742afa7..e46bf24c36f18 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -220,14 +220,16 @@ cdef class VariableWindowIndexer(WindowIndexer): right_closed: bint right endpoint closedness True if the right endpoint is closed, False if open - + floor: optional + unit for flooring the unit """ def __init__(self, ndarray input, int64_t win, int64_t minp, - bint left_closed, bint right_closed, ndarray index): + bint left_closed, bint right_closed, ndarray index, + object floor=None): self.is_variable = 1 self.N = len(index) - self.minp = _check_minp(win, minp, self.N) + self.minp = _check_minp(win, minp, self.N, floor=floor) self.start = np.empty(self.N, dtype='int64') self.start.fill(-1) @@ -342,7 +344,7 @@ def get_window_indexer(input, win, 
minp, index, closed, if index is not None: indexer = VariableWindowIndexer(input, win, minp, left_closed, - right_closed, index) + right_closed, index, floor) elif use_mock: indexer = MockFixedWindowIndexer(input, win, minp, left_closed, right_closed, index, floor) @@ -441,7 +443,7 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, object index, object closed): cdef: double val, prev_x, sum_x = 0 - int64_t s, e + int64_t s, e, range_endpoint int64_t nobs = 0, i, j, N bint is_variable ndarray[int64_t] start, end @@ -449,7 +451,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, - closed) + closed, + floor=0) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -489,13 +492,15 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, # fixed window + range_endpoint = int_max(minp, 1) - 1 + with nogil: - for i in range(0, minp - 1): + for i in range(0, range_endpoint): add_sum(input[i], &nobs, &sum_x) output[i] = NaN - for i in range(minp - 1, N): + for i in range(range_endpoint, N): val = input[i] add_sum(val, &nobs, &sum_x) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2acf64f1d9f74..c5359ba2c5ea1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7619,48 +7619,48 @@ def _doc_parms(cls): _sum_examples = """\ Examples -------- -By default, the sum of an empty series is ``NaN``. +By default, the sum of an empty or all-NA Series is ``0``. ->>> pd.Series([]).sum() # min_count=1 is the default -nan +>>> pd.Series([]).sum() # min_count=0 is the default +0.0 This can be controlled with the ``min_count`` parameter. For example, if -you'd like the sum of an empty series to be 0, pass ``min_count=0``. +you'd like the sum of an empty series to be NaN, pass ``min_count=1``. 
->>> pd.Series([]).sum(min_count=0) -0.0 +>>> pd.Series([]).sum(min_count=1) +nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).sum() -nan - ->>> pd.Series([np.nan]).sum(min_count=0) 0.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan """ _prod_examples = """\ Examples -------- -By default, the product of an empty series is ``NaN`` +By default, the product of an empty or all-NA Series is ``1`` >>> pd.Series([]).prod() -nan +1.0 This can be controlled with the ``min_count`` parameter ->>> pd.Series([]).prod(min_count=0) -1.0 +>>> pd.Series([]).prod(min_count=1) +nan Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and empty series identically. >>> pd.Series([np.nan]).prod() -nan - ->>> pd.Series([np.nan]).sum(min_count=0) 1.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan """ @@ -7683,7 +7683,7 @@ def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, examples=examples) @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, - min_count=1, + min_count=0, **kwargs): nv.validate_stat_func(tuple(), kwargs, fname=name) if skipna is None: diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 041239ed06d88..06b7dbb4ecf7b 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1363,8 +1363,8 @@ def last(x): else: return last(x) - cls.sum = groupby_function('sum', 'add', np.sum, min_count=1) - cls.prod = groupby_function('prod', 'prod', np.prod, min_count=1) + cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) + cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 88f69f6ff2e14..d1a355021f388 100644 --- 
a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -109,6 +109,11 @@ def f(values, axis=None, skipna=True, **kwds): try: if values.size == 0 and kwds.get('min_count') is None: # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` return _na_for_min_count(values, axis) if (_USE_BOTTLENECK and skipna and @@ -281,6 +286,20 @@ def _wrap_results(result, dtype): def _na_for_min_count(values, axis): + """Return the missing value for `values` + + Parameters + ---------- + values : ndarray + axis : int or None + axis for the reduction + + Returns + ------- + result : scalar or ndarray + For 1-D values, returns a scalar of the correct missing type. + For 2-D values, returns a 1-D array where each element is missing. + """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype('float64') @@ -308,7 +327,7 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nansum(values, axis=None, skipna=True, min_count=1): +def nansum(values, axis=None, skipna=True, min_count=0): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -645,7 +664,7 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True, min_count=1): +def nanprod(values, axis=None, skipna=True, min_count=0): mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() diff --git a/pandas/core/resample.py b/pandas/core/resample.py index a30c727ecb87c..5447ce7470b9d 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -629,7 +629,7 @@ def size(self): # downsample methods for method in ['sum', 'prod']: - def f(self, _method=method, min_count=1, *args, **kwargs): + def 
f(self, _method=method, min_count=0, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) return self._downsample(_method, min_count=min_count) f.__doc__ = getattr(GroupBy, method).__doc__ diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 80e9acd0d2281..69f1aeddc43e9 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -478,7 +478,8 @@ def test_nunique(self): Series({0: 1, 1: 3, 2: 2})) def test_sum(self): - self._check_stat_op('sum', np.sum, has_numeric_only=True) + self._check_stat_op('sum', np.sum, has_numeric_only=True, + skipna_alternative=np.nansum) # mixed types (with upcasting happening) self._check_stat_op('sum', np.sum, @@ -753,7 +754,8 @@ def alt(x): def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False): + check_dates=False, check_less_precise=False, + skipna_alternative=None): if frame is None: frame = self.frame # set some NAs @@ -774,15 +776,11 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, assert len(result) if has_skipna: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) - def wrapper(x): return alternative(x.values) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper), @@ -834,8 +832,11 @@ def wrapper(x): r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) if name in ['sum', 'prod']: - assert np.isnan(r0).all() - assert np.isnan(r1).all() + unit = int(name == 'prod') + expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) def test_mode(self): df = 
pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], @@ -982,11 +983,16 @@ def test_sum_prod_nanops(self, method, unit): df = pd.DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + # min_count=1 result = getattr(df, method)(min_count=1) expected = pd.Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) + # min_count=0 result = getattr(df, method)(min_count=0) expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') tm.assert_series_equal(result, expected) @@ -995,6 +1001,7 @@ def test_sum_prod_nanops(self, method, unit): expected = pd.Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) + # min_count > 1 df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) result = getattr(df, method)(min_count=5) expected = pd.Series(result, index=['A', 'B']) @@ -1004,6 +1011,29 @@ def test_sum_prod_nanops(self, method, unit): expected = pd.Series(result, index=['A', 'B']) tm.assert_series_equal(result, expected) + def test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [0, 0], + "b": [0, np.nan], + "c": [np.nan, np.nan]}) + + df2 = df.apply(pd.to_timedelta) + + # 0 by default + result = df2.sum() + expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + def test_sum_object(self): values = self.frame.values.astype(int) frame = DataFrame(values, index=self.frame.index, diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 07ecc085098bf..cca21fddd116e 100644 
--- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -813,8 +813,6 @@ def test__cython_agg_general(self): ('mean', np.mean), ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), ('min', np.min), ('max', np.max), ] ) @@ -824,12 +822,7 @@ def test_cython_agg_empty_buckets(self, op, targop): # calling _cython_agg_general directly, instead of via the user API # which sets different values for min_count, so do that here. - if op in ('add', 'prod'): - min_count = 1 - else: - min_count = -1 - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general( - op, min_count=min_count) + result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) try: tm.assert_frame_equal(result, expected) @@ -837,6 +830,40 @@ def test_cython_agg_empty_buckets(self, op, targop): exc.args += ('operation: %s' % op,) raise + def test_cython_agg_empty_buckets_nanops(self): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + # add / sum + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") + def test_agg_category_nansum(self): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + 
categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) + def test_agg_over_numpy_arrays(self): # GH 3788 df = pd.DataFrame([[1, np.array([10, 20, 30])], diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 5e3d2bb9cf091..d4f35aa8755d1 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -37,7 +37,7 @@ def test_groupby(self): # single grouper gb = df.groupby("A") exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) - expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) + expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) result = gb.sum() tm.assert_frame_equal(result, expected) @@ -670,9 +670,9 @@ def test_empty_sum(self): 'B': [1, 2, 1]}) expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') - # NA by default + # 0 by default result = df.groupby("A").B.sum() - expected = pd.Series([3, 1, np.nan], expected_idx, name='B') + expected = pd.Series([3, 1, 0], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=0 @@ -685,6 +685,11 @@ def test_empty_sum(self): expected = pd.Series([3, 1, np.nan], expected_idx, name='B') tm.assert_series_equal(result, expected) + # min_count>1 + result = df.groupby("A").B.sum(min_count=2) + expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + def test_empty_prod(self): # https://github.com/pandas-dev/pandas/issues/18678 df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], @@ -693,9 +698,9 @@ def test_empty_prod(self): expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') - # NA by default + # 1 by default result = df.groupby("A").B.prod() - expected = pd.Series([2, 1, np.nan], expected_idx, 
name='B') + expected = pd.Series([2, 1, 1], expected_idx, name='B') tm.assert_series_equal(result, expected) # min_count=0 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index cf4a6ec1c932a..a13d985ab6974 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2704,7 +2704,7 @@ def h(df, arg3): # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') - expected = pd.Series([-79.5160891089, -78.4839108911, None], + expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) assert_series_equal(expected, result) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index c8503b16a0e16..d359bfa5351a9 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -41,12 +41,11 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame( - {'Quantity': np.nan}, + {'Quantity': 0}, index=date_range('20130901 13:00:00', '20131205 13:00:00', freq='5D', name='Date', closed='left')) - expected.iloc[[0, 6, 18], 0] = np.array( - [24., 6., 9.], dtype='float64') + expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype='int64') result1 = df.resample('5D') .sum() assert_frame_equal(result1, expected) @@ -245,6 +244,8 @@ def test_timegrouper_with_reg_groups(self): result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum() assert_frame_equal(result, expected) + @pytest.mark.parametrize('freq', ['D', 'M', 'A', 'Q-APR']) + def test_timegrouper_with_reg_groups_freq(self, freq): # GH 6764 multiple grouping with/without sort df = DataFrame({ 'date': pd.to_datetime([ @@ -258,20 +259,24 @@ def test_timegrouper_with_reg_groups(self): 'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12] }).set_index('date') - for freq in ['D', 'M', 'A', 'Q-APR']: - expected = df.groupby('user_id')[ - 'whole_cost'].resample( - freq).sum().dropna().reorder_levels( - ['date', 
'user_id']).sort_index().astype('int64') - expected.name = 'whole_cost' - - result1 = df.sort_index().groupby([pd.Grouper(freq=freq), - 'user_id'])['whole_cost'].sum() - assert_series_equal(result1, expected) - - result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ - 'whole_cost'].sum() - assert_series_equal(result2, expected) + expected = ( + df.groupby('user_id')['whole_cost'] + .resample(freq) + .sum(min_count=1) # XXX + .dropna() + .reorder_levels(['date', 'user_id']) + .sort_index() + .astype('int64') + ) + expected.name = 'whole_cost' + + result1 = df.sort_index().groupby([pd.Grouper(freq=freq), + 'user_id'])['whole_cost'].sum() + assert_series_equal(result1, expected) + + result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ + 'whole_cost'].sum() + assert_series_equal(result2, expected) def test_timegrouper_get_group(self): # GH 6914 diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index cd92edc927173..14bf194ba5ee4 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -36,12 +36,12 @@ class TestSeriesAnalytics(TestData): ]) def test_empty(self, method, unit, use_bottleneck): with pd.option_context("use_bottleneck", use_bottleneck): - # GH 9422 + # GH 9422 / 18921 # Entirely empty s = Series([]) # NA by default result = getattr(s, method)() - assert isna(result) + assert result == unit # Explict result = getattr(s, method)(min_count=0) @@ -52,7 +52,7 @@ def test_empty(self, method, unit, use_bottleneck): # Skipna, default result = getattr(s, method)(skipna=True) - assert isna(result) + result == unit # Skipna, explicit result = getattr(s, method)(skipna=True, min_count=0) @@ -65,7 +65,7 @@ def test_empty(self, method, unit, use_bottleneck): s = Series([np.nan]) # NA by default result = getattr(s, method)() - assert isna(result) + assert result == unit # Explicit result = getattr(s, method)(min_count=0) @@ -76,7 +76,7 @@ def test_empty(self, method, unit, 
use_bottleneck): # Skipna, default result = getattr(s, method)(skipna=True) - assert isna(result) + result == unit # skipna, explicit result = getattr(s, method)(skipna=True, min_count=0) @@ -110,7 +110,7 @@ def test_empty(self, method, unit, use_bottleneck): # GH #844 (changed in 9422) df = DataFrame(np.empty((10, 0))) - assert (df.sum(1).isnull()).all() + assert (getattr(df, method)(1) == unit).all() s = pd.Series([1]) result = getattr(s, method)(min_count=2) @@ -131,9 +131,9 @@ def test_empty(self, method, unit, use_bottleneck): def test_empty_multi(self, method, unit): s = pd.Series([1, np.nan, np.nan, np.nan], index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) - # NaN by default + # 1 / 0 by default result = getattr(s, method)(level=0) - expected = pd.Series([1, np.nan], index=['a', 'b']) + expected = pd.Series([1, unit], index=['a', 'b']) tm.assert_series_equal(result, expected) # min_count=0 @@ -147,7 +147,7 @@ def test_empty_multi(self, method, unit): tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "method", ['sum', 'mean', 'median', 'std', 'var']) + "method", ['mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): # GH 7869 @@ -195,7 +195,7 @@ def test_sum_overflow(self, use_bottleneck): assert np.allclose(float(result), v[-1]) def test_sum(self): - self._check_stat_op('sum', np.sum, check_allna=True) + self._check_stat_op('sum', np.sum, check_allna=False) def test_sum_inf(self): s = Series(np.random.randn(10)) diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index 14a44c36c6a0c..3c93ff1d3f31e 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -38,7 +38,7 @@ def test_quantile(self): # GH7661 result = Series([np.timedelta64('NaT')]).sum() - assert result is pd.NaT + assert result == pd.Timedelta(0) msg = 'percentiles should all be in the interval \\[0, 1\\]' for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: diff --git 
a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index d03ecb9f9b5b7..df3c49a73d227 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -182,12 +182,17 @@ def _coerce_tds(targ, res): check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, - targarnanval, check_dtype=True, **kwargs): + targarnanval, check_dtype=True, empty_targfunc=None, + **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval - try: + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: targ = targfunc(targartempval, axis=axis, **kwargs) + + try: res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, @@ -219,10 +224,11 @@ def check_fun_data(self, testfunc, targfunc, testarval, targarval, except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, **kwargs) + targarnanval2, check_dtype=check_dtype, + empty_targfunc=empty_targfunc, **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, - targarnan=None, **kwargs): + targarnan=None, empty_targfunc=None, **kwargs): if targar is None: targar = testar if targarnan is None: @@ -232,7 +238,8 @@ def check_fun(self, testfunc, targfunc, testar, targar=None, targarnanval = getattr(self, targarnan) try: self.check_fun_data(testfunc, targfunc, testarval, targarval, - targarnanval, **kwargs) + targarnanval, empty_targfunc=empty_targfunc, + **kwargs) except BaseException as exc: exc.args += ('testar: %s' % testar, 'targar: %s' % targar, 'targarnan: %s' % targarnan) @@ -329,7 +336,8 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, - allow_date=False, allow_tdelta=True, check_dtype=False) + allow_date=False, allow_tdelta=True, check_dtype=False, + 
empty_targfunc=np.nansum) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, @@ -461,9 +469,11 @@ def test_nankurt(self): allow_str=False, allow_date=False, allow_tdelta=False) + @td.skip_if_no("numpy", min_version="1.10.0") def test_nanprod(self): self.check_funs(nanops.nanprod, np.prod, allow_str=False, - allow_date=False, allow_tdelta=False) + allow_date=False, allow_tdelta=False, + empty_targfunc=np.nanprod) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 34c1ee5683183..d772dba25868e 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -9,7 +9,6 @@ import numpy as np from pandas.core.dtypes.common import is_float_dtype -from pandas.core.dtypes.missing import remove_na_arraylike from pandas import (Series, DataFrame, Index, date_range, isna, notna, pivot, MultiIndex) from pandas.core.nanops import nanall, nanany @@ -83,13 +82,14 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) + @td.skip_if_no("numpy", min_version="1.10.0") def test_prod(self): - self._check_stat_op('prod', np.prod) + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -140,7 +140,8 @@ def alt(x): self._check_stat_op('sem', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel @@ -152,11 +153,8 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if 
len(nona) == 0: - return np.nan - return alternative(nona) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index e194136ec716d..e429403bbc919 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -8,7 +8,6 @@ from pandas import Series, Index, isna, notna from pandas.core.dtypes.common import is_float_dtype -from pandas.core.dtypes.missing import remove_na_arraylike from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D from pandas.tseries.offsets import BDay @@ -38,13 +37,14 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) + @td.skip_if_no("numpy", min_version="1.10.0") def test_prod(self): - self._check_stat_op('prod', np.prod) + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -105,7 +105,8 @@ def alt(x): # self._check_stat_op('skew', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel4d @@ -116,11 +117,9 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): f = getattr(obj, name) if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 4a3c4eff9f8c3..e9a517605020a 100644 --- a/pandas/tests/test_resample.py +++ 
b/pandas/tests/test_resample.py @@ -3390,9 +3390,9 @@ def test_aggregate_normal(self): def test_resample_entirly_nat_window(self, method, unit): s = pd.Series([0] * 2 + [np.nan] * 2, index=pd.date_range('2017', periods=4)) - # nan by default + # 0 / 1 by default result = methodcaller(method)(s.resample("2d")) - expected = pd.Series([0.0, np.nan], + expected = pd.Series([0.0, unit], index=pd.to_datetime(['2017-01-01', '2017-01-03'])) tm.assert_series_equal(result, expected) @@ -3411,8 +3411,17 @@ def test_resample_entirly_nat_window(self, method, unit): '2017-01-03'])) tm.assert_series_equal(result, expected) - def test_aggregate_with_nat(self): + @pytest.mark.parametrize('func, fill_value', [ + ('min', np.nan), + ('max', np.nan), + ('sum', 0), + ('prod', 1), + ('count', 0), + ]) + def test_aggregate_with_nat(self, func, fill_value): # check TimeGrouper's aggregation is identical as normal groupby + # if NaT is included, 'var', 'std', 'mean', 'first','last' + # and 'nth' doesn't work yet n = 20 data = np.random.randn(n, 4).astype('int64') @@ -3426,42 +3435,42 @@ def test_aggregate_with_nat(self): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'sum', 'prod']: - normal_result = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() - for func in ['count']: - normal_result = getattr(normal_grouped, func)() - pad = DataFrame([[0, 0, 0, 0]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = 
date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) + pad = DataFrame([[fill_value] * 4], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + assert dt_result.index.name == 'key' - for func in ['size']: - normal_result = getattr(normal_grouped, func)() - pad = Series([0], index=[3]) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - # GH 9925 - assert dt_result.index.name == 'key' + def test_aggregate_with_nat_size(self): + # GH 9925 + n = 20 + data = np.random.randn(n, 4).astype('int64') + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 - # if NaT is included, 'var', 'std', 'mean', 'first','last' - # and 'nth' doesn't work yet + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + normal_result = normal_grouped.size() + dt_result = dt_grouped.size() + + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_series_equal(expected, dt_result) + assert dt_result.index.name == 'key' def test_repr(self): # GH18203 @@ -3482,9 +3491,9 @@ def test_upsample_sum(self, method, unit): '2017-01-01T00:30:00', '2017-01-01T01:00:00']) - # NaN by default + # 0 / 1 by default 
result = methodcaller(method)(resampled) - expected = pd.Series([1, np.nan, 1], index=index) + expected = pd.Series([1, unit, 1], index=index) tm.assert_series_equal(result, expected) # min_count=0 @@ -3496,3 +3505,8 @@ def test_upsample_sum(self, method, unit): result = methodcaller(method, min_count=1)(resampled) expected = pd.Series([1, np.nan, 1], index=index) tm.assert_series_equal(result, expected) + + # min_count>1 + result = methodcaller(method, min_count=2)(resampled) + expected = pd.Series([np.nan, np.nan, np.nan], index=index) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index bee925823eebe..ccffc554e00c7 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -439,6 +439,28 @@ def tests_empty_df_rolling(self, roller): result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series([np.nan] * 4, + index=pd.DatetimeIndex(['2017-01-01', '2017-01-04', + '2017-01-06', '2017-01-07'])) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + def test_multi_index_names(self): # GH 16789, 16825 @@ -512,6 +534,19 @@ def test_empty_df_expanding(self, expander): index=pd.DatetimeIndex([])).expanding(expander).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = 
pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + class TestEWM(Base): @@ -828,7 +863,8 @@ def test_centered_axis_validation(self): .rolling(window=3, center=True, axis=2).mean()) def test_rolling_sum(self): - self._check_moment_func(mom.rolling_sum, np.sum, name='sum') + self._check_moment_func(mom.rolling_sum, np.nansum, name='sum', + zero_min_periods_equal=False) def test_rolling_count(self): counter = lambda x: np.isfinite(x).astype(float).sum() @@ -1298,14 +1334,18 @@ def test_fperr_robustness(self): def _check_moment_func(self, f, static_comp, name=None, window=50, has_min_periods=True, has_center=True, has_time_rule=True, preserve_nan=True, - fill_value=None, test_stable=False, **kwargs): + fill_value=None, test_stable=False, + zero_min_periods_equal=True, + **kwargs): with warnings.catch_warnings(record=True): self._check_ndarray(f, static_comp, window=window, has_min_periods=has_min_periods, preserve_nan=preserve_nan, has_center=has_center, fill_value=fill_value, - test_stable=test_stable, **kwargs) + test_stable=test_stable, + zero_min_periods_equal=zero_min_periods_equal, + **kwargs) with warnings.catch_warnings(record=True): self._check_structures(f, static_comp, @@ -1324,7 +1364,8 @@ def _check_moment_func(self, f, static_comp, name=None, window=50, def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True, preserve_nan=True, has_center=True, fill_value=None, - test_stable=False, test_window=True, **kwargs): + test_stable=False, test_window=True, + zero_min_periods_equal=True, **kwargs): def get_result(arr, window, min_periods=None, center=False): return f(arr, window, min_periods=min_periods, center=center, ** kwargs) @@ -1357,10 +1398,11 @@ def get_result(arr, window, min_periods=None, center=False): assert 
isna(result[3]) assert notna(result[4]) - # min_periods=0 - result0 = get_result(arr, 20, min_periods=0) - result1 = get_result(arr, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(arr, 20, min_periods=0) + result1 = get_result(arr, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) else: result = get_result(arr, 50) tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 4e9282c3bd031..8acf16536f1de 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2665,3 +2665,31 @@ def setTZ(tz): yield finally: setTZ(orig_tz) + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + skipna_wrapper : function + """ + if skipna_alternative: + def skipna_wrapper(x): + return skipna_alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper
Changes the defaults for `min_count` so that `sum([])` and `sum([np.nan])` are 0 by default, and NaN with `min_count>=1`. I'd recommend looking at only the latest commit until https://github.com/pandas-dev/pandas/pull/18876 is merged. I'll probably force push changes here to keep all the relevant changes in the last commit until https://github.com/pandas-dev/pandas/pull/18876 is in, rebase on that, and then start pushing changes regularly. cc @jreback @jorisvandenbossche @shoyer @wesm
https://api.github.com/repos/pandas-dev/pandas/pulls/18921
2017-12-23T12:48:38Z
2017-12-29T13:05:49Z
2017-12-29T13:05:49Z
2017-12-30T23:18:24Z
Fixing 3.6 Escape Sequence Deprecations in tests/io/parser/usecols.py
diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py index 0fa53e6288bda..8767055239cd5 100644 --- a/pandas/tests/io/parser/usecols.py +++ b/pandas/tests/io/parser/usecols.py @@ -492,16 +492,18 @@ def test_raise_on_usecols_names_mismatch(self): tm.assert_frame_equal(df, expected) usecols = ['a', 'b', 'c', 'f'] - with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")): + with tm.assert_raises_regex( + ValueError, msg.format(missing=r"\['f'\]")): self.read_csv(StringIO(data), usecols=usecols) usecols = ['a', 'b', 'f'] - with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")): + with tm.assert_raises_regex( + ValueError, msg.format(missing=r"\['f'\]")): self.read_csv(StringIO(data), usecols=usecols) usecols = ['a', 'b', 'f', 'g'] with tm.assert_raises_regex( - ValueError, msg.format(missing="\[('f', 'g'|'g', 'f')\]")): + ValueError, msg.format(missing=r"\[('f', 'g'|'g', 'f')\]")): self.read_csv(StringIO(data), usecols=usecols) names = ['A', 'B', 'C', 'D'] @@ -525,9 +527,11 @@ def test_raise_on_usecols_names_mismatch(self): # tm.assert_frame_equal(df, expected) usecols = ['A', 'B', 'C', 'f'] - with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")): + with tm.assert_raises_regex( + ValueError, msg.format(missing=r"\['f'\]")): self.read_csv(StringIO(data), header=0, names=names, usecols=usecols) usecols = ['A', 'B', 'f'] - with tm.assert_raises_regex(ValueError, msg.format(missing="\['f'\]")): + with tm.assert_raises_regex( + ValueError, msg.format(missing=r"\['f'\]")): self.read_csv(StringIO(data), names=names, usecols=usecols)
@jreback [brought up some warnings](https://github.com/pandas-dev/pandas/pull/17310#issuecomment-353402586) on 3.6 that should be fixed by making the regex an r'string'. Tests pass, happy to fix all the other occurrences, would need to know how to generate these warnings on my local machine though, as running the same pytest command as CI doesn't seem to bring them up for me.
https://api.github.com/repos/pandas-dev/pandas/pulls/18918
2017-12-23T02:41:27Z
2017-12-23T19:45:36Z
2017-12-23T19:45:36Z
2017-12-28T15:08:34Z
CLN: ASV join_merge
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 3b0e33b72ddc1..5b40a29d54683 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -1,20 +1,24 @@ -from .pandas_vb_common import * +import string +import numpy as np +import pandas.util.testing as tm +from pandas import (DataFrame, Series, MultiIndex, date_range, concat, merge, + merge_asof) try: from pandas import merge_ordered except ImportError: from pandas import ordered_merge as merge_ordered +from .pandas_vb_common import Panel, setup # noqa -# ---------------------------------------------------------------------- -# Append class Append(object): + goal_time = 0.2 def setup(self): - self.df1 = pd.DataFrame(np.random.randn(10000, 4), - columns=['A', 'B', 'C', 'D']) + self.df1 = DataFrame(np.random.randn(10000, 4), + columns=['A', 'B', 'C', 'D']) self.df2 = self.df1.copy() self.df2.index = np.arange(10000, 20000) self.mdf1 = self.df1.copy() @@ -35,237 +39,221 @@ def time_append_mixed(self): self.mdf1.append(self.mdf2) -# ---------------------------------------------------------------------- -# Concat - class Concat(object): - goal_time = 0.2 - def setup(self): - self.n = 1000 - self.indices = tm.makeStringIndex(1000) - self.s = Series(self.n, index=self.indices) - self.pieces = [self.s[i:(- i)] for i in range(1, 10)] - self.pieces = (self.pieces * 50) - - self.df_small = pd.DataFrame(randn(5, 4)) + goal_time = 0.2 + params = [0, 1] + param_names = ['axis'] - # empty - self.df = pd.DataFrame(dict(A=range(10000)), index=date_range('20130101', periods=10000, freq='s')) - self.empty = pd.DataFrame() + def setup(self, axis): + N = 1000 + s = Series(N, index=tm.makeStringIndex(N)) + self.series = [s[i:- i] for i in range(1, 10)] * 50 + self.small_frames = [DataFrame(np.random.randn(5, 4))] * 1000 + df = DataFrame({'A': range(N)}, + index=date_range('20130101', periods=N, freq='s')) + self.empty_left = [DataFrame(), df] + self.empty_right = 
[df, DataFrame()] - def time_concat_series_axis1(self): - concat(self.pieces, axis=1) + def time_concat_series(self, axis): + concat(self.series, axis=axis) - def time_concat_small_frames(self): - concat(([self.df_small] * 1000)) + def time_concat_small_frames(self, axis): + concat(self.small_frames, axis=axis) - def time_concat_empty_frames1(self): - concat([self.df, self.empty]) + def time_concat_empty_right(self, axis): + concat(self.empty_right, axis=axis) - def time_concat_empty_frames2(self): - concat([self.empty, self.df]) + def time_concat_empty_left(self, axis): + concat(self.empty_left, axis=axis) class ConcatPanels(object): - goal_time = 0.2 - - def setup(self): - dataset = np.zeros((10000, 200, 2), dtype=np.float32) - self.panels_f = [pd.Panel(np.copy(dataset, order='F')) - for i in range(20)] - self.panels_c = [pd.Panel(np.copy(dataset, order='C')) - for i in range(20)] - def time_c_ordered_axis0(self): - concat(self.panels_c, axis=0, ignore_index=True) - - def time_f_ordered_axis0(self): - concat(self.panels_f, axis=0, ignore_index=True) + goal_time = 0.2 + params = ([0, 1, 2], [True, False]) + param_names = ['axis', 'ignore_index'] - def time_c_ordered_axis1(self): - concat(self.panels_c, axis=1, ignore_index=True) + def setup(self, axis, ignore_index): + panel_c = Panel(np.zeros((10000, 200, 2), dtype=np.float32, order='C')) + self.panels_c = [panel_c] * 20 + panel_f = Panel(np.zeros((10000, 200, 2), dtype=np.float32, order='F')) + self.panels_f = [panel_f] * 20 - def time_f_ordered_axis1(self): - concat(self.panels_f, axis=1, ignore_index=True) + def time_c_ordered(self, axis, ignore_index): + concat(self.panels_c, axis=axis, ignore_index=ignore_index) - def time_c_ordered_axis2(self): - concat(self.panels_c, axis=2, ignore_index=True) + def time_f_ordered(self, axis, ignore_index): + concat(self.panels_f, axis=axis, ignore_index=ignore_index) - def time_f_ordered_axis2(self): - concat(self.panels_f, axis=2, ignore_index=True) +class 
ConcatDataFrames(object): -class ConcatFrames(object): goal_time = 0.2 + params = ([0, 1], [True, False]) + param_names = ['axis', 'ignore_index'] - def setup(self): - dataset = np.zeros((10000, 200), dtype=np.float32) - - self.frames_f = [pd.DataFrame(np.copy(dataset, order='F')) - for i in range(20)] - self.frames_c = [pd.DataFrame(np.copy(dataset, order='C')) - for i in range(20)] - - def time_c_ordered_axis0(self): - concat(self.frames_c, axis=0, ignore_index=True) - - def time_f_ordered_axis0(self): - concat(self.frames_f, axis=0, ignore_index=True) + def setup(self, axis, ignore_index): + frame_c = DataFrame(np.zeros((10000, 200), + dtype=np.float32, order='C')) + self.frame_c = [frame_c] * 20 + frame_f = DataFrame(np.zeros((10000, 200), + dtype=np.float32, order='F')) + self.frame_f = [frame_f] * 20 - def time_c_ordered_axis1(self): - concat(self.frames_c, axis=1, ignore_index=True) + def time_c_ordered(self, axis, ignore_index): + concat(self.frame_c, axis=axis, ignore_index=ignore_index) - def time_f_ordered_axis1(self): - concat(self.frames_f, axis=1, ignore_index=True) + def time_f_ordered(self, axis, ignore_index): + concat(self.frame_f, axis=axis, ignore_index=ignore_index) -# ---------------------------------------------------------------------- -# Joins - class Join(object): - goal_time = 0.2 - - def setup(self): - self.level1 = tm.makeStringIndex(10).values - self.level2 = tm.makeStringIndex(1000).values - self.label1 = np.arange(10).repeat(1000) - self.label2 = np.tile(np.arange(1000), 10) - self.key1 = np.tile(self.level1.take(self.label1), 10) - self.key2 = np.tile(self.level2.take(self.label2), 10) - self.shuf = np.arange(100000) - random.shuffle(self.shuf) - try: - self.index2 = MultiIndex(levels=[self.level1, self.level2], - labels=[self.label1, self.label2]) - self.index3 = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], - labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), 
np.tile(np.tile(np.arange(100), 100), 10)]) - self.df_multi = DataFrame(np.random.randn(len(self.index2), 4), - index=self.index2, - columns=['A', 'B', 'C', 'D']) - except: - pass - self.df = pd.DataFrame({'data1': np.random.randn(100000), - 'data2': np.random.randn(100000), - 'key1': self.key1, - 'key2': self.key2}) - self.df_key1 = pd.DataFrame(np.random.randn(len(self.level1), 4), - index=self.level1, - columns=['A', 'B', 'C', 'D']) - self.df_key2 = pd.DataFrame(np.random.randn(len(self.level2), 4), - index=self.level2, - columns=['A', 'B', 'C', 'D']) - self.df_shuf = self.df.reindex(self.df.index[self.shuf]) - - def time_join_dataframe_index_multi(self): - self.df.join(self.df_multi, on=['key1', 'key2']) - - def time_join_dataframe_index_single_key_bigger(self): - self.df.join(self.df_key2, on='key2') - def time_join_dataframe_index_single_key_bigger_sort(self): - self.df_shuf.join(self.df_key2, on='key2', sort=True) - - def time_join_dataframe_index_single_key_small(self): - self.df.join(self.df_key1, on='key1') + goal_time = 0.2 + params = [True, False] + param_names = ['sort'] + + def setup(self, sort): + level1 = tm.makeStringIndex(10).values + level2 = tm.makeStringIndex(1000).values + label1 = np.arange(10).repeat(1000) + label2 = np.tile(np.arange(1000), 10) + index2 = MultiIndex(levels=[level1, level2], + labels=[label1, label2]) + self.df_multi = DataFrame(np.random.randn(len(index2), 4), + index=index2, + columns=['A', 'B', 'C', 'D']) + + self.key1 = np.tile(level1.take(label1), 10) + self.key2 = np.tile(level2.take(label2), 10) + self.df = DataFrame({'data1': np.random.randn(100000), + 'data2': np.random.randn(100000), + 'key1': self.key1, + 'key2': self.key2}) + + self.df_key1 = DataFrame(np.random.randn(len(level1), 4), + index=level1, + columns=['A', 'B', 'C', 'D']) + self.df_key2 = DataFrame(np.random.randn(len(level2), 4), + index=level2, + columns=['A', 'B', 'C', 'D']) + + shuf = np.arange(100000) + np.random.shuffle(shuf) + self.df_shuf = 
self.df.reindex(self.df.index[shuf]) + + def time_join_dataframe_index_multi(self, sort): + self.df.join(self.df_multi, on=['key1', 'key2'], sort=sort) + + def time_join_dataframe_index_single_key_bigger(self, sort): + self.df.join(self.df_key2, on='key2', sort=sort) + + def time_join_dataframe_index_single_key_small(self, sort): + self.df.join(self.df_key1, on='key1', sort=sort) + + def time_join_dataframe_index_shuffle_key_bigger_sort(self, sort): + self.df_shuf.join(self.df_key2, on='key2', sort=sort) class JoinIndex(object): + goal_time = 0.2 def setup(self): - np.random.seed(2718281) - self.n = 50000 - self.left = pd.DataFrame(np.random.randint(1, (self.n / 500), (self.n, 2)), columns=['jim', 'joe']) - self.right = pd.DataFrame(np.random.randint(1, (self.n / 500), (self.n, 2)), columns=['jolie', 'jolia']).set_index('jolie') + N = 50000 + self.left = DataFrame(np.random.randint(1, N / 500, (N, 2)), + columns=['jim', 'joe']) + self.right = DataFrame(np.random.randint(1, N / 500, (N, 2)), + columns=['jolie', 'jolia']).set_index('jolie') def time_left_outer_join_index(self): self.left.join(self.right, on='jim') -class join_non_unique_equal(object): +class JoinNonUnique(object): # outer join of non-unique # GH 6329 - goal_time = 0.2 def setup(self): - self.date_index = date_range('01-Jan-2013', '23-Jan-2013', freq='T') - self.daily_dates = self.date_index.to_period('D').to_timestamp('S', 'S') - self.fracofday = (self.date_index.view(np.ndarray) - self.daily_dates.view(np.ndarray)) - self.fracofday = (self.fracofday.astype('timedelta64[ns]').astype(np.float64) / 86400000000000.0) - self.fracofday = Series(self.fracofday, self.daily_dates) - self.index = date_range(self.date_index.min().to_period('A').to_timestamp('D', 'S'), self.date_index.max().to_period('A').to_timestamp('D', 'E'), freq='D') - self.temp = Series(1.0, self.index) + date_index = date_range('01-Jan-2013', '23-Jan-2013', freq='T') + daily_dates = date_index.to_period('D').to_timestamp('S', 'S') + 
self.fracofday = date_index.values - daily_dates.values + self.fracofday = self.fracofday.astype('timedelta64[ns]') + self.fracofday = self.fracofday.astype(np.float64) / 86400000000000.0 + self.fracofday = Series(self.fracofday, daily_dates) + index = date_range(date_index.min(), date_index.max(), freq='D') + self.temp = Series(1.0, index)[self.fracofday.index] def time_join_non_unique_equal(self): - (self.fracofday * self.temp[self.fracofday.index]) - + self.fracofday * self.temp -# ---------------------------------------------------------------------- -# Merges class Merge(object): - goal_time = 0.2 - def setup(self): - self.N = 10000 - self.indices = tm.makeStringIndex(self.N).values - self.indices2 = tm.makeStringIndex(self.N).values - self.key = np.tile(self.indices[:8000], 10) - self.key2 = np.tile(self.indices2[:8000], 10) - self.left = pd.DataFrame({'key': self.key, 'key2': self.key2, - 'value': np.random.randn(80000)}) - self.right = pd.DataFrame({'key': self.indices[2000:], - 'key2': self.indices2[2000:], - 'value2': np.random.randn(8000)}) - - self.df = pd.DataFrame({'key1': np.tile(np.arange(500).repeat(10), 2), - 'key2': np.tile(np.arange(250).repeat(10), 4), - 'value': np.random.randn(10000)}) - self.df2 = pd.DataFrame({'key1': np.arange(500), 'value2': randn(500)}) + goal_time = 0.2 + params = [True, False] + param_names = ['sort'] + + def setup(self, sort): + N = 10000 + indices = tm.makeStringIndex(N).values + indices2 = tm.makeStringIndex(N).values + key = np.tile(indices[:8000], 10) + key2 = np.tile(indices2[:8000], 10) + self.left = DataFrame({'key': key, 'key2': key2, + 'value': np.random.randn(80000)}) + self.right = DataFrame({'key': indices[2000:], + 'key2': indices2[2000:], + 'value2': np.random.randn(8000)}) + + self.df = DataFrame({'key1': np.tile(np.arange(500).repeat(10), 2), + 'key2': np.tile(np.arange(250).repeat(10), 4), + 'value': np.random.randn(10000)}) + self.df2 = DataFrame({'key1': np.arange(500), + 'value2': 
np.random.randn(500)}) self.df3 = self.df[:5000] - def time_merge_2intkey_nosort(self): - merge(self.left, self.right, sort=False) + def time_merge_2intkey(self, sort): + merge(self.left, self.right, sort=sort) - def time_merge_2intkey_sort(self): - merge(self.left, self.right, sort=True) + def time_merge_dataframe_integer_2key(self, sort): + merge(self.df, self.df3, sort=sort) - def time_merge_dataframe_integer_2key(self): - merge(self.df, self.df3) + def time_merge_dataframe_integer_key(self, sort): + merge(self.df, self.df2, on='key1', sort=sort) - def time_merge_dataframe_integer_key(self): - merge(self.df, self.df2, on='key1') +class I8Merge(object): -class i8merge(object): goal_time = 0.2 + params = ['inner', 'outer', 'left', 'right'] + param_names = ['how'] - def setup(self): - (low, high, n) = (((-1) << 10), (1 << 10), (1 << 20)) - self.left = pd.DataFrame(np.random.randint(low, high, (n, 7)), - columns=list('ABCDEFG')) + def setup(self, how): + low, high, n = -1000, 1000, 10**6 + self.left = DataFrame(np.random.randint(low, high, (n, 7)), + columns=list('ABCDEFG')) self.left['left'] = self.left.sum(axis=1) - self.i = np.random.permutation(len(self.left)) - self.right = self.left.iloc[self.i].copy() - self.right.columns = (self.right.columns[:(-1)].tolist() + ['right']) - self.right.index = np.arange(len(self.right)) - self.right['right'] *= (-1) + self.right = self.left.sample(frac=1).rename({'left': 'right'}, axis=1) + self.right = self.right.reset_index(drop=True) + self.right['right'] *= -1 - def time_i8merge(self): - merge(self.left, self.right, how='outer') + def time_i8merge(self, how): + merge(self.left, self.right, how=how) class MergeCategoricals(object): + goal_time = 0.2 def setup(self): - self.left_object = pd.DataFrame( + self.left_object = DataFrame( {'X': np.random.choice(range(0, 10), size=(10000,)), 'Y': np.random.choice(['one', 'two', 'three'], size=(10000,))}) - self.right_object = pd.DataFrame( + self.right_object = DataFrame( {'X': 
np.random.choice(range(0, 10), size=(10000,)), 'Z': np.random.choice(['jjj', 'kkk', 'sss'], size=(10000,))}) @@ -281,103 +269,85 @@ def time_merge_cat(self): merge(self.left_cat, self.right_cat, on='X') -# ---------------------------------------------------------------------- -# Ordered merge - class MergeOrdered(object): def setup(self): - groups = tm.makeStringIndex(10).values - - self.left = pd.DataFrame({'group': groups.repeat(5000), - 'key' : np.tile(np.arange(0, 10000, 2), 10), - 'lvalue': np.random.randn(50000)}) - - self.right = pd.DataFrame({'key' : np.arange(10000), - 'rvalue' : np.random.randn(10000)}) + self.left = DataFrame({'group': groups.repeat(5000), + 'key': np.tile(np.arange(0, 10000, 2), 10), + 'lvalue': np.random.randn(50000)}) + self.right = DataFrame({'key': np.arange(10000), + 'rvalue': np.random.randn(10000)}) def time_merge_ordered(self): merge_ordered(self.left, self.right, on='key', left_by='group') -# ---------------------------------------------------------------------- -# asof merge - class MergeAsof(object): def setup(self): - import string - np.random.seed(0) one_count = 200000 two_count = 1000000 - self.df1 = pd.DataFrame( + df1 = DataFrame( {'time': np.random.randint(0, one_count / 20, one_count), 'key': np.random.choice(list(string.ascii_uppercase), one_count), 'key2': np.random.randint(0, 25, one_count), 'value1': np.random.randn(one_count)}) - self.df2 = pd.DataFrame( + df2 = DataFrame( {'time': np.random.randint(0, two_count / 20, two_count), 'key': np.random.choice(list(string.ascii_uppercase), two_count), 'key2': np.random.randint(0, 25, two_count), 'value2': np.random.randn(two_count)}) - self.df1 = self.df1.sort_values('time') - self.df2 = self.df2.sort_values('time') + df1 = df1.sort_values('time') + df2 = df2.sort_values('time') - self.df1['time32'] = np.int32(self.df1.time) - self.df2['time32'] = np.int32(self.df2.time) + df1['time32'] = np.int32(df1.time) + df2['time32'] = np.int32(df2.time) - self.df1a = 
self.df1[['time', 'value1']] - self.df2a = self.df2[['time', 'value2']] - self.df1b = self.df1[['time', 'key', 'value1']] - self.df2b = self.df2[['time', 'key', 'value2']] - self.df1c = self.df1[['time', 'key2', 'value1']] - self.df2c = self.df2[['time', 'key2', 'value2']] - self.df1d = self.df1[['time32', 'value1']] - self.df2d = self.df2[['time32', 'value2']] - self.df1e = self.df1[['time', 'key', 'key2', 'value1']] - self.df2e = self.df2[['time', 'key', 'key2', 'value2']] + self.df1a = df1[['time', 'value1']] + self.df2a = df2[['time', 'value2']] + self.df1b = df1[['time', 'key', 'value1']] + self.df2b = df2[['time', 'key', 'value2']] + self.df1c = df1[['time', 'key2', 'value1']] + self.df2c = df2[['time', 'key2', 'value2']] + self.df1d = df1[['time32', 'value1']] + self.df2d = df2[['time32', 'value2']] + self.df1e = df1[['time', 'key', 'key2', 'value1']] + self.df2e = df2[['time', 'key', 'key2', 'value2']] - def time_noby(self): + def time_on_int(self): merge_asof(self.df1a, self.df2a, on='time') + def time_on_int32(self): + merge_asof(self.df1d, self.df2d, on='time32') + def time_by_object(self): merge_asof(self.df1b, self.df2b, on='time', by='key') def time_by_int(self): merge_asof(self.df1c, self.df2c, on='time', by='key2') - def time_on_int32(self): - merge_asof(self.df1d, self.df2d, on='time32') - def time_multiby(self): merge_asof(self.df1e, self.df2e, on='time', by=['key', 'key2']) -# ---------------------------------------------------------------------- -# data alignment - class Align(object): + goal_time = 0.2 def setup(self): - self.n = 1000000 - self.sz = 500000 - self.rng = np.arange(0, 10000000000000, 10000000) - self.stamps = (np.datetime64(datetime.now()).view('i8') + self.rng) - self.idx1 = np.sort(self.sample(self.stamps, self.sz)) - self.idx2 = np.sort(self.sample(self.stamps, self.sz)) - self.ts1 = Series(np.random.randn(self.sz), self.idx1) - self.ts2 = Series(np.random.randn(self.sz), self.idx2) - - def sample(self, values, k): - 
self.sampler = np.random.permutation(len(values)) - return values.take(self.sampler[:k]) + size = 5 * 10**5 + rng = np.arange(0, 10**13, 10**7) + stamps = np.datetime64('now').view('i8') + rng + idx1 = np.sort(np.random.choice(stamps, size, replace=False)) + idx2 = np.sort(np.random.choice(stamps, size, replace=False)) + self.ts1 = Series(np.random.randn(size), idx1) + self.ts2 = Series(np.random.randn(size), idx2) def time_series_align_int64_index(self): - (self.ts1 + self.ts2) + self.ts1 + self.ts2 def time_series_align_left_monotonic(self): self.ts1.align(self.ts2, join='left')
Flake8'd, utilized `param`s to add some additional benchmarks, and simplified the setup where possible. ``` $ asv dev -b ^join_merge · Discovering benchmarks · Running 30 total benchmarks (1 commits * 1 environments * 30 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 3.33%] ··· Running join_merge.Align.time_series_align_int64_index 672ms [ 6.67%] ··· Running ...erge.Align.time_series_align_left_monotonic 203ms [ 10.00%] ··· Running join_merge.Append.time_append_homogenous 1.57ms [ 10.00%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/join_merge.py:29: FutureWarning: consolidate is deprecated and will be removed in a future release. self.mdf1.consolidate(inplace=True) [ 13.33%] ··· Running join_merge.Append.time_append_mixed 2.51ms [ 13.33%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/join_merge.py:29: FutureWarning: consolidate is deprecated and will be removed in a future release. 
self.mdf1.consolidate(inplace=True) [ 16.67%] ··· Running join_merge.Concat.time_concat_empty_left ok [ 16.67%] ···· ====== ======= axis ------ ------- 0 425μs 1 475μs ====== ======= [ 20.00%] ··· Running join_merge.Concat.time_concat_empty_right ok [ 20.00%] ···· ====== ======= axis ------ ------- 0 406μs 1 481μs ====== ======= [ 23.33%] ··· Running join_merge.Concat.time_concat_series ok [ 23.33%] ···· ====== ======== axis ------ -------- 0 27.5ms 1 218ms ====== ======== [ 26.67%] ··· Running join_merge.Concat.time_concat_small_frames ok [ 26.67%] ···· ====== ======== axis ------ -------- 0 107ms 1 81.2ms ====== ======== [ 30.00%] ··· Running join_merge.ConcatDataFrames.time_c_ordered ok [ 30.00%] ···· ====== ======= ======= -- ignore_index ------ --------------- axis True False ====== ======= ======= 0 146ms 141ms 1 225ms 226ms ====== ======= ======= [ 33.33%] ··· Running join_merge.ConcatDataFrames.time_f_ordered ok [ 33.33%] ···· ====== ======= ======= -- ignore_index ------ --------------- axis True False ====== ======= ======= 0 173ms 175ms 1 161ms 155ms ====== ======= ======= [ 36.67%] ··· Running join_merge.ConcatPanels.time_c_ordered ok [ 36.67%] ···· ====== ======= ======= -- ignore_index ------ --------------- axis True False ====== ======= ======= 0 318ms 314ms 1 359ms 363ms 2 1.67s 1.66s ====== ======= ======= [ 40.00%] ··· Running join_merge.ConcatPanels.time_f_ordered ok [ 40.00%] ···· ====== ======= ======= -- ignore_index ------ --------------- axis True False ====== ======= ======= 0 666ms 666ms 1 321ms 297ms 2 297ms 299ms ====== ======= ======= [ 43.33%] ··· Running join_merge.I8Merge.time_i8merge ok [ 43.33%] ···· ======= ======= how ------- ------- inner 1.62s outer 1.61s left 1.62s right 1.62s ======= ======= [ 46.67%] ··· Running ..._merge.Join.time_join_dataframe_index_multi ok [ 46.67%] ···· ======= ======== sort ------- -------- True 53.0ms False 41.9ms ======= ======== [ 50.00%] ··· Running ...oin_dataframe_index_shuffle_key_bigger_sort 
ok [ 50.00%] ···· ======= ======== sort ------- -------- True 36.5ms False 29.8ms ======= ======== [ 53.33%] ··· Running ...time_join_dataframe_index_single_key_bigger ok [ 53.33%] ···· ======= ======== sort ------- -------- True 36.6ms False 30.8ms ======= ======== [ 56.67%] ··· Running ....time_join_dataframe_index_single_key_small ok [ 56.67%] ···· ======= ======== sort ------- -------- True 30.6ms False 27.6ms ======= ======== [ 60.00%] ··· Running ..._merge.JoinIndex.time_left_outer_join_index 4.82s [ 63.33%] ··· Running ...ge.JoinNonUnique.time_join_non_unique_equal 417ms [ 66.67%] ··· Running join_merge.Merge.time_merge_2intkey ok [ 66.67%] ···· ======= ======== sort ------- -------- True 72.6ms False 40.9ms ======= ======== [ 70.00%] ··· Running ...rge.Merge.time_merge_dataframe_integer_2key ok [ 70.00%] ···· ======= ======== sort ------- -------- True 23.3ms False 10.0ms ======= ======== [ 73.33%] ··· Running ...erge.Merge.time_merge_dataframe_integer_key ok [ 73.33%] ···· ======= ======== sort ------- -------- True 5.46ms False 4.87ms ======= ======== [ 76.67%] ··· Running join_merge.MergeAsof.time_by_int 48.6ms [ 80.00%] ··· Running join_merge.MergeAsof.time_by_object 83.9ms [ 83.33%] ··· Running join_merge.MergeAsof.time_multiby 1.32s [ 86.67%] ··· Running join_merge.MergeAsof.time_on_int 29.2ms [ 90.00%] ··· Running join_merge.MergeAsof.time_on_int32 33.9ms [ 93.33%] ··· Running join_merge.MergeCategoricals.time_merge_cat 775ms [ 96.67%] ··· Running join_merge.MergeCategoricals.time_merge_object 1.50s [100.00%] ··· Running join_merge.MergeOrdered.time_merge_ordered 146ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18917
2017-12-23T02:05:24Z
2017-12-23T19:57:29Z
2017-12-23T19:57:29Z
2017-12-24T05:51:15Z
CLN: ASV plotting
diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index 16889b2f19e89..5b49112b0e07d 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -1,21 +1,20 @@ -from .pandas_vb_common import * -try: - from pandas import date_range -except ImportError: - def date_range(start=None, end=None, periods=None, freq=None): - return DatetimeIndex(start, end, periods=periods, offset=freq) +import numpy as np +from pandas import DataFrame, Series, DatetimeIndex, date_range try: from pandas.plotting import andrews_curves except ImportError: from pandas.tools.plotting import andrews_curves +import matplotlib +matplotlib.use('Agg') + +from .pandas_vb_common import setup # noqa class Plotting(object): + goal_time = 0.2 def setup(self): - import matplotlib - matplotlib.use('Agg') self.s = Series(np.random.randn(1000000)) self.df = DataFrame({'col': self.s}) @@ -27,18 +26,17 @@ def time_frame_plot(self): class TimeseriesPlotting(object): + goal_time = 0.2 def setup(self): - import matplotlib - matplotlib.use('Agg') N = 2000 M = 5 idx = date_range('1/1/1975', periods=N) self.df = DataFrame(np.random.randn(N, M), index=idx) - idx_irregular = pd.DatetimeIndex(np.concatenate((idx.values[0:10], - idx.values[12:]))) + idx_irregular = DatetimeIndex(np.concatenate((idx.values[0:10], + idx.values[12:]))) self.df2 = DataFrame(np.random.randn(len(idx_irregular), M), index=idx_irregular) @@ -53,16 +51,14 @@ def time_plot_irregular(self): class Misc(object): + goal_time = 0.6 def setup(self): - import matplotlib - matplotlib.use('Agg') - self.N = 500 - self.M = 10 - data_dict = {x: np.random.randn(self.N) for x in range(self.M)} - data_dict["Name"] = ["A"] * self.N - self.df = DataFrame(data_dict) + N = 500 + M = 10 + self.df = DataFrame(np.random.randn(N, M)) + self.df['Name'] = ["A"] * N def time_plot_andrews_curves(self): andrews_curves(self.df, "Name")
Flake8'd and simplified some of the setup. ``` $ asv dev -b ^plotting · Discovering benchmarks · Running 6 total benchmarks (1 commits * 1 environments * 6 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 16.67%] ··· Running plotting.Misc.time_plot_andrews_curves 1.92s [ 33.33%] ··· Running plotting.Plotting.time_frame_plot 417ms [ 50.00%] ··· Running plotting.Plotting.time_series_plot 418ms [ 66.67%] ··· Running ...ting.TimeseriesPlotting.time_plot_irregular 130ms [ 83.33%] ··· Running plotting.TimeseriesPlotting.time_plot_regular 388ms [100.00%] ··· Running ...TimeseriesPlotting.time_plot_regular_compat 123ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18916
2017-12-23T01:58:34Z
2017-12-23T20:02:43Z
2017-12-23T20:02:43Z
2017-12-24T05:53:19Z
CLN: ASV panel ctor
diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py index cc6071b054662..456fe959c5aa3 100644 --- a/asv_bench/benchmarks/panel_ctor.py +++ b/asv_bench/benchmarks/panel_ctor.py @@ -1,65 +1,56 @@ -from .pandas_vb_common import * -from datetime import timedelta +from datetime import datetime, timedelta +from pandas import DataFrame, DatetimeIndex, date_range -class Constructors1(object): - goal_time = 0.2 - - def setup(self): - self.data_frames = {} - self.start = datetime(1990, 1, 1) - self.end = datetime(2012, 1, 1) - for x in range(100): - self.end += timedelta(days=1) - self.dr = np.asarray(date_range(self.start, self.end)) - self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) - self.data_frames[x] = self.df - - def time_panel_from_dict_all_different_indexes(self): - Panel.from_dict(self.data_frames) +from .pandas_vb_common import Panel, setup # noqa -class Constructors2(object): +class DifferentIndexes(object): goal_time = 0.2 def setup(self): self.data_frames = {} + start = datetime(1990, 1, 1) + end = datetime(2012, 1, 1) for x in range(100): - self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D')) - self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) - self.data_frames[x] = self.df + end += timedelta(days=1) + idx = date_range(start, end) + df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx) + self.data_frames[x] = df - def time_panel_from_dict_equiv_indexes(self): + def time_from_dict(self): Panel.from_dict(self.data_frames) -class Constructors3(object): +class SameIndexes(object): + goal_time = 0.2 def setup(self): - self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D')) - self.data_frames = {} - for x in range(100): - self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * 
len(self.dr)), }, index=self.dr) - self.data_frames[x] = self.df + idx = DatetimeIndex(start=datetime(1990, 1, 1), + end=datetime(2012, 1, 1), + freq='D') + df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx) + self.data_frames = dict(enumerate([df] * 100)) - def time_panel_from_dict_same_index(self): + def time_from_dict(self): Panel.from_dict(self.data_frames) -class Constructors4(object): +class TwoIndexes(object): + goal_time = 0.2 def setup(self): - self.data_frames = {} - self.start = datetime(1990, 1, 1) - self.end = datetime(2012, 1, 1) - for x in range(100): - if (x == 50): - self.end += timedelta(days=1) - self.dr = np.asarray(date_range(self.start, self.end)) - self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) - self.data_frames[x] = self.df - - def time_panel_from_dict_two_different_indexes(self): + start = datetime(1990, 1, 1) + end = datetime(2012, 1, 1) + df1 = DataFrame({'a': 0, 'b': 1, 'c': 2}, + index=DatetimeIndex(start=start, end=end, freq='D')) + end += timedelta(days=1) + df2 = DataFrame({'a': 0, 'b': 1, 'c': 2}, + index=DatetimeIndex(start=start, end=end, freq='D')) + dfs = [df1] * 50 + [df2] * 50 + self.data_frames = dict(enumerate(dfs)) + + def time_from_dict(self): Panel.from_dict(self.data_frames)
There were two benchmarks that were essentially the same (dictionary of dataframes with the same index benchmark), so I removed one along with the usual flake8. ``` $ asv dev -b ^panel_ctor · Discovering benchmarks · Running 3 total benchmarks (1 commits * 1 environments * 3 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 33.33%] ··· Running panel_ctor.DifferentIndexes.time_from_dict 387ms [ 66.67%] ··· Running panel_ctor.SameIndexes.time_from_dict 32.0ms [100.00%] ··· Running panel_ctor.TwoIndexes.time_from_dict 105ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18915
2017-12-23T01:52:54Z
2017-12-23T20:03:47Z
2017-12-23T20:03:47Z
2017-12-24T05:54:36Z
DOC: update versionadded references of 0.22 to 0.23
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 86d2ec2254057..5f2e90e6ae4fe 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -568,7 +568,7 @@ standard database join operations between DataFrame objects: .. note:: Support for specifying index levels as the ``on``, ``left_on``, and - ``right_on`` parameters was added in version 0.22.0. + ``right_on`` parameters was added in version 0.23.0. The return type will be the same as ``left``. If ``left`` is a ``DataFrame`` and ``right`` is a subclass of DataFrame, the return type will still be diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 1b81d83bb76c7..e2b7b0e586d70 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -642,7 +642,7 @@ By default new columns will have ``np.uint8`` dtype. To choose another dtype use pd.get_dummies(df, dtype=bool).dtypes -.. versionadded:: 0.22.0 +.. versionadded:: 0.23.0 .. _reshaping.factorize: diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index f9bd6849c5072..845d0243c39e9 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -859,7 +859,7 @@ def rename_categories(self, new_categories, inplace=False): * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 .. 
warning:: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 65934494b321b..26257f6ecbc37 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -200,7 +200,7 @@ Notes ----- Support for specifying index levels as the `on`, `left_on`, and -`right_on` parameters was added in version 0.22.0 +`right_on` parameters was added in version 0.23.0 Examples -------- @@ -5094,7 +5094,7 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='', of DataFrame objects Support for specifying index levels as the `on` parameter was added - in version 0.22.0 + in version 0.23.0 Examples -------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d08fbf8593946..e9dd82eb64834 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1692,7 +1692,7 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, including the index (``index=False``) is only supported when orient is 'split' or 'table'. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Returns ------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5231dc2deb233..79de63b0caeb6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3785,7 +3785,7 @@ def drop(self, labels, errors='raise'): level : int or str, optional, default None Only return values from specified level (for MultiIndex) - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Returns ------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index cb786574909db..8b6121f360b76 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -554,7 +554,7 @@ def to_tuples(self, na_tuple=True): Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA value itself if False, ``nan``. 
- ..versionadded:: 0.22.0 + ..versionadded:: 0.23.0 Examples -------- diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index d5551c6c9f297..c2804c8f8e63e 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -200,7 +200,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): .. versionadded:: 0.20.0 - .. versionchanged:: 0.22.0 + .. versionchanged:: 0.23.0 When all suffixes are numeric, they are cast to int64/float64. Returns diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 5bb86885c0875..320ad109f01ba 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -731,7 +731,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, dtype : dtype, default np.uint8 Data type for new columns. Only a single dtype is allowed. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Returns ------- diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 4245b9eb641ba..6b8edbb146e4b 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -184,7 +184,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, conversion. May produce sigificant speed-up when parsing duplicate date strings, especially ones with timezone offsets. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Returns ------- diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 2dbfeab9cc331..97a739b349a98 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -132,7 +132,7 @@ nrows : int, default None Number of rows to parse - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 na_values : scalar, str, list-like, or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific @@ -150,7 +150,7 @@ format. skip_footer : int, default 0 - .. deprecated:: 0.22.0 + .. deprecated:: 0.23.0 Pass in `skipfooter` instead. 
skipfooter : int, default 0 Rows at the end to skip (0-indexed) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index daf05bb80d7ca..3af9e78a5aac4 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -795,7 +795,7 @@ def hide_index(self): """ Hide any indices from rendering. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Returns ------- @@ -808,7 +808,7 @@ def hide_columns(self, subset): """ Hide columns from rendering. - .. versionadded:: 0.22.0 + .. versionadded:: 0.23.0 Parameters ---------- diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index eaaa14e756e22..e431c9447e8f8 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -131,7 +131,7 @@ def read(self, path, columns=None, **kwargs): def _validate_write_lt_070(self, df): # Compatibility shim for pyarrow < 0.7.0 - # TODO: Remove in pandas 0.22.0 + # TODO: Remove in pandas 0.23.0 from pandas.core.indexes.multi import MultiIndex if isinstance(df.index, MultiIndex): msg = ( diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 4290001fea405..1cd98feb05ea0 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -209,7 +209,7 @@ def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs): kwds[k] = value # TODO: the below is only in place temporary until this module is removed. - kwargs.pop('freq', None) # freq removed in 0.22 + kwargs.pop('freq', None) # freq removed in 0.23 # how is a keyword that if not-None should be in kwds how = kwargs.pop('how', None) if how is not None:
Follow-up on https://github.com/pandas-dev/pandas/pull/18897
https://api.github.com/repos/pandas-dev/pandas/pulls/18911
2017-12-22T09:28:42Z
2017-12-22T14:14:18Z
2017-12-22T14:14:18Z
2017-12-22T14:14:22Z
CLN: ASV panel_methods
diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py index 6609305502011..9ee1949b311db 100644 --- a/asv_bench/benchmarks/panel_methods.py +++ b/asv_bench/benchmarks/panel_methods.py @@ -1,24 +1,19 @@ -from .pandas_vb_common import * +import numpy as np +from .pandas_vb_common import Panel, setup # noqa -class PanelMethods(object): - goal_time = 0.2 - - def setup(self): - self.index = date_range(start='2000', freq='D', periods=1000) - self.panel = Panel(np.random.randn(100, len(self.index), 1000)) - def time_pct_change_items(self): - self.panel.pct_change(1, axis='items') +class PanelMethods(object): - def time_pct_change_major(self): - self.panel.pct_change(1, axis='major') + goal_time = 0.2 + params = ['items', 'major', 'minor'] + param_names = ['axis'] - def time_pct_change_minor(self): - self.panel.pct_change(1, axis='minor') + def setup(self, axis): + self.panel = Panel(np.random.randn(100, 1000, 100)) - def time_shift(self): - self.panel.shift(1) + def time_pct_change(self, axis): + self.panel.pct_change(1, axis=axis) - def time_shift_minor(self): - self.panel.shift(1, axis='minor') + def time_shift(self, axis): + self.panel.shift(1, axis=axis)
Flake8'd, `param`'d, and this benchmark was timing out on my machine so I scaled down the size of the Panel but should be representative of larger Panels ``` asv dev -b ^panel_methods · Discovering benchmarks · Running 2 total benchmarks (1 commits * 1 environments * 2 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 50.00%] ··· Running panel_methods.PanelMethods.time_pct_change ok [ 50.00%] ···· ======= ======= axis ------- ------- items 1.54s major 1.39s minor 1.41s ======= ======= [100.00%] ··· Running panel_methods.PanelMethods.time_shift ok [100.00%] ···· ======= ======= axis ------- ------- items 397μs major 385μs minor 390μs ======= ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18907
2017-12-22T07:35:19Z
2017-12-22T14:28:40Z
2017-12-22T14:28:40Z
2017-12-22T16:45:53Z
CLN: ASV io benchmarks
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py new file mode 100644 index 0000000000000..a7c6c43d15026 --- /dev/null +++ b/asv_bench/benchmarks/io/excel.py @@ -0,0 +1,37 @@ +import numpy as np +from pandas import DataFrame, date_range, ExcelWriter, read_excel +from pandas.compat import BytesIO +import pandas.util.testing as tm + +from ..pandas_vb_common import BaseIO, setup # noqa + + +class Excel(object): + + goal_time = 0.2 + params = ['openpyxl', 'xlsxwriter', 'xlwt'] + param_names = ['engine'] + + def setup(self, engine): + N = 2000 + C = 5 + self.df = DataFrame(np.random.randn(N, C), + columns=['float{}'.format(i) for i in range(C)], + index=date_range('20000101', periods=N, freq='H')) + self.df['object'] = tm.makeStringIndex(N) + self.bio_read = BytesIO() + self.writer_read = ExcelWriter(self.bio_read, engine=engine) + self.df.to_excel(self.writer_read, sheet_name='Sheet1') + self.writer_read.save() + self.bio_read.seek(0) + + self.bio_write = BytesIO() + self.bio_write.seek(0) + self.writer_write = ExcelWriter(self.bio_write, engine=engine) + + def time_read_excel(self, engine): + read_excel(self.bio_read) + + def time_write_excel(self, engine): + self.df.to_excel(self.writer_write, sheet_name='Sheet1') + self.writer_write.save() diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/io/hdf.py similarity index 77% rename from asv_bench/benchmarks/hdfstore_bench.py rename to asv_bench/benchmarks/io/hdf.py index d7b3be25a18b9..5c0e9586c1cb5 100644 --- a/asv_bench/benchmarks/hdfstore_bench.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -1,11 +1,11 @@ import numpy as np -from pandas import DataFrame, Panel, date_range, HDFStore +from pandas import DataFrame, Panel, date_range, HDFStore, read_hdf import pandas.util.testing as tm -from .pandas_vb_common import BaseIO, setup # noqa +from ..pandas_vb_common import BaseIO, setup # noqa -class HDF5(BaseIO): +class HDFStoreDataFrame(BaseIO): goal_time = 0.2 @@ -34,9 
+34,9 @@ def setup(self): self.df_dc = DataFrame(np.random.randn(N, 10), columns=['C%03d' % i for i in range(10)]) - self.f = '__test__.h5' + self.fname = '__test__.h5' - self.store = HDFStore(self.f) + self.store = HDFStore(self.fname) self.store.put('fixed', self.df) self.store.put('fixed_mixed', self.df_mixed) self.store.append('table', self.df2) @@ -46,7 +46,7 @@ def setup(self): def teardown(self): self.store.close() - self.remove(self.f) + self.remove(self.fname) def time_read_store(self): self.store.get('fixed') @@ -99,25 +99,48 @@ def time_store_info(self): self.store.info() -class HDF5Panel(BaseIO): +class HDFStorePanel(BaseIO): goal_time = 0.2 def setup(self): - self.f = '__test__.h5' + self.fname = '__test__.h5' self.p = Panel(np.random.randn(20, 1000, 25), items=['Item%03d' % i for i in range(20)], major_axis=date_range('1/1/2000', periods=1000), minor_axis=['E%03d' % i for i in range(25)]) - self.store = HDFStore(self.f) + self.store = HDFStore(self.fname) self.store.append('p1', self.p) def teardown(self): self.store.close() - self.remove(self.f) + self.remove(self.fname) def time_read_store_table_panel(self): self.store.select('p1') def time_write_store_table_panel(self): self.store.append('p2', self.p) + + +class HDF(BaseIO): + + goal_time = 0.2 + params = ['table', 'fixed'] + param_names = ['format'] + + def setup(self, format): + self.fname = '__test__.h5' + N = 100000 + C = 5 + self.df = DataFrame(np.random.randn(N, C), + columns=['float{}'.format(i) for i in range(C)], + index=date_range('20000101', periods=N, freq='H')) + self.df['object'] = tm.makeStringIndex(N) + self.df.to_hdf(self.fname, 'df', format=format) + + def time_read_hdf(self, format): + read_hdf(self.fname, 'df') + + def time_write_hdf(self, format): + self.df.to_hdf(self.fname, 'df', format=format) diff --git a/asv_bench/benchmarks/io/msgpack.py b/asv_bench/benchmarks/io/msgpack.py new file mode 100644 index 0000000000000..8ccce01117ca4 --- /dev/null +++ 
b/asv_bench/benchmarks/io/msgpack.py @@ -0,0 +1,26 @@ +import numpy as np +from pandas import DataFrame, date_range, read_msgpack +import pandas.util.testing as tm + +from ..pandas_vb_common import BaseIO, setup # noqa + + +class MSGPack(BaseIO): + + goal_time = 0.2 + + def setup(self): + self.fname = '__test__.msg' + N = 100000 + C = 5 + self.df = DataFrame(np.random.randn(N, C), + columns=['float{}'.format(i) for i in range(C)], + index=date_range('20000101', periods=N, freq='H')) + self.df['object'] = tm.makeStringIndex(N) + self.df.to_msgpack(self.fname) + + def time_read_msgpack(self): + read_msgpack(self.fname) + + def time_write_msgpack(self): + self.df.to_msgpack(self.fname) diff --git a/asv_bench/benchmarks/io/pickle.py b/asv_bench/benchmarks/io/pickle.py new file mode 100644 index 0000000000000..2ad0fcca6eb26 --- /dev/null +++ b/asv_bench/benchmarks/io/pickle.py @@ -0,0 +1,26 @@ +import numpy as np +from pandas import DataFrame, date_range, read_pickle +import pandas.util.testing as tm + +from ..pandas_vb_common import BaseIO, setup # noqa + + +class Pickle(BaseIO): + + goal_time = 0.2 + + def setup(self): + self.fname = '__test__.pkl' + N = 100000 + C = 5 + self.df = DataFrame(np.random.randn(N, C), + columns=['float{}'.format(i) for i in range(C)], + index=date_range('20000101', periods=N, freq='H')) + self.df['object'] = tm.makeStringIndex(N) + self.df.to_pickle(self.fname) + + def time_read_pickle(self): + read_pickle(self.fname) + + def time_write_pickle(self): + self.df.to_pickle(self.fname) diff --git a/asv_bench/benchmarks/io/sas.py b/asv_bench/benchmarks/io/sas.py new file mode 100644 index 0000000000000..526c524de7fff --- /dev/null +++ b/asv_bench/benchmarks/io/sas.py @@ -0,0 +1,21 @@ +import os + +from pandas import read_sas + + +class SAS(object): + + goal_time = 0.2 + params = ['sas7bdat', 'xport'] + param_names = ['format'] + + def setup(self, format): + # Read files that are located in 'pandas/io/tests/sas/data' + files = {'sas7bdat': 
'test1.sas7bdat', 'xport': 'paxraw_d_short.xpt'} + file = files[format] + paths = [os.path.dirname(__file__), '..', '..', '..', 'pandas', + 'tests', 'io', 'sas', 'data', file] + self.f = os.path.join(*paths) + + def time_read_msgpack(self, format): + read_sas(self.f, format=format) diff --git a/asv_bench/benchmarks/io/sql.py b/asv_bench/benchmarks/io/sql.py new file mode 100644 index 0000000000000..ef4e501e5f3b9 --- /dev/null +++ b/asv_bench/benchmarks/io/sql.py @@ -0,0 +1,132 @@ +import sqlite3 + +import numpy as np +import pandas.util.testing as tm +from pandas import DataFrame, date_range, read_sql_query, read_sql_table +from sqlalchemy import create_engine + +from ..pandas_vb_common import setup # noqa + + +class SQL(object): + + goal_time = 0.2 + params = ['sqlalchemy', 'sqlite'] + param_names = ['connection'] + + def setup(self, connection): + N = 10000 + con = {'sqlalchemy': create_engine('sqlite:///:memory:'), + 'sqlite': sqlite3.connect(':memory:')} + self.table_name = 'test_type' + self.query_all = 'SELECT * FROM {}'.format(self.table_name) + self.con = con[connection] + self.df = DataFrame({'float': np.random.randn(N), + 'float_with_nan': np.random.randn(N), + 'string': ['foo'] * N, + 'bool': [True] * N, + 'int': np.random.randint(0, N, size=N), + 'datetime': date_range('2000-01-01', + periods=N, + freq='s')}, + index=tm.makeStringIndex(N)) + self.df.loc[1000:3000, 'float_with_nan'] = np.nan + self.df['datetime_string'] = self.df['datetime'].astype(str) + self.df.to_sql(self.table_name, self.con, if_exists='replace') + + def time_to_sql_dataframe(self, connection): + self.df.to_sql('test1', self.con, if_exists='replace') + + def time_read_sql_query(self, connection): + read_sql_query(self.query_all, self.con) + + +class WriteSQLDtypes(object): + + goal_time = 0.2 + params = (['sqlalchemy', 'sqlite'], + ['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime']) + param_names = ['connection', 'dtype'] + + def setup(self, connection, dtype): + N = 
10000 + con = {'sqlalchemy': create_engine('sqlite:///:memory:'), + 'sqlite': sqlite3.connect(':memory:')} + self.table_name = 'test_type' + self.query_col = 'SELECT {} FROM {}'.format(dtype, self.table_name) + self.con = con[connection] + self.df = DataFrame({'float': np.random.randn(N), + 'float_with_nan': np.random.randn(N), + 'string': ['foo'] * N, + 'bool': [True] * N, + 'int': np.random.randint(0, N, size=N), + 'datetime': date_range('2000-01-01', + periods=N, + freq='s')}, + index=tm.makeStringIndex(N)) + self.df.loc[1000:3000, 'float_with_nan'] = np.nan + self.df['datetime_string'] = self.df['datetime'].astype(str) + self.df.to_sql(self.table_name, self.con, if_exists='replace') + + def time_to_sql_dataframe_column(self, connection, dtype): + self.df[[dtype]].to_sql('test1', self.con, if_exists='replace') + + def time_read_sql_query_select_column(self, connection, dtype): + read_sql_query(self.query_col, self.con) + + +class ReadSQLTable(object): + + goal_time = 0.2 + + def setup(self): + N = 10000 + self.table_name = 'test' + self.con = create_engine('sqlite:///:memory:') + self.df = DataFrame({'float': np.random.randn(N), + 'float_with_nan': np.random.randn(N), + 'string': ['foo'] * N, + 'bool': [True] * N, + 'int': np.random.randint(0, N, size=N), + 'datetime': date_range('2000-01-01', + periods=N, + freq='s')}, + index=tm.makeStringIndex(N)) + self.df.loc[1000:3000, 'float_with_nan'] = np.nan + self.df['datetime_string'] = self.df['datetime'].astype(str) + self.df.to_sql(self.table_name, self.con, if_exists='replace') + + def time_read_sql_table_all(self): + read_sql_table(self.table_name, self.con) + + def time_read_sql_table_parse_dates(self): + read_sql_table(self.table_name, self.con, columns=['datetime_string'], + parse_dates=['datetime_string']) + + +class ReadSQLTableDtypes(object): + + goal_time = 0.2 + + params = ['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime'] + param_names = ['dtype'] + + def setup(self, dtype): + N = 10000 + 
self.table_name = 'test' + self.con = create_engine('sqlite:///:memory:') + self.df = DataFrame({'float': np.random.randn(N), + 'float_with_nan': np.random.randn(N), + 'string': ['foo'] * N, + 'bool': [True] * N, + 'int': np.random.randint(0, N, size=N), + 'datetime': date_range('2000-01-01', + periods=N, + freq='s')}, + index=tm.makeStringIndex(N)) + self.df.loc[1000:3000, 'float_with_nan'] = np.nan + self.df['datetime_string'] = self.df['datetime'].astype(str) + self.df.to_sql(self.table_name, self.con, if_exists='replace') + + def time_read_sql_table_column(self, dtype): + read_sql_table(self.table_name, self.con, columns=[dtype]) diff --git a/asv_bench/benchmarks/io/stata.py b/asv_bench/benchmarks/io/stata.py new file mode 100644 index 0000000000000..e0f5752ca930f --- /dev/null +++ b/asv_bench/benchmarks/io/stata.py @@ -0,0 +1,37 @@ +import numpy as np +from pandas import DataFrame, date_range, read_stata +import pandas.util.testing as tm + +from ..pandas_vb_common import BaseIO, setup # noqa + + +class Stata(BaseIO): + + goal_time = 0.2 + params = ['tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'] + param_names = ['convert_dates'] + + def setup(self, convert_dates): + self.fname = '__test__.dta' + N = 100000 + C = 5 + self.df = DataFrame(np.random.randn(N, C), + columns=['float{}'.format(i) for i in range(C)], + index=date_range('20000101', periods=N, freq='H')) + self.df['object'] = tm.makeStringIndex(N) + self.df['int8_'] = np.random.randint(np.iinfo(np.int8).min, + np.iinfo(np.int8).max - 27, N) + self.df['int16_'] = np.random.randint(np.iinfo(np.int16).min, + np.iinfo(np.int16).max - 27, N) + self.df['int32_'] = np.random.randint(np.iinfo(np.int32).min, + np.iinfo(np.int32).max - 27, N) + self.df['float32_'] = np.array(np.random.randn(N), + dtype=np.float32) + self.convert_dates = {'index': convert_dates} + self.df.to_stata(self.fname, self.convert_dates) + + def time_read_stata(self, convert_dates): + read_stata(self.fname) + + def time_write_stata(self, 
convert_dates): + self.df.to_stata(self.fname, self.convert_dates) diff --git a/asv_bench/benchmarks/io_sql.py b/asv_bench/benchmarks/io_sql.py deleted file mode 100644 index ec855e5d33525..0000000000000 --- a/asv_bench/benchmarks/io_sql.py +++ /dev/null @@ -1,105 +0,0 @@ -import sqlalchemy -from .pandas_vb_common import * -import sqlite3 -from sqlalchemy import create_engine - - -#------------------------------------------------------------------------------- -# to_sql - -class WriteSQL(object): - goal_time = 0.2 - - def setup(self): - self.engine = create_engine('sqlite:///:memory:') - self.con = sqlite3.connect(':memory:') - self.index = tm.makeStringIndex(10000) - self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index) - - def time_fallback(self): - self.df.to_sql('test1', self.con, if_exists='replace') - - def time_sqlalchemy(self): - self.df.to_sql('test1', self.engine, if_exists='replace') - - -#------------------------------------------------------------------------------- -# read_sql - -class ReadSQL(object): - goal_time = 0.2 - - def setup(self): - self.engine = create_engine('sqlite:///:memory:') - self.con = sqlite3.connect(':memory:') - self.index = tm.makeStringIndex(10000) - self.df = DataFrame({'float1': randn(10000), 'float2': randn(10000), 'string1': (['foo'] * 10000), 'bool1': ([True] * 10000), 'int1': np.random.randint(0, 100000, size=10000), }, index=self.index) - self.df.to_sql('test2', self.engine, if_exists='replace') - self.df.to_sql('test2', self.con, if_exists='replace') - - def time_read_query_fallback(self): - read_sql_query('SELECT * FROM test2', self.con) - - def time_read_query_sqlalchemy(self): - read_sql_query('SELECT * FROM test2', self.engine) - - def time_read_table_sqlalchemy(self): - read_sql_table('test2', self.engine) - - 
-#------------------------------------------------------------------------------- -# type specific write - -class WriteSQLTypes(object): - goal_time = 0.2 - - def setup(self): - self.engine = create_engine('sqlite:///:memory:') - self.con = sqlite3.connect(':memory:') - self.df = DataFrame({'float': randn(10000), 'string': (['foo'] * 10000), 'bool': ([True] * 10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), }) - self.df.loc[1000:3000, 'float'] = np.nan - - def time_string_fallback(self): - self.df[['string']].to_sql('test_string', self.con, if_exists='replace') - - def time_string_sqlalchemy(self): - self.df[['string']].to_sql('test_string', self.engine, if_exists='replace') - - def time_float_fallback(self): - self.df[['float']].to_sql('test_float', self.con, if_exists='replace') - - def time_float_sqlalchemy(self): - self.df[['float']].to_sql('test_float', self.engine, if_exists='replace') - - def time_datetime_sqlalchemy(self): - self.df[['datetime']].to_sql('test_datetime', self.engine, if_exists='replace') - - -#------------------------------------------------------------------------------- -# type specific read - -class ReadSQLTypes(object): - goal_time = 0.2 - - def setup(self): - self.engine = create_engine('sqlite:///:memory:') - self.con = sqlite3.connect(':memory:') - self.df = DataFrame({'float': randn(10000), 'datetime': date_range('2000-01-01', periods=10000, freq='s'), }) - self.df['datetime_string'] = self.df['datetime'].map(str) - self.df.to_sql('test_type', self.engine, if_exists='replace') - self.df[['float', 'datetime_string']].to_sql('test_type', self.con, if_exists='replace') - - def time_datetime_read_and_parse_sqlalchemy(self): - read_sql_table('test_type', self.engine, columns=['datetime_string'], parse_dates=['datetime_string']) - - def time_datetime_read_as_native_sqlalchemy(self): - read_sql_table('test_type', self.engine, columns=['datetime']) - - def time_float_read_query_fallback(self): - read_sql_query('SELECT 
float FROM test_type', self.con) - - def time_float_read_query_sqlalchemy(self): - read_sql_query('SELECT float FROM test_type', self.engine) - - def time_float_read_table_sqlalchemy(self): - read_sql_table('test_type', self.engine, columns=['float']) diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py deleted file mode 100644 index 7b6cefc56f0da..0000000000000 --- a/asv_bench/benchmarks/packers.py +++ /dev/null @@ -1,243 +0,0 @@ -from .pandas_vb_common import * -from numpy.random import randint -import pandas as pd -from collections import OrderedDict -from pandas.compat import BytesIO -import sqlite3 -import os -from sqlalchemy import create_engine -import numpy as np -from random import randrange - - -class _Packers(object): - goal_time = 0.2 - - def _setup(self): - self.f = '__test__.msg' - self.N = 100000 - self.C = 5 - self.index = date_range('20000101', periods=self.N, freq='H') - self.df = DataFrame({'float{0}'.format(i): randn(self.N) for i in range(self.C)}, index=self.index) - self.df2 = self.df.copy() - self.df2['object'] = [('%08x' % randrange((16 ** 8))) for _ in range(self.N)] - self.remove(self.f) - - def remove(self, f): - try: - os.remove(f) - except: - pass - - def teardown(self): - self.remove(self.f) - - -class Packers(_Packers): - - def setup(self): - self._setup() - self.df.to_csv(self.f) - - def time_packers_read_csv(self): - pd.read_csv(self.f) - - -class packers_read_excel(_Packers): - - def setup(self): - self._setup() - self.bio = BytesIO() - self.writer = pd.io.excel.ExcelWriter(self.bio, engine='xlsxwriter') - self.df[:2000].to_excel(self.writer) - self.writer.save() - - def time_packers_read_excel(self): - self.bio.seek(0) - pd.read_excel(self.bio) - - -class packers_read_hdf_store(_Packers): - - def setup(self): - self._setup() - self.df2.to_hdf(self.f, 'df') - - def time_packers_read_hdf_store(self): - pd.read_hdf(self.f, 'df') - - -class packers_read_hdf_table(_Packers): - - def setup(self): - 
self._setup() - self.df2.to_hdf(self.f, 'df', format='table') - - def time_packers_read_hdf_table(self): - pd.read_hdf(self.f, 'df') - - -class packers_read_pack(_Packers): - - def setup(self): - self._setup() - self.df2.to_msgpack(self.f) - - def time_packers_read_pack(self): - pd.read_msgpack(self.f) - - -class packers_read_pickle(_Packers): - - def setup(self): - self._setup() - self.df2.to_pickle(self.f) - - def time_packers_read_pickle(self): - pd.read_pickle(self.f) - - -class packers_read_sql(_Packers): - - def setup(self): - self._setup() - self.engine = create_engine('sqlite:///:memory:') - self.df2.to_sql('table', self.engine, if_exists='replace') - - def time_packers_read_sql(self): - pd.read_sql_table('table', self.engine) - - -class packers_read_stata(_Packers): - - def setup(self): - self._setup() - self.df.to_stata(self.f, {'index': 'tc', }) - - def time_packers_read_stata(self): - pd.read_stata(self.f) - - -class packers_read_stata_with_validation(_Packers): - - def setup(self): - self._setup() - self.df['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)] - self.df['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)] - self.df['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)] - self.df['float32_'] = np.array(randn(self.N), dtype=np.float32) - self.df.to_stata(self.f, {'index': 'tc', }) - - def time_packers_read_stata_with_validation(self): - pd.read_stata(self.f) - - -class packers_read_sas(_Packers): - - def setup(self): - - testdir = os.path.join(os.path.dirname(__file__), '..', '..', - 'pandas', 'tests', 'io', 'sas') - if not os.path.exists(testdir): - testdir = os.path.join(os.path.dirname(__file__), '..', '..', - 'pandas', 'io', 'tests', 'sas') - self.f = os.path.join(testdir, 'data', 'test1.sas7bdat') - self.f2 = os.path.join(testdir, 'data', 'paxraw_d_short.xpt') - - def time_read_sas7bdat(self): - 
pd.read_sas(self.f, format='sas7bdat') - - def time_read_xport(self): - pd.read_sas(self.f2, format='xport') - - -class CSV(_Packers): - - def setup(self): - self._setup() - - def time_write_csv(self): - self.df.to_csv(self.f) - - -class Excel(_Packers): - - def setup(self): - self._setup() - self.bio = BytesIO() - - def time_write_excel_openpyxl(self): - self.bio.seek(0) - self.writer = pd.io.excel.ExcelWriter(self.bio, engine='openpyxl') - self.df[:2000].to_excel(self.writer) - self.writer.save() - - def time_write_excel_xlsxwriter(self): - self.bio.seek(0) - self.writer = pd.io.excel.ExcelWriter(self.bio, engine='xlsxwriter') - self.df[:2000].to_excel(self.writer) - self.writer.save() - - def time_write_excel_xlwt(self): - self.bio.seek(0) - self.writer = pd.io.excel.ExcelWriter(self.bio, engine='xlwt') - self.df[:2000].to_excel(self.writer) - self.writer.save() - - -class HDF(_Packers): - - def setup(self): - self._setup() - - def time_write_hdf_store(self): - self.df2.to_hdf(self.f, 'df') - - def time_write_hdf_table(self): - self.df2.to_hdf(self.f, 'df', table=True) - - -class MsgPack(_Packers): - - def setup(self): - self._setup() - - def time_write_msgpack(self): - self.df2.to_msgpack(self.f) - - -class Pickle(_Packers): - - def setup(self): - self._setup() - - def time_write_pickle(self): - self.df2.to_pickle(self.f) - - -class SQL(_Packers): - - def setup(self): - self._setup() - self.engine = create_engine('sqlite:///:memory:') - - def time_write_sql(self): - self.df2.to_sql('table', self.engine, if_exists='replace') - - -class STATA(_Packers): - - def setup(self): - self._setup() - - self.df3=self.df.copy() - self.df3['int8_'] = [randint(np.iinfo(np.int8).min, (np.iinfo(np.int8).max - 27)) for _ in range(self.N)] - self.df3['int16_'] = [randint(np.iinfo(np.int16).min, (np.iinfo(np.int16).max - 27)) for _ in range(self.N)] - self.df3['int32_'] = [randint(np.iinfo(np.int32).min, (np.iinfo(np.int32).max - 27)) for _ in range(self.N)] - self.df3['float32_'] 
= np.array(randn(self.N), dtype=np.float32) - - def time_write_stata(self): - self.df.to_stata(self.f, {'index': 'tc', }) - - def time_write_stata_with_validation(self): - self.df3.to_stata(self.f, {'index': 'tc', })
[xref](https://github.com/pandas-dev/pandas/pull/18815#issuecomment-352723075) Consolidated the benchmarks from `io_sql.py`, `hdfstore_bench.py`, and `packers.py` into their own files in the `io` folder. Benchmarks are largely the same as they were, just cleaned and simplified where able. ``` asv dev -b ^io · Discovering benchmarks · Running 67 total benchmarks (1 commits * 1 environments * 67 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 1.49%] ··· Running io.csv.ReadCSVCategorical.time_convert_direct 98.5ms [ 2.99%] ··· Running io.csv.ReadCSVCategorical.time_convert_post 142ms [ 4.48%] ··· Running io.csv.ReadCSVComment.time_comment 53.6ms [ 5.97%] ··· Running io.csv.ReadCSVDInferDatetimeFormat.time_read_csv ok [ 5.97%] ···· ======================= ======== ========= ======== -- format ----------------------- --------------------------- infer_datetime_format custom iso8601 ymd ======================= ======== ========= ======== True 24.4ms 4.73ms 4.99ms False 662ms 3.50ms 3.23ms ======================= ======== ========= ======== [ 7.46%] ··· Running io.csv.ReadCSVFloatPrecision.time_read_csv ok [ 7.46%] ···· ===== ========== ========== ================ ========== ========== ================ -- decimal / float_precision ----- ----------------------------------------------------------------------------- sep . / None . / high . 
/ round_trip _ / None _ / high _ / round_trip ===== ========== ========== ================ ========== ========== ================ , 3.91ms 3.77ms 5.52ms 4.25ms 4.19ms 4.20ms ; 3.86ms 3.73ms 5.56ms 4.16ms 4.23ms 4.13ms ===== ========== ========== ================ ========== ========== ================ [ 8.96%] ··· Running io.csv.ReadCSVFloatPrecision.time_read_csv_python_engine ok [ 8.96%] ···· ===== ========== ========== ================ ========== ========== ================ -- decimal / float_precision ----- ----------------------------------------------------------------------------- sep . / None . / high . / round_trip _ / None _ / high _ / round_trip ===== ========== ========== ================ ========== ========== ================ , 8.54ms 8.44ms 8.50ms 7.03ms 6.99ms 6.99ms ; 8.46ms 8.53ms 8.42ms 7.00ms 7.09ms 7.00ms ===== ========== ========== ================ ========== ========== ================ [ 10.45%] ··· Running io.csv.ReadCSVParseDates.time_baseline 2.86ms [ 11.94%] ··· Running io.csv.ReadCSVParseDates.time_multiple_date 2.85ms [ 13.43%] ··· Running io.csv.ReadCSVSkipRows.time_skipprows ok [ 13.43%] ···· ========== ======== skiprows ---------- -------- None 46.1ms 10000 31.4ms ========== ======== [ 14.93%] ··· Running io.csv.ReadCSVThousands.time_thousands ok [ 14.93%] ···· ===== ======== ======== -- thousands ----- ----------------- sep None , ===== ======== ======== , 38.6ms 35.6ms | 38.9ms 38.6ms ===== ======== ======== [ 16.42%] ··· Running io.csv.ReadUint64Integers.time_read_uint64 8.95ms [ 17.91%] ··· Running io.csv.ReadUint64Integers.time_read_uint64_na_values 13.0ms [ 19.40%] ··· Running io.csv.ReadUint64Integers.time_read_uint64_neg_values 13.0ms [ 20.90%] ··· Running io.csv.S3.time_read_csv_10_rows ok [ 20.90%] ···· ============= ======== ======= -- engine ------------- ---------------- compression python c ============= ======== ======= None 8.43s 6.27s gzip 6.80s 7.74s bz2 36.6s n/a ============= ======== ======= [ 22.39%] ··· Running 
io.csv.ToCSV.time_frame ok [ 22.39%] ···· ======= ======== kind ------- -------- wide 89.1ms long 172ms mixed 38.6ms ======= ======== [ 23.88%] ··· Running io.csv.ToCSVDatetime.time_frame_date_formatting 23.1ms [ 25.37%] ··· Running io.excel.Excel.time_read_excel ok [ 25.37%] ···· ============ ======= engine ------------ ------- openpyxl 565ms xlsxwriter 569ms xlwt 158ms ============ ======= [ 26.87%] ··· Running io.excel.Excel.time_write_excel ok [ 26.87%] ···· ============ ======= engine ------------ ------- openpyxl 1.24s xlsxwriter 970ms xlwt 704ms ============ ======= [ 28.36%] ··· Running io.hdf.HDF.time_read_hdf ok [ 28.36%] ···· ======== ======== format -------- -------- table 63.0ms fixed 79.8ms ======== ======== [ 29.85%] ··· Running io.hdf.HDF.time_write_hdf ok [ 29.85%] ···· ======== ======= format -------- ------- table 121ms fixed 149ms ======== ======= [ 31.34%] ··· Running io.hdf.HDFStoreDataFrame.time_query_store_table 26.4ms [ 32.84%] ··· Running io.hdf.HDFStoreDataFrame.time_query_store_table_wide 33.2ms [ 34.33%] ··· Running io.hdf.HDFStoreDataFrame.time_read_store 12.1ms [ 35.82%] ··· Running io.hdf.HDFStoreDataFrame.time_read_store_mixed 25.4ms [ 37.31%] ··· Running io.hdf.HDFStoreDataFrame.time_read_store_table 18.0ms [ 38.81%] ··· Running io.hdf.HDFStoreDataFrame.time_read_store_table_mixed 36.6ms [ 40.30%] ··· Running io.hdf.HDFStoreDataFrame.time_read_store_table_wide 30.1ms [ 41.79%] ··· Running io.hdf.HDFStoreDataFrame.time_store_info 53.2ms [ 43.28%] ··· Running io.hdf.HDFStoreDataFrame.time_store_repr 112μs [ 44.78%] ··· Running io.hdf.HDFStoreDataFrame.time_store_str 109μs [ 46.27%] ··· Running io.hdf.HDFStoreDataFrame.time_write_store 13.6ms [ 47.76%] ··· Running io.hdf.HDFStoreDataFrame.time_write_store_mixed 31.6ms [ 49.25%] ··· Running io.hdf.HDFStoreDataFrame.time_write_store_table 45.1ms [ 50.75%] ··· Running io.hdf.HDFStoreDataFrame.time_write_store_table_dc 357ms [ 52.24%] ··· Running 
io.hdf.HDFStoreDataFrame.time_write_store_table_mixed 56.7ms [ 53.73%] ··· Running io.hdf.HDFStoreDataFrame.time_write_store_table_wide 160ms [ 55.22%] ··· Running io.hdf.HDFStorePanel.time_read_store_table_panel 55.2ms [ 56.72%] ··· Running io.hdf.HDFStorePanel.time_write_store_table_panel 90.3ms [ 58.21%] ··· Running io.json.ReadJSON.time_read_json ok [ 58.21%] ···· ========= ======= ========== -- index --------- ------------------ orient int datetime ========= ======= ========== split 253ms 270ms index 7.80s 7.97s records 619ms 627ms ========= ======= ========== [ 59.70%] ··· Running io.json.ReadJSONLines.peakmem_read_json_lines ok [ 59.70%] ···· ========== ====== index ---------- ------ int 192M datetime 192M ========== ====== [ 61.19%] ··· Running io.json.ReadJSONLines.peakmem_read_json_lines_concat ok [ 61.19%] ···· ========== ====== index ---------- ------ int 164M datetime 164M ========== ====== [ 62.69%] ··· Running io.json.ReadJSONLines.time_read_json_lines ok [ 62.69%] ···· ========== ======= index ---------- ------- int 734ms datetime 740ms ========== ======= [ 64.18%] ··· Running io.json.ReadJSONLines.time_read_json_lines_concat ok [ 64.18%] ···· ========== ======= index ---------- ------- int 767ms datetime 763ms ========== ======= [ 65.67%] ··· Running io.json.ToJSON.time_delta_int_tstamp ok [ 65.67%] ···· ========= ======= orient --------- ------- split 347ms columns 353ms index 400ms ========= ======= [ 67.16%] ··· Running io.json.ToJSON.time_delta_int_tstamp_lines ok [ 67.16%] ···· ========= ======= orient --------- ------- split 634ms columns 643ms index 632ms ========= ======= [ 68.66%] ··· Running io.json.ToJSON.time_float_int ok [ 68.66%] ···· ========= ======= orient --------- ------- split 232ms columns 233ms index 389ms ========= ======= [ 70.15%] ··· Running io.json.ToJSON.time_float_int_lines ok [ 70.15%] ···· ========= ======= orient --------- ------- split 684ms columns 686ms index 685ms ========= ======= [ 71.64%] ··· Running 
io.json.ToJSON.time_float_int_str ok [ 71.64%] ···· ========= ======= orient --------- ------- split 354ms columns 231ms index 411ms ========= ======= [ 73.13%] ··· Running io.json.ToJSON.time_float_int_str_lines ok [ 73.13%] ···· ========= ======= orient --------- ------- split 713ms columns 713ms index 714ms ========= ======= [ 74.63%] ··· Running io.json.ToJSON.time_floats_with_dt_index ok [ 74.63%] ···· ========= ======= orient --------- ------- split 191ms columns 222ms index 220ms ========= ======= [ 76.12%] ··· Running io.json.ToJSON.time_floats_with_dt_index_lines ok [ 76.12%] ···· ========= ======= orient --------- ------- split 531ms columns 527ms index 533ms ========= ======= [ 77.61%] ··· Running io.json.ToJSON.time_floats_with_int_idex_lines ok [ 77.61%] ···· ========= ======= orient --------- ------- split 525ms columns 525ms index 524ms ========= ======= [ 79.10%] ··· Running io.json.ToJSON.time_floats_with_int_index ok [ 79.10%] ···· ========= ======= orient --------- ------- split 167ms columns 183ms index 194ms ========= ======= [ 80.60%] ··· Running io.msgpack.MSGPack.time_read_msgpack 48.2ms [ 82.09%] ··· Running io.msgpack.MSGPack.time_write_msgpack 73.5ms [ 83.58%] ··· Running io.pickle.Pickle.time_read_pickle 110ms [ 85.07%] ··· Running io.pickle.Pickle.time_write_pickle 157ms [ 86.57%] ··· Running io.sas.SAS.time_read_msgpack ok [ 86.57%] ···· ========== ======== format ---------- -------- sas7bdat 608ms xport 9.92ms ========== ======== [ 88.06%] ··· Running io.sql.ReadSQLTable.time_read_sql_table_all ok [ 88.06%] ···· ================ ======= dtype ---------------- ------- float 101ms float_with_nan 101ms string 101ms bool 102ms int 101ms datetime 101ms ================ ======= [ 89.55%] ··· Running io.sql.ReadSQLTable.time_read_sql_table_column ok [ 89.55%] ···· ================ ======== dtype ---------------- -------- float 24.1ms float_with_nan 23.0ms string 25.1ms bool 23.8ms int 25.6ms datetime 46.6ms ================ ======== [ 
91.04%] ··· Running io.sql.ReadSQLTable.time_read_sql_table_parse_dates ok [ 91.04%] ···· ================ ======== dtype ---------------- -------- float 31.5ms float_with_nan 31.9ms string 31.0ms bool 30.9ms int 31.2ms datetime 31.5ms ================ ======== [ 92.54%] ··· Running io.sql.SQL.time_read_sql_query_select_all ok [ 92.54%] ···· ============ ======== ================ ======== ======== ======== ========== -- dtype ------------ --------------------------------------------------------------- connection float float_with_nan string bool int datetime ============ ======== ================ ======== ======== ======== ========== sqlalchemy 70.6ms 71.0ms 70.8ms 70.4ms 70.6ms 71.5ms sqlite 58.7ms 58.1ms 58.0ms 57.7ms 58.5ms 58.2ms ============ ======== ================ ======== ======== ======== ========== [ 94.03%] ··· Running io.sql.SQL.time_read_sql_query_select_column ok [ 94.03%] ···· ============ ======== ================ ======== ======== ======== ========== -- dtype ------------ --------------------------------------------------------------- connection float float_with_nan string bool int datetime ============ ======== ================ ======== ======== ======== ========== sqlalchemy 71.6ms 70.6ms 71.5ms 70.8ms 70.8ms 70.4ms sqlite 58.4ms 58.8ms 58.4ms 57.8ms 57.7ms 58.1ms ============ ======== ================ ======== ======== ======== ========== [ 95.52%] ··· Running io.sql.SQL.time_to_sql_dataframe_colums ok [ 95.52%] ···· ============ ======== ================ ======== ======== ======== ========== -- dtype ------------ --------------------------------------------------------------- connection float float_with_nan string bool int datetime ============ ======== ================ ======== ======== ======== ========== sqlalchemy 140ms 149ms 136ms 148ms 135ms 257ms sqlite 54.4ms 63.2ms 55.5ms 92.4ms 52.7ms 118ms ============ ======== ================ ======== ======== ======== ========== [ 97.01%] ··· Running io.sql.SQL.time_to_sql_dataframe_full ok [ 
97.01%] ···· ============ ======= ================ ======== ======= ======= ========== -- dtype ------------ ------------------------------------------------------------ connection float float_with_nan string bool int datetime ============ ======= ================ ======== ======= ======= ========== sqlalchemy 435ms 440ms 433ms 436ms 436ms 435ms sqlite 185ms 186ms 185ms 186ms 185ms 186ms ============ ======= ================ ======== ======= ======= ========== [ 98.51%] ··· Running io.stata.Stata.time_read_stata ok [ 98.51%] ···· =============== ======= convert_dates --------------- ------- tc 514ms td 510ms tm 1.35s tw 1.27s th 1.36s tq 1.35s ty 1.76s =============== ======= [100.00%] ··· Running io.stata.Stata.time_write_stata ok [100.00%] ···· =============== ======= convert_dates --------------- ------- tc 593ms td 592ms tm 603ms tw 1.34s th 624ms tq 616ms ty 592ms =============== ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18906
2017-12-22T07:07:12Z
2017-12-23T20:04:44Z
2017-12-23T20:04:44Z
2017-12-24T05:55:16Z
Fixed read_json int overflow
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 40e1e2011479c..348c1c6dafbcb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -310,6 +310,7 @@ I/O - Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) - Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) - Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`) +- Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`) - Plotting diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 0e0aae0506809..bb435c625ff35 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -724,7 +724,7 @@ def _try_convert_to_date(self, data): if new_data.dtype == 'object': try: new_data = data.astype('int64') - except (TypeError, ValueError): + except (TypeError, ValueError, OverflowError): pass # ignore numbers that are out of range diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 7cf3d6cd7b612..10139eb07a925 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1074,6 +1074,20 @@ def test_read_jsonl_unicode_chars(self): columns=['a', 'b']) assert_frame_equal(result, expected) + def test_read_json_large_numbers(self): + # GH18842 + json = '{"articleId": "1404366058080022500245"}' + json = StringIO(json) + result = read_json(json, typ="series") + expected = Series(1.404366e+21, index=['articleId']) + assert_series_equal(result, expected) + + json = '{"0": {"articleId": "1404366058080022500245"}}' + json = StringIO(json) + result = read_json(json) + expected = DataFrame(1.404366e+21, index=['articleId'], columns=[0]) + assert_frame_equal(result, expected) + def 
test_to_jsonl(self): # GH9180 df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
- [X] closes #18842 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18905
2017-12-22T02:11:00Z
2017-12-27T20:20:25Z
2017-12-27T20:20:25Z
2018-02-27T01:32:14Z
DEPR: convert_datetime64 parameter in to_records()
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 408a52e0526ee..856e36fc24202 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -842,6 +842,7 @@ Deprecations - ``pandas.tseries.plotting.tsplot`` is deprecated. Use :func:`Series.plot` instead (:issue:`18627`) - ``Index.summary()`` is deprecated and will be removed in a future version (:issue:`18217`) - ``NDFrame.get_ftype_counts()`` is deprecated and will be removed in a future version (:issue:`18243`) +- The ``convert_datetime64`` parameter in :func:`DataFrame.to_records` has been deprecated and will be removed in a future version. The NumPy bug motivating this parameter has been resolved. The default value for this parameter has also changed from ``True`` to ``None`` (:issue:`18160`). .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9e57579ddfc05..7c0e367e74ffa 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1311,7 +1311,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, return cls(mgr) - def to_records(self, index=True, convert_datetime64=True): + def to_records(self, index=True, convert_datetime64=None): """ Convert DataFrame to a NumPy record array. @@ -1322,7 +1322,9 @@ def to_records(self, index=True, convert_datetime64=True): ---------- index : boolean, default True Include index in resulting record array, stored in 'index' field. - convert_datetime64 : boolean, default True + convert_datetime64 : boolean, default None + .. deprecated:: 0.23.0 + Whether to convert the index to datetime.datetime if it is a DatetimeIndex. 
@@ -1376,6 +1378,13 @@ def to_records(self, index=True, convert_datetime64=True): ('2018-01-01T09:01:00.000000000', 2, 0.75)], dtype=[('index', '<M8[ns]'), ('A', '<i8'), ('B', '<f8')]) """ + + if convert_datetime64 is not None: + warnings.warn("The 'convert_datetime64' parameter is " + "deprecated and will be removed in a future " + "version", + FutureWarning, stacklevel=2) + if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 82dadacd5b1ac..32b8a6e2b6b86 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -79,10 +79,23 @@ def test_to_records_dt64(self): df = DataFrame([["one", "two", "three"], ["four", "five", "six"]], index=date_range("2012-01-01", "2012-01-02")) - assert df.to_records()['index'][0] == df.index[0] - rs = df.to_records(convert_datetime64=False) - assert rs['index'][0] == df.index.values[0] + # convert_datetime64 defaults to None + expected = df.index.values[0] + result = df.to_records()['index'][0] + assert expected == result + + # check for FutureWarning if convert_datetime64=False is passed + with tm.assert_produces_warning(FutureWarning): + expected = df.index.values[0] + result = df.to_records(convert_datetime64=False)['index'][0] + assert expected == result + + # check for FutureWarning if convert_datetime64=True is passed + with tm.assert_produces_warning(FutureWarning): + expected = df.index[0] + result = df.to_records(convert_datetime64=True)['index'][0] + assert expected == result def test_to_records_with_multindex(self): # GH3189
- [x] closes #18160 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry As noted in the original issue, the underlying NumPy bug seems to be fixed so this parameter may no longer be needed. I also changed the default value for ``convert_datetime64`` from ``True`` to ``False`` and only give a ``FutureWarning`` when ``convert_datetime64=True`` is passed but I'm not sure if this is more appropriate than leaving ``True`` as the default and always giving a ``FutureWarning``.
https://api.github.com/repos/pandas-dev/pandas/pulls/18902
2017-12-21T23:44:27Z
2018-04-14T23:58:31Z
2018-04-14T23:58:31Z
2018-04-15T11:14:27Z
DOC: Fix names
diff --git a/doc/source/whatsnew/v0.22.0 b/doc/source/whatsnew/v0.22.0.txt similarity index 100% rename from doc/source/whatsnew/v0.22.0 rename to doc/source/whatsnew/v0.22.0.txt diff --git a/doc/source/whatsnew/v0.23.0 b/doc/source/whatsnew/v0.23.0.txt similarity index 100% rename from doc/source/whatsnew/v0.23.0 rename to doc/source/whatsnew/v0.23.0.txt
[ci skip] cc @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/18900
2017-12-21T21:14:40Z
2017-12-21T21:16:40Z
2017-12-21T21:16:40Z
2017-12-21T21:17:11Z
DOC: move versions 0.22 -> 0.23, add 0.22 docs
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 64cbe0b050a61..d61a98fe2dae4 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,8 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.23.0.txt + .. include:: whatsnew/v0.22.0.txt .. include:: whatsnew/v0.21.1.txt diff --git a/doc/source/whatsnew/v0.22.0 b/doc/source/whatsnew/v0.22.0 new file mode 100644 index 0000000000000..2d30e00142846 --- /dev/null +++ b/doc/source/whatsnew/v0.22.0 @@ -0,0 +1,14 @@ +.. _whatsnew_0220: + +v0.22.0 +------- + +This is a major release from 0.21.1 and includes a number of API changes, +deprecations, new features, enhancements, and performance improvements along +with a large number of bug fixes. We recommend that all users upgrade to this +version. + +.. _whatsnew_0220.api_breaking: + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.23.0 similarity index 97% rename from doc/source/whatsnew/v0.22.0.txt rename to doc/source/whatsnew/v0.23.0 index a289cf32949be..40e1e2011479c 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.23.0 @@ -1,6 +1,6 @@ -.. _whatsnew_0220: +.. _whatsnew_0230: -v0.22.0 +v0.23.0 ------- This is a major release from 0.21.1 and includes a number of API changes, @@ -8,7 +8,7 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. -.. _whatsnew_0220.enhancements: +.. _whatsnew_0230.enhancements: New features ~~~~~~~~~~~~ @@ -32,7 +32,7 @@ The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtyp pd.get_dummies(df, columns=['c'], dtype=bool).dtypes -.. _whatsnew_0220.enhancements.merge_on_columns_and_levels: +.. 
_whatsnew_0230.enhancements.merge_on_columns_and_levels: Merging on a combination of columns and index levels ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -63,7 +63,7 @@ levels <merging.merge_on_columns_and_levels>` documentation section. left.merge(right, on=['key1', 'key2']) -.. _whatsnew_0220.enhancements.ran_inf: +.. _whatsnew_0230.enhancements.ran_inf: ``.rank()`` handles ``inf`` values when ``NaN`` are present ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -119,7 +119,7 @@ Current Behavior s.rank(na_option='top') -.. _whatsnew_0220.enhancements.other: +.. _whatsnew_0230.enhancements.other: Other Enhancements ^^^^^^^^^^^^^^^^^^ @@ -142,12 +142,12 @@ Other Enhancements - ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` can now take a callable as their argument (:issue:`18862`) -.. _whatsnew_0220.api_breaking: +.. _whatsnew_0230.api_breaking: Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _whatsnew_0220.api_breaking.deps: +.. _whatsnew_0230.api_breaking.deps: Dependencies have increased minimum versions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -171,7 +171,7 @@ Build Changes - Building from source now explicity requires ``setuptools`` in ``setup.py`` (:issue:`18113`) - Updated conda recipe to be in compliance with conda-build 3.0+ (:issue:`18002`) -.. _whatsnew_0220.api: +.. _whatsnew_0230.api: Other API Changes ^^^^^^^^^^^^^^^^^ @@ -201,7 +201,7 @@ Other API Changes - :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) - :func:`wide_to_long` previously kept numeric-like suffixes as ``object`` dtype. Now they are cast to numeric if possible (:issue:`17627`) -.. _whatsnew_0220.deprecations: +.. _whatsnew_0230.deprecations: Deprecations ~~~~~~~~~~~~ @@ -217,7 +217,7 @@ Deprecations - :func:`read_excel` has deprecated the ``skip_footer`` parameter. 
Use ``skipfooter`` instead (:issue:`18836`) - The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`). -.. _whatsnew_0220.prior_deprecations: +.. _whatsnew_0230.prior_deprecations: Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -238,7 +238,7 @@ Removal of prior version deprecations/changes - :func:`read_csv` has dropped the ``buffer_lines`` parameter (:issue:`13360`) - :func:`read_csv` has dropped the ``compact_ints`` and ``use_unsigned`` parameters (:issue:`13323`) -.. _whatsnew_0220.performance: +.. _whatsnew_0230.performance: Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -256,7 +256,7 @@ Performance Improvements - Improved performance of ``DatetimeIndex`` and ``Series`` arithmetic operations with Business-Month and Business-Quarter frequencies (:issue:`18489`) - :func:`Series` / :func:`DataFrame` tab completion limits to 100 values, for better performance. (:issue:`18587`) -.. _whatsnew_0220.docs: +.. _whatsnew_0230.docs: Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ @@ -265,7 +265,7 @@ Documentation Changes - - -.. _whatsnew_0220.bug_fixes: +.. _whatsnew_0230.bug_fixes: Bug Fixes ~~~~~~~~~
https://api.github.com/repos/pandas-dev/pandas/pulls/18897
2017-12-21T13:54:16Z
2017-12-21T17:24:57Z
2017-12-21T17:24:57Z
2017-12-22T09:33:41Z
CI: move coverage
diff --git a/.travis.yml b/.travis.yml index ea9d4307d6bf1..e56435faeec19 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,10 +52,10 @@ matrix: # In allow_failures - dist: trusty env: - - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true COVERAGE=true + - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true - dist: trusty env: - - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true + - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true COVERAGE=true # In allow_failures - dist: trusty env: @@ -80,7 +80,7 @@ matrix: # TODO(jreback) - dist: trusty env: - - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true COVERAGE=true + - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true - dist: trusty env: - JOB="2.7_SLOW" SLOW=true
closes #18895
https://api.github.com/repos/pandas-dev/pandas/pulls/18896
2017-12-21T13:49:32Z
2017-12-21T15:00:31Z
2017-12-21T15:00:31Z
2017-12-21T15:05:24Z
BLD: fix conda to 4.3.30
diff --git a/.travis.yml b/.travis.yml index 9eccf87960dd0..ea9d4307d6bf1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,6 +49,7 @@ matrix: apt: packages: - python-gtk2 + # In allow_failures - dist: trusty env: - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true COVERAGE=true @@ -76,6 +77,10 @@ matrix: env: - JOB="3.6_DOC" DOC=true allow_failures: + # TODO(jreback) + - dist: trusty + env: + - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true COVERAGE=true - dist: trusty env: - JOB="2.7_SLOW" SLOW=true diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 475fc6a46955d..800a20aa94b8f 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -56,6 +56,11 @@ if [ "$CONDA_BUILD_TEST" ]; then conda install conda-build fi +# TODO(jreback) +echo +echo "[fix conda version]" +conda install conda=4.3.30 + echo echo "[add channels]" conda config --remove channels defaults || exit 1 diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 2d56e12533cd0..a0070dce6a7f1 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -40,7 +40,7 @@ def __fspath__(self): except ImportError: pass -HERE = os.path.dirname(__file__) +HERE = os.path.abspath(os.path.dirname(__file__)) class TestCommonIOCapabilities(object): @@ -150,10 +150,8 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): (pd.read_fwf, 'os', os.path.join(HERE, 'data', 'fixed_width_format.txt')), (pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')), - - # TODO(jreback) gh-18873 - # (pd.read_feather, 'feather', os.path.join(HERE, 'data', - # 'feather-0_3_1.feather')), + (pd.read_feather, 'feather', os.path.join(HERE, 'data', + 'feather-0_3_1.feather')), (pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf', 'datetimetz_object.h5')), (pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')),
CI: move 3.5 conda build to allowed_failures xref #18870 for abspath
https://api.github.com/repos/pandas-dev/pandas/pulls/18893
2017-12-21T12:58:16Z
2017-12-21T13:43:32Z
2017-12-21T13:43:32Z
2017-12-21T13:43:32Z
BUG: Fixed timedelta numeric operations
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..d5d7b85d669e6 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -338,7 +338,8 @@ Reshaping Numeric ^^^^^^^ -- +- Fixed ``std`` and ``var`` computations for timedelta arrays not returning results in timedelta units (:issue:`18880`) +- Fixed ``skipna`` handling for some operations like ``sum`` on timedelta arrays (:issue:`18880`) - - diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e1c09947ac0b4..61e94f0e7190b 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -239,6 +239,9 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, elif copy: values = values.copy() + if is_timedelta64_dtype(values) and not skipna: + values = values.astype('float64') + values[mask] = np.nan values = _view_if_needed(values) # return a platform independent precision dtype @@ -406,7 +409,10 @@ def _get_counts_nanvar(mask, axis, ddof, dtype=float): @disallow('M8') @bottleneck_switch(ddof=1) def nanstd(values, axis=None, skipna=True, ddof=1): - result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof)) + var_ = nanvar(values, axis=axis, skipna=skipna, ddof=ddof) + if is_timedelta64_dtype(values): + var_ = var_.value + result = np.sqrt(var_) return _wrap_results(result, values.dtype) @@ -448,7 +454,7 @@ def nanvar(values, axis=None, skipna=True, ddof=1): # precision as the original values array. 
if is_float_dtype(dtype): result = result.astype(dtype) - return _wrap_results(result, values.dtype) + return _wrap_results(result, dtype) @disallow('M8', 'm8') diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 203a0b4a54858..1aec3f1664416 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1215,6 +1215,39 @@ def test_timedelta64_analytics(self): expected = Timedelta('1 days') assert result == expected + def test_timedelta64_sum(self): + # https://github.com/pandas-dev/pandas/issues/18880 + s = pd.Series(pd.timedelta_range(0, periods=12, freq='S')) + s[0] = np.nan + + result = s.sum(skipna=False) + assert result is pd.NaT + + result = s.sum() + assert result == pd.Timedelta(minutes=1, seconds=6) + + @pytest.mark.parametrize('method', [ + 'sum', 'mean', 'min', 'max', 'median', + 'std', 'var', + ]) + def test_timedelta64_many(self, method): + s_float = pd.Series(np.arange(12) * 1e3) + s_timed = pd.Series(pd.timedelta_range(0, periods=12, freq='us')) + + expected = pd.Timedelta(getattr(s_float, method)()) + result = getattr(s_timed, method)() + if pd.isna(expected): + assert pd.isna(result) + else: + assert result == expected + + s_float[0] = np.nan + s_timed[0] = np.nan + result = getattr(s_timed, method)(skipna=False) + expected = getattr(s_float, method)(skipna=False) + assert pd.isna(result) + assert pd.isna(expected) + def test_idxmin(self): # test idxmin # _check_stat_op approach can not be used here because of isna check. 
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..e0006a9fec9cf 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -888,6 +888,19 @@ def test_nanstd_roundoff(self): result = data.std(ddof=ddof) assert result == 0.0 + def test_nanvar_timedelta(self): + result = pd.Series(dtype='m8[ns]').var() + assert result is pd.NaT + + result = pd.Series([1, 1], dtype='m8[ns]').var() + assert result == pd.Timedelta(0) + + result = pd.Series([10, 20], dtype='m8[ns]').var() + assert result == pd.Timedelta(50) + + result = pd.Series([np.nan, 10, 20, np.nan], dtype='m8[ns]').var() + assert result == pd.Timedelta(50) + @property def prng(self): return np.random.RandomState(1234)
Closes https://github.com/pandas-dev/pandas/issues/18880
https://api.github.com/repos/pandas-dev/pandas/pulls/18892
2017-12-21T12:56:47Z
2018-05-02T13:09:33Z
null
2018-05-02T13:09:33Z
API: Allow ordered=None in CategoricalDtype
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 083242cd69b74..f1158b9ad87eb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -460,6 +460,29 @@ To restore previous behavior, simply set ``expand`` to ``False``: extracted type(extracted) +.. _whatsnew_0230.api_breaking.cdt_ordered: + +Default value for the ``ordered`` parameter of ``CategoricalDtype`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The default value of the ``ordered`` parameter for :class:`~pandas.api.types.CategoricalDtype` has changed from ``False`` to ``None`` to allow updating of ``categories`` without impacting ``ordered``. Behavior should remain consistent for downstream objects, such as :class:`Categorical` (:issue:`18790`) + +In previous versions, the default value for the ``ordered`` parameter was ``False``. This could potentially lead to the ``ordered`` parameter unintentionally being changed from ``True`` to ``False`` when users attempt to update ``categories`` if ``ordered`` is not explicitly specified, as it would silently default to ``False``. The new behavior for ``ordered=None`` is to retain the existing value of ``ordered``. + +New Behavior: + +.. ipython:: python + + from pandas.api.types import CategoricalDtype + cat = pd.Categorical(list('abcaba'), ordered=True, categories=list('cba')) + cat + cdt = CategoricalDtype(categories=list('cbad')) + cat.astype(cdt) + +Notice in the example above that the converted ``Categorical`` has retained ``ordered=True``. Had the default value for ``ordered`` remained as ``False``, the converted ``Categorical`` would have become unordered, despite ``ordered=False`` never being explicitly specified. To change the value of ``ordered``, explicitly pass it to the new dtype, e.g. ``CategoricalDtype(categories=list('cbad'), ordered=False)``. 
+ +Note that the unintenional conversion of ``ordered`` discussed above did not arise in previous versions due to separate bugs that prevented ``astype`` from doing any type of category to category conversion (:issue:`10696`, :issue:`18593`). These bugs have been fixed in this release, and motivated changing the default value of ``ordered``. + .. _whatsnew_0230.api: Other API Changes diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 62c6a6b16cbe9..93250bdbb5054 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -243,7 +243,7 @@ class Categorical(ExtensionArray, PandasObject): # For comparisons, so that numpy uses our implementation if the compare # ops, which raise __array_priority__ = 1000 - _dtype = CategoricalDtype() + _dtype = CategoricalDtype(ordered=False) _deprecations = frozenset(['labels']) _typ = 'categorical' @@ -294,7 +294,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, if fastpath: self._codes = coerce_indexer_dtype(values, categories) - self._dtype = dtype + self._dtype = self._dtype.update_dtype(dtype) return # null_mask indicates missing values we want to exclude from inference. 
@@ -358,7 +358,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, full_codes[~null_mask] = codes codes = full_codes - self._dtype = dtype + self._dtype = self._dtype.update_dtype(dtype) self._codes = coerce_indexer_dtype(codes, dtype.categories) @property @@ -438,7 +438,7 @@ def astype(self, dtype, copy=True): """ if is_categorical_dtype(dtype): # GH 10696/18593 - dtype = self.dtype._update_dtype(dtype) + dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self if dtype == self.dtype: return self @@ -560,7 +560,7 @@ def from_codes(cls, codes, categories, ordered=False): raise ValueError( "codes need to be convertible to an arrays of integers") - categories = CategoricalDtype._validate_categories(categories) + categories = CategoricalDtype.validate_categories(categories) if len(codes) and (codes.max() >= len(categories) or codes.min() < -1): raise ValueError("codes need to be between -1 and " @@ -1165,7 +1165,7 @@ def __setstate__(self, state): # Provide compatibility with pre-0.15.0 Categoricals. 
if '_categories' not in state and '_levels' in state: - state['_categories'] = self.dtype._validate_categories(state.pop( + state['_categories'] = self.dtype.validate_categories(state.pop( '_levels')) if '_codes' not in state and 'labels' in state: state['_codes'] = coerce_indexer_dtype( diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index d8d3a96992757..99e4033f104db 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -159,11 +159,11 @@ class CategoricalDtype(PandasExtensionDtype): _metadata = ['categories', 'ordered'] _cache = {} - def __init__(self, categories=None, ordered=False): + def __init__(self, categories=None, ordered=None): self._finalize(categories, ordered, fastpath=False) @classmethod - def _from_fastpath(cls, categories=None, ordered=False): + def _from_fastpath(cls, categories=None, ordered=None): self = cls.__new__(cls) self._finalize(categories, ordered, fastpath=True) return self @@ -180,14 +180,12 @@ def _from_categorical_dtype(cls, dtype, categories=None, ordered=None): def _finalize(self, categories, ordered, fastpath=False): - if ordered is None: - ordered = False - else: - self._validate_ordered(ordered) + if ordered is not None: + self.validate_ordered(ordered) if categories is not None: - categories = self._validate_categories(categories, - fastpath=fastpath) + categories = self.validate_categories(categories, + fastpath=fastpath) self._categories = categories self._ordered = ordered @@ -208,6 +206,17 @@ def __hash__(self): return int(self._hash_categories(self.categories, self.ordered)) def __eq__(self, other): + """ + Rules for CDT equality: + 1) Any CDT is equal to the string 'category' + 2) Any CDT is equal to a CDT with categories=None regardless of ordered + 3) A CDT with ordered=True is only equal to another CDT with + ordered=True and identical categories in the same order + 4) A CDT with ordered={False, None} is only equal to another CDT with + ordered={False, None} and 
identical categories, but same order is + not required. There is no distinction between False/None. + 5) Any other comparison returns False + """ if isinstance(other, compat.string_types): return other == self.name @@ -220,12 +229,16 @@ def __eq__(self, other): # CDT(., .) = CDT(None, False) and *all* # CDT(., .) = CDT(None, True). return True - elif self.ordered: - return other.ordered and self.categories.equals(other.categories) - elif other.ordered: - return False + elif self.ordered or other.ordered: + # At least one has ordered=True; equal if both have ordered=True + # and the same values for categories in the same order. + return ((self.ordered == other.ordered) and + self.categories.equals(other.categories)) else: - # both unordered; this could probably be optimized / cached + # Neither has ordered=True; equal if both have the same categories, + # but same order is not necessary. There is no distinction between + # ordered=False and ordered=None: CDT(., False) and CDT(., None) + # will be equal if they have the same categories. return hash(self) == hash(other) def __repr__(self): @@ -288,7 +301,7 @@ def construct_from_string(cls, string): raise TypeError("cannot construct a CategoricalDtype") @staticmethod - def _validate_ordered(ordered): + def validate_ordered(ordered): """ Validates that we have a valid ordered parameter. If it is not a boolean, a TypeError will be raised. 
@@ -308,7 +321,7 @@ def _validate_ordered(ordered): raise TypeError("'ordered' must either be 'True' or 'False'") @staticmethod - def _validate_categories(categories, fastpath=False): + def validate_categories(categories, fastpath=False): """ Validates that we have good categories @@ -340,7 +353,7 @@ def _validate_categories(categories, fastpath=False): return categories - def _update_dtype(self, dtype): + def update_dtype(self, dtype): """ Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified @@ -361,11 +374,16 @@ def _update_dtype(self, dtype): 'got {dtype!r}').format(dtype=dtype) raise ValueError(msg) - # dtype is CDT: keep current categories if None (ordered can't be None) + # dtype is CDT: keep current categories/ordered if None new_categories = dtype.categories if new_categories is None: new_categories = self.categories - return CategoricalDtype(new_categories, dtype.ordered) + + new_ordered = dtype.ordered + if new_ordered is None: + new_ordered = self.ordered + + return CategoricalDtype(new_categories, new_ordered) @property def categories(self): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index b36bc1df23247..60f5552576ea1 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -344,7 +344,7 @@ def astype(self, dtype, copy=True): return IntervalIndex(np.array(self)) elif is_categorical_dtype(dtype): # GH 18630 - dtype = self.dtype._update_dtype(dtype) + dtype = self.dtype.update_dtype(dtype) if dtype == self.dtype: return self.copy() if copy else self diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index d800a7b92b559..cc833af03ae66 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -24,6 +24,11 @@ import pandas.util.testing as tm +@pytest.fixture(params=[True, False, None]) +def ordered(request): + return request.param + + class 
Base(object): def setup_method(self, method): @@ -124,41 +129,6 @@ def test_tuple_categories(self): result = CategoricalDtype(categories) assert all(result.categories == categories) - @pytest.mark.parametrize('dtype', [ - CategoricalDtype(list('abc'), False), - CategoricalDtype(list('abc'), True)]) - @pytest.mark.parametrize('new_dtype', [ - 'category', - CategoricalDtype(None, False), - CategoricalDtype(None, True), - CategoricalDtype(list('abc'), False), - CategoricalDtype(list('abc'), True), - CategoricalDtype(list('cba'), False), - CategoricalDtype(list('cba'), True), - CategoricalDtype(list('wxyz'), False), - CategoricalDtype(list('wxyz'), True)]) - def test_update_dtype(self, dtype, new_dtype): - if isinstance(new_dtype, string_types) and new_dtype == 'category': - expected_categories = dtype.categories - expected_ordered = dtype.ordered - else: - expected_categories = new_dtype.categories - if expected_categories is None: - expected_categories = dtype.categories - expected_ordered = new_dtype.ordered - - result = dtype._update_dtype(new_dtype) - tm.assert_index_equal(result.categories, expected_categories) - assert result.ordered is expected_ordered - - @pytest.mark.parametrize('bad_dtype', [ - 'foo', object, np.int64, PeriodDtype('Q')]) - def test_update_dtype_errors(self, bad_dtype): - dtype = CategoricalDtype(list('abc'), False) - msg = 'a CategoricalDtype must be passed to perform an update, ' - with tm.assert_raises_regex(ValueError, msg): - dtype._update_dtype(bad_dtype) - class TestDatetimeTZDtype(Base): @@ -609,17 +579,12 @@ def test_caching(self): class TestCategoricalDtypeParametrized(object): - @pytest.mark.parametrize('categories, ordered', [ - (['a', 'b', 'c', 'd'], False), - (['a', 'b', 'c', 'd'], True), - (np.arange(1000), False), - (np.arange(1000), True), - (['a', 'b', 10, 2, 1.3, True], False), - ([True, False], True), - ([True, False], False), - (pd.date_range('2017', periods=4), True), - (pd.date_range('2017', periods=4), False), - ]) + 
@pytest.mark.parametrize('categories', [ + list('abcd'), + np.arange(1000), + ['a', 'b', 10, 2, 1.3, True], + [True, False], + pd.date_range('2017', periods=4)]) def test_basic(self, categories, ordered): c1 = CategoricalDtype(categories, ordered=ordered) tm.assert_index_equal(c1.categories, pd.Index(categories)) @@ -627,21 +592,24 @@ def test_basic(self, categories, ordered): def test_order_matters(self): categories = ['a', 'b'] - c1 = CategoricalDtype(categories, ordered=False) - c2 = CategoricalDtype(categories, ordered=True) + c1 = CategoricalDtype(categories, ordered=True) + c2 = CategoricalDtype(categories, ordered=False) + c3 = CategoricalDtype(categories, ordered=None) assert c1 is not c2 + assert c1 is not c3 - def test_unordered_same(self): - c1 = CategoricalDtype(['a', 'b']) - c2 = CategoricalDtype(['b', 'a']) + @pytest.mark.parametrize('ordered', [False, None]) + def test_unordered_same(self, ordered): + c1 = CategoricalDtype(['a', 'b'], ordered=ordered) + c2 = CategoricalDtype(['b', 'a'], ordered=ordered) assert hash(c1) == hash(c2) def test_categories(self): result = CategoricalDtype(['a', 'b', 'c']) tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c'])) - assert result.ordered is False + assert result.ordered is None - def test_equal_but_different(self): + def test_equal_but_different(self, ordered): c1 = CategoricalDtype([1, 2, 3]) c2 = CategoricalDtype([1., 2., 3.]) assert c1 is not c2 @@ -652,9 +620,11 @@ def test_equal_but_different(self): ([1, 2, 3], [3, 2, 1]), ]) def test_order_hashes_different(self, v1, v2): - c1 = CategoricalDtype(v1) + c1 = CategoricalDtype(v1, ordered=False) c2 = CategoricalDtype(v2, ordered=True) + c3 = CategoricalDtype(v1, ordered=None) assert c1 is not c2 + assert c1 is not c3 def test_nan_invalid(self): with pytest.raises(ValueError): @@ -669,26 +639,46 @@ def test_same_categories_different_order(self): c2 = CategoricalDtype(['b', 'a'], ordered=True) assert c1 is not c2 - @pytest.mark.parametrize('ordered, 
other, expected', [ - (True, CategoricalDtype(['a', 'b'], True), True), - (False, CategoricalDtype(['a', 'b'], False), True), - (True, CategoricalDtype(['a', 'b'], False), False), - (False, CategoricalDtype(['a', 'b'], True), False), - (True, CategoricalDtype([1, 2], False), False), - (False, CategoricalDtype([1, 2], True), False), - (False, CategoricalDtype(None, True), True), - (True, CategoricalDtype(None, True), True), - (False, CategoricalDtype(None, False), True), - (True, CategoricalDtype(None, False), True), - (True, 'category', True), - (False, 'category', True), - (True, 'not a category', False), - (False, 'not a category', False), - ]) - def test_categorical_equality(self, ordered, other, expected): - c1 = CategoricalDtype(['a', 'b'], ordered) + @pytest.mark.parametrize('ordered1', [True, False, None]) + @pytest.mark.parametrize('ordered2', [True, False, None]) + def test_categorical_equality(self, ordered1, ordered2): + # same categories, same order + # any combination of None/False are equal + # True/True is the only combination with True that are equal + c1 = CategoricalDtype(list('abc'), ordered1) + c2 = CategoricalDtype(list('abc'), ordered2) + result = c1 == c2 + expected = bool(ordered1) is bool(ordered2) + assert result is expected + + # same categories, different order + # any combination of None/False are equal (order doesn't matter) + # any combination with True are not equal (different order of cats) + c1 = CategoricalDtype(list('abc'), ordered1) + c2 = CategoricalDtype(list('cab'), ordered2) + result = c1 == c2 + expected = (bool(ordered1) is False) and (bool(ordered2) is False) + assert result is expected + + # different categories + c2 = CategoricalDtype([1, 2, 3], ordered2) + assert c1 != c2 + + # none categories + c1 = CategoricalDtype(list('abc'), ordered1) + c2 = CategoricalDtype(None, ordered2) + c3 = CategoricalDtype(None, ordered1) + assert c1 == c2 + assert c2 == c1 + assert c2 == c3 + + @pytest.mark.parametrize('categories', 
[list('abc'), None]) + @pytest.mark.parametrize('other', ['category', 'not a category']) + def test_categorical_equality_strings(self, categories, ordered, other): + c1 = CategoricalDtype(categories, ordered) result = c1 == other - assert result == expected + expected = other == 'category' + assert result is expected def test_invalid_raises(self): with tm.assert_raises_regex(TypeError, 'ordered'): @@ -729,12 +719,12 @@ def test_from_categorical_dtype_both(self): c1, categories=[1, 2], ordered=False) assert result == CategoricalDtype([1, 2], ordered=False) - def test_str_vs_repr(self): - c1 = CategoricalDtype(['a', 'b']) + def test_str_vs_repr(self, ordered): + c1 = CategoricalDtype(['a', 'b'], ordered=ordered) assert str(c1) == 'category' # Py2 will have unicode prefixes - pat = r"CategoricalDtype\(categories=\[.*\], ordered=False\)" - assert re.match(pat, repr(c1)) + pat = r"CategoricalDtype\(categories=\[.*\], ordered={ordered}\)" + assert re.match(pat.format(ordered=ordered), repr(c1)) def test_categorical_categories(self): # GH17884 @@ -742,3 +732,38 @@ def test_categorical_categories(self): tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) c1 = CategoricalDtype(CategoricalIndex(['a', 'b'])) tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) + + @pytest.mark.parametrize('new_categories', [ + list('abc'), list('cba'), list('wxyz'), None]) + @pytest.mark.parametrize('new_ordered', [True, False, None]) + def test_update_dtype(self, ordered, new_categories, new_ordered): + dtype = CategoricalDtype(list('abc'), ordered) + new_dtype = CategoricalDtype(new_categories, new_ordered) + + expected_categories = new_dtype.categories + if expected_categories is None: + expected_categories = dtype.categories + + expected_ordered = new_dtype.ordered + if expected_ordered is None: + expected_ordered = dtype.ordered + + result = dtype.update_dtype(new_dtype) + tm.assert_index_equal(result.categories, expected_categories) + assert result.ordered is 
expected_ordered + + def test_update_dtype_string(self, ordered): + dtype = CategoricalDtype(list('abc'), ordered) + expected_categories = dtype.categories + expected_ordered = dtype.ordered + result = dtype.update_dtype('category') + tm.assert_index_equal(result.categories, expected_categories) + assert result.ordered is expected_ordered + + @pytest.mark.parametrize('bad_dtype', [ + 'foo', object, np.int64, PeriodDtype('Q')]) + def test_update_dtype_errors(self, bad_dtype): + dtype = CategoricalDtype(list('abc'), False) + msg = 'a CategoricalDtype must be passed to perform an update, ' + with tm.assert_raises_regex(ValueError, msg): + dtype.update_dtype(bad_dtype)
- [X] closes #18790 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry For equality comparisons with `ordered=None`, I essentially treated it as if it where `ordered=False`: - `CDT(['a', 'b'], None) == CDT(['a', 'b'], False)` --> `True` - `CDT(['a', 'b'], None) == CDT(['b', 'a'], False)` --> `True` - `CDT(['a', 'b'], None) == CDT(['a', 'b'], True)` --> `False` This maintains existing comparison behavior when ordered is not specified: - `CDT(['a', 'b'], False) == CDT(['a', 'b'])` --> `True` - `CDT(['a', 'b'], True) == CDT(['a', 'b'])` --> `False` <br /> I didn't make any code modifications in regards to hashing, so `CDT(*, None)` will have the same hash as `CDT(*, False)`. This seems to be consistent with how equality is treated. Makes the logic implementing equality nicer too, since the case when both dtypes are unordered relies on hashes.
https://api.github.com/repos/pandas-dev/pandas/pulls/18889
2017-12-21T07:27:50Z
2018-02-10T17:02:30Z
2018-02-10T17:02:29Z
2018-09-24T17:26:41Z
[WIP] BLD: Maybe appease the Travis?
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 475fc6a46955d..db1107b8bc8f4 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -175,7 +175,7 @@ if [ "$PIP_BUILD_TEST" ]; then echo "[building release]" bash scripts/build_dist_for_release.sh conda uninstall -y cython - time pip install dist/*tar.gz --quiet || exit 1 + time pip install dist/*.tar.gz --quiet || exit 1 elif [ "$CONDA_BUILD_TEST" ]; then diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 57e72da2fd3f4..9c812a765dfc8 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -53,11 +53,13 @@ def s3_resource(tips_file, jsonl_file): def add_tips_files(bucket_name): for s3_key, file_name in test_s3_files: - with open(file_name, 'rb') as f: - conn.Bucket(bucket_name).put_object( - Key=s3_key, - Body=f) - + try: + with open(file_name, 'rb') as f: + conn.Bucket(bucket_name).put_object( + Key=s3_key, + Body=f) + except Exception: + raise Exception(os.listdir(os.path.dirname(file_name))) boto3 = pytest.importorskip('boto3') # see gh-16135 bucket = 'pandas-test' diff --git a/setup.py b/setup.py index 515e1660fa6de..88a98d285e14f 100755 --- a/setup.py +++ b/setup.py @@ -721,7 +721,8 @@ def pxd(name): 'pandas.tests.io': ['data/legacy_hdf/*.h5', 'data/legacy_pickle/*/*.pickle', 'data/legacy_msgpack/*/*.msgpack', - 'data/html_encoding/*.html']}, + 'data/html_encoding/*.html', + 'parser/data/*.jsonl']}, ext_modules=extensions, maintainer_email=EMAIL, description=DESCRIPTION,
I have no idea if this will work.
https://api.github.com/repos/pandas-dev/pandas/pulls/18887
2017-12-21T02:44:53Z
2017-12-21T13:45:28Z
null
2023-05-11T01:16:59Z
BLD: more quiet in the build
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 6946d7dd11870..475fc6a46955d 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -175,7 +175,7 @@ if [ "$PIP_BUILD_TEST" ]; then echo "[building release]" bash scripts/build_dist_for_release.sh conda uninstall -y cython - time pip install dist/*tar.gz || exit 1 + time pip install dist/*tar.gz --quiet || exit 1 elif [ "$CONDA_BUILD_TEST" ]; then diff --git a/scripts/build_dist_for_release.sh b/scripts/build_dist_for_release.sh index e77974ae08b0c..bee0f23a68ec2 100644 --- a/scripts/build_dist_for_release.sh +++ b/scripts/build_dist_for_release.sh @@ -5,6 +5,6 @@ # this builds the release cleanly & is building on the current checkout rm -rf dist git clean -xfd -python setup.py clean -python setup.py cython -python setup.py sdist --formats=gztar +python setup.py clean --quiet +python setup.py cython --quiet +python setup.py sdist --formats=gztar --quiet
https://api.github.com/repos/pandas-dev/pandas/pulls/18886
2017-12-21T00:00:51Z
2017-12-21T02:08:23Z
2017-12-21T02:08:23Z
2017-12-21T02:08:23Z
Fix Series[timedelta64]+DatetimeIndex[tz] bugs
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 5fd7c3e217928..119dd894abe4c 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -370,6 +370,7 @@ Numeric - Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) - Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`) +- Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`) - Categorical diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 2a77a23c2cfa1..ee2fdd213dd9a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -671,7 +671,9 @@ def __add__(self, other): from pandas.tseries.offsets import DateOffset other = lib.item_from_zerodim(other) - if is_timedelta64_dtype(other): + if isinstance(other, ABCSeries): + return NotImplemented + elif is_timedelta64_dtype(other): return self._add_delta(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): @@ -702,7 +704,9 @@ def __sub__(self, other): from pandas.tseries.offsets import DateOffset other = lib.item_from_zerodim(other) - if is_timedelta64_dtype(other): + if isinstance(other, ABCSeries): + return NotImplemented + elif is_timedelta64_dtype(other): return self._add_delta(-other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if not isinstance(other, TimedeltaIndex): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b17682b6c3448..ef0406a4b9f9d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -854,6 +854,9 @@ def _maybe_update_attributes(self, attrs): return attrs def 
_add_delta(self, delta): + if isinstance(delta, ABCSeries): + return NotImplemented + from pandas import TimedeltaIndex name = self.name diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 89d793a586e74..0229f7c256464 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -39,7 +39,7 @@ from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, - ABCIndex, + ABCIndex, ABCDatetimeIndex, ABCPeriodIndex) # ----------------------------------------------------------------------------- @@ -514,8 +514,9 @@ def _convert_to_array(self, values, name=None, other=None): values[:] = iNaT # a datelike - elif isinstance(values, pd.DatetimeIndex): - values = values.to_series() + elif isinstance(values, ABCDatetimeIndex): + # TODO: why are we casting to_series in the first place? + values = values.to_series(keep_tz=True) # datetime with tz elif (isinstance(ovalues, datetime.datetime) and hasattr(ovalues, 'tzinfo')): @@ -535,6 +536,11 @@ def _convert_to_array(self, values, name=None, other=None): elif inferred_type in ('timedelta', 'timedelta64'): # have a timedelta, convert to to ns here values = to_timedelta(values, errors='coerce', box=False) + if isinstance(other, ABCDatetimeIndex): + # GH#13905 + # Defer to DatetimeIndex/TimedeltaIndex operations where + # timezones are handled carefully. 
+ values = pd.TimedeltaIndex(values) elif inferred_type == 'integer': # py3 compat where dtype is 'm' but is an integer if values.dtype.kind == 'm': @@ -754,25 +760,26 @@ def wrapper(left, right, name=name, na_op=na_op): na_op = converted.na_op if isinstance(rvalues, ABCSeries): - name = _maybe_match_name(left, rvalues) lvalues = getattr(lvalues, 'values', lvalues) rvalues = getattr(rvalues, 'values', rvalues) # _Op aligns left and right else: - if isinstance(rvalues, pd.Index): - name = _maybe_match_name(left, rvalues) - else: - name = left.name if (hasattr(lvalues, 'values') and - not isinstance(lvalues, pd.DatetimeIndex)): + not isinstance(lvalues, ABCDatetimeIndex)): lvalues = lvalues.values + if isinstance(right, (ABCSeries, pd.Index)): + # `left` is always a Series object + res_name = _maybe_match_name(left, right) + else: + res_name = left.name + result = wrap_results(safe_na_op(lvalues, rvalues)) return construct_result( left, result, index=left.index, - name=name, + name=res_name, dtype=dtype, ) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 4684eb89557bf..381e2ef3041e7 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -364,6 +364,33 @@ def test_datetimeindex_sub_timestamp_overflow(self): with pytest.raises(OverflowError): dtimin - variant + @pytest.mark.parametrize('names', [('foo', None, None), + ('baz', 'bar', None), + ('bar', 'bar', 'bar')]) + @pytest.mark.parametrize('tz', [None, 'America/Chicago']) + def test_dti_add_series(self, tz, names): + # GH#13905 + index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'], + tz=tz, name=names[0]) + ser = Series([Timedelta(seconds=5)] * 2, + index=index, name=names[1]) + expected = Series(index + Timedelta(seconds=5), + index=index, name=names[2]) + + # passing name arg isn't enough when names[2] is None + expected.name = names[2] + assert expected.dtype == 
index.dtype + result = ser + index + tm.assert_series_equal(result, expected) + result2 = index + ser + tm.assert_series_equal(result2, expected) + + expected = index + Timedelta(seconds=5) + result3 = ser.values + index + tm.assert_index_equal(result3, expected) + result4 = index + ser.values + tm.assert_index_equal(result4, expected) + @pytest.mark.parametrize('box', [np.array, pd.Index]) def test_dti_add_offset_array(self, tz, box): # GH#18849
ser + index lost timezone index + ser retained timezone but returned a DatetimeIndex - [x] closes #13905 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18884
2017-12-20T22:22:45Z
2018-01-02T11:23:50Z
2018-01-02T11:23:49Z
2018-01-23T04:40:47Z
Fix DatetimeIndex.insert(pd.NaT) for tz-aware index
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3f300deddebeb..cfaf8718544ca 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -303,6 +303,7 @@ Indexing - :func:`Index.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) - :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) +- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) I/O diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index bec26ef72d63a..3fc3cf9a78a25 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1775,7 +1775,7 @@ def insert(self, loc, item): if isinstance(item, (datetime, np.datetime64)): self._assert_can_do_op(item) - if not self._has_same_tz(item): + if not self._has_same_tz(item) and not isna(item): raise ValueError( 'Passed item and index have different timezone') # check freq can be preserved on edge cases diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index b3ce22962d5d4..48ceefd6368c0 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -46,6 +46,15 @@ def test_where_tz(self): expected = i2 tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('null', [None, np.nan, pd.NaT]) + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern']) + def test_insert_nat(self, tz, null): + # GH#16537, GH#18295 (test missing) + idx = pd.DatetimeIndex(['2017-01-01'], tz=tz) + expected = pd.DatetimeIndex(['NaT', '2017-01-01'], tz=tz) + res = idx.insert(0, null) + tm.assert_index_equal(res, expected) + def test_insert(self): idx = DatetimeIndex( ['2000-01-04', '2000-01-01', 
'2000-01-02'], name='idx') @@ -145,13 +154,6 @@ def test_insert(self): assert result.tz == expected.tz assert result.freq is None - # GH 18295 (test missing) - expected = DatetimeIndex( - ['20170101', pd.NaT, '20170102', '20170103', '20170104']) - for na in (np.nan, pd.NaT, None): - result = date_range('20170101', periods=4).insert(1, na) - tm.assert_index_equal(result, expected) - def test_delete(self): idx = date_range(start='2000-01-01', periods=5, freq='M', name='idx')
- [x] closes #16357 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18883
2017-12-20T22:01:53Z
2017-12-29T14:39:38Z
2017-12-29T14:39:37Z
2017-12-29T16:29:10Z
API: disallow duplicate level names
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 5e55efb4e21fb..df50624a9fb2f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -184,6 +184,7 @@ Other API Changes - A :class:`Series` of ``dtype=category`` constructed from an empty ``dict`` will now have categories of ``dtype=object`` rather than ``dtype=float64``, consistently with the case in which an empty list is passed (:issue:`18515`) - ``NaT`` division with :class:`datetime.timedelta` will now return ``NaN`` instead of raising (:issue:`17876`) - All-NaN levels in a ``MultiIndex`` are now assigned ``float`` rather than ``object`` dtype, promoting consistency with ``Index`` (:issue:`17929`). +- Levels names of a ``MultiIndex`` (when not None) are now required to be unique: trying to create a ``MultiIndex`` with repeated names will raise a ``ValueError`` (:issue:`18872`) - :class:`Timestamp` will no longer silently ignore unused or invalid ``tz`` or ``tzinfo`` keyword arguments (:issue:`17690`) - :class:`Timestamp` will no longer silently ignore invalid ``freq`` arguments (:issue:`5168`) - :class:`CacheableOffset` and :class:`WeekDay` are no longer available in the ``pandas.tseries.offsets`` module (:issue:`17830`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index c20c6e1f75a24..f4c4f91d2cc57 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -579,23 +579,24 @@ def _set_names(self, names, level=None, validate=True): if level is None: level = range(self.nlevels) + used = {} else: level = [self._get_level_number(l) for l in level] + used = {self.levels[l].name: l + for l in set(range(self.nlevels)) - set(level)} # set the name for l, name in zip(level, names): + if name is not None and name in used: + raise ValueError('Duplicated level name: "{}", assigned to ' + 'level {}, is already used for level ' + '{}.'.format(name, l, used[name])) self.levels[l].rename(name, inplace=True) + 
used[name] = l names = property(fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex") - def _reference_duplicate_name(self, name): - """ - Returns True if the name refered to in self.names is duplicated. - """ - # count the times name equals an element in self.names. - return sum(name == n for n in self.names) > 1 - def _format_native_types(self, na_rep='nan', **kwargs): new_levels = [] new_labels = [] diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 320ad109f01ba..1ca014baa9ec8 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -91,12 +91,6 @@ def __init__(self, values, index, level=-1, value_columns=None, self.index = index - if isinstance(self.index, MultiIndex): - if index._reference_duplicate_name(level): - msg = ("Ambiguous reference to {level}. The index " - "names are not unique.".format(level=level)) - raise ValueError(msg) - self.level = self.index._get_level_number(level) # when index includes `nan`, need to lift levels/strides by 1 @@ -502,11 +496,6 @@ def factorize(index): return categories, codes N, K = frame.shape - if isinstance(frame.columns, MultiIndex): - if frame.columns._reference_duplicate_name(level): - msg = ("Ambiguous reference to {level}. The column " - "names are not unique.".format(level=level)) - raise ValueError(msg) # Will also convert negative level numbers and check if out of bounds. 
level_num = frame.columns._get_level_number(level) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index e7ea3f9c62540..6e3b7a059fd49 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -130,6 +130,20 @@ def test_set_index2(self): result = df.set_index(df.C) assert result.index.name == 'C' + @pytest.mark.parametrize('level', ['a', pd.Series(range(3), name='a')]) + def test_set_index_duplicate_names(self, level): + # GH18872 + df = pd.DataFrame(np.arange(8).reshape(4, 2), columns=['a', 'b']) + + # Pass an existing level name: + df.index.name = 'a' + pytest.raises(ValueError, df.set_index, level, append=True) + pytest.raises(ValueError, df.set_index, [level], append=True) + + # Pass twice the same level name: + df.index.name = 'c' + pytest.raises(ValueError, df.set_index, [level, level]) + def test_set_index_nonuniq(self): df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 'three', 'one', 'two'], @@ -591,19 +605,6 @@ def test_reorder_levels(self): index=e_idx) assert_frame_equal(result, expected) - result = df.reorder_levels([0, 0, 0]) - e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']], - labels=[[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]], - names=['L0', 'L0', 'L0']) - expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, - index=e_idx) - assert_frame_equal(result, expected) - - result = df.reorder_levels(['L0', 'L0', 'L0']) - assert_frame_equal(result, expected) - def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) @@ -831,7 +832,7 @@ def test_set_index_names(self): mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B']) mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values, - names=['A', 'B', 'A', 'B']) + names=['A', 'B', 'C', 'D']) df = df.set_index(['A', 'B']) @@ -843,13 +844,14 @@ def test_set_index_names(self): # Check actual 
equality tm.assert_index_equal(df.set_index(df.index).index, mi) + idx2 = df.index.rename(['C', 'D']) + # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather # than a pair of tuples - assert isinstance(df.set_index( - [df.index, df.index]).index, MultiIndex) + assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex) # Check equality - tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2) + tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2) def test_rename_objects(self): renamed = self.mixed_frame.rename(columns=str.upper) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index f34d25142a057..5ff4f58774322 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -560,16 +560,6 @@ def test_unstack_dtypes(self): assert left.shape == (3, 2) tm.assert_frame_equal(left, right) - def test_unstack_non_unique_index_names(self): - idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], - names=['c1', 'c1']) - df = DataFrame([1, 2], index=idx) - with pytest.raises(ValueError): - df.unstack('c1') - - with pytest.raises(ValueError): - df.T.stack('c1') - def test_unstack_nan_index(self): # GH7466 cast = lambda val: '{0:1}'.format('' if val != val else val) nan = np.nan diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 5e3d2bb9cf091..12f5b98fb64f4 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -388,8 +388,8 @@ def test_groupby_multi_categorical_as_index(self): columns=['cat', 'A', 'B']) tm.assert_frame_equal(result, expected) - # another not in-axis grouper (conflicting names in index) - s = Series(['a', 'b', 'b'], name='cat') + # another not in-axis grouper + s = Series(['a', 'b', 'b'], name='cat2') result = df.groupby(['cat', s], as_index=False).sum() expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), 'A': [10.0, nan, nan, 22.0, nan, nan], 
@@ -397,6 +397,10 @@ def test_groupby_multi_categorical_as_index(self): columns=['cat', 'A', 'B']) tm.assert_frame_equal(result, expected) + # GH18872: conflicting names in desired index + pytest.raises(ValueError, lambda: df.groupby(['cat', + s.rename('cat')]).sum()) + # is original index dropped? expected = DataFrame({'cat': Categorical([1, 1, 2, 2, 3, 3]), 'A': [10, 11, 10, 11, 10, 11], diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 4d6e543851d4f..2a7c020f4c9e9 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -536,15 +536,6 @@ def test_names(self): level_names = [level.name for level in index.levels] assert ind_names == level_names - def test_reference_duplicate_name(self): - idx = MultiIndex.from_tuples( - [('a', 'b'), ('c', 'd')], names=['x', 'x']) - assert idx._reference_duplicate_name('x') - - idx = MultiIndex.from_tuples( - [('a', 'b'), ('c', 'd')], names=['x', 'y']) - assert not idx._reference_duplicate_name('x') - def test_astype(self): expected = self.index.copy() actual = self.index.astype('O') @@ -609,6 +600,23 @@ def test_constructor_mismatched_label_levels(self): with tm.assert_raises_regex(ValueError, label_error): self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]]) + @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], + [1, 'a', 1]]) + def test_duplicate_level_names(self, names): + # GH18872 + pytest.raises(ValueError, pd.MultiIndex.from_product, + [[0, 1]] * 3, names=names) + + # With .rename() + mi = pd.MultiIndex.from_product([[0, 1]] * 3) + tm.assert_raises_regex(ValueError, "Duplicated level name:", + mi.rename, names) + + # With .rename(., level=) + mi.rename(names[0], level=1, inplace=True) + tm.assert_raises_regex(ValueError, "Duplicated level name:", + mi.rename, names[:2], level=[0, 2]) + def assert_multiindex_copied(self, copy, original): # Levels should be (at least, shallow copied) tm.assert_copy(copy.levels, original.levels) @@ -667,11 
+675,6 @@ def test_changing_names(self): shallow_copy.names = [name + "c" for name in shallow_copy.names] self.check_level_names(self.index, new_names) - def test_duplicate_names(self): - self.index.names = ['foo', 'foo'] - tm.assert_raises_regex(KeyError, 'Level foo not found', - self.index._get_level_number, 'foo') - def test_get_level_number_integer(self): self.index.names = [1, 0] assert self.index._get_level_number(1) == 0 diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index bedafccca5798..2f8ef32722051 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -589,8 +589,8 @@ def test_to_latex_no_bold_rows(self): """ assert observed == expected - @pytest.mark.parametrize('name0', [None, 'named']) - @pytest.mark.parametrize('name1', [None, 'named']) + @pytest.mark.parametrize('name0', [None, 'named0']) + @pytest.mark.parametrize('name1', [None, 'named1']) @pytest.mark.parametrize('axes', [[0], [1], [0, 1]]) def test_to_latex_multiindex_names(self, name0, name1, axes): # GH 18667 diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 47be8d115a07e..305c1ebcedc6f 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -1908,12 +1908,6 @@ def make_index(names=None): 'a', 'b'], index=make_index(['date', 'a', 't'])) pytest.raises(ValueError, store.append, 'df', df) - # dup within level - _maybe_remove(store, 'df') - df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'], - index=make_index(['date', 'date', 'date'])) - pytest.raises(ValueError, store.append, 'df', df) - # fully names _maybe_remove(store, 'df') df = DataFrame(np.zeros((12, 2)), columns=[ diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 5b64f62527da4..786c57a4a82df 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1623,14 +1623,9 @@ def 
test_crosstab_with_numpy_size(self): tm.assert_frame_equal(result, expected) def test_crosstab_dup_index_names(self): - # GH 13279 + # GH 13279, GH 18872 s = pd.Series(range(3), name='foo') - result = pd.crosstab(s, s) - expected_index = pd.Index(range(3), name='foo') - expected = pd.DataFrame(np.eye(3, dtype=np.int64), - index=expected_index, - columns=expected_index) - tm.assert_frame_equal(result, expected) + pytest.raises(ValueError, pd.crosstab, s, s) @pytest.mark.parametrize("names", [['a', ('b', 'c')], [('a', 'b'), 'c']]) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index f3be7bb9905f4..714e43a4af1f8 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -214,17 +214,6 @@ def test_reorder_levels(self): expected = Series(np.arange(6), index=e_idx) assert_series_equal(result, expected) - result = s.reorder_levels([0, 0, 0]) - e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']], - labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]], - names=['L0', 'L0', 'L0']) - expected = Series(np.arange(6), index=e_idx) - assert_series_equal(result, expected) - - result = s.reorder_levels(['L0', 'L0', 'L0']) - assert_series_equal(result, expected) - def test_rename_axis_inplace(self): # GH 15704 series = self.ts.copy()
- [x] closes #18872 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18882
2017-12-20T20:53:23Z
2017-12-29T14:34:18Z
2017-12-29T14:34:18Z
2018-01-01T17:47:00Z
TST: xfail more in 3.5 conda build
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index caee8c8d85811..2d56e12533cd0 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -144,15 +144,16 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): with pytest.raises(error_class): reader(path) - @pytest.mark.xfail(reason="not working in 3.5 conda build") @pytest.mark.parametrize('reader, module, path', [ (pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')), (pd.read_table, 'os', os.path.join(HERE, 'data', 'iris.csv')), (pd.read_fwf, 'os', os.path.join(HERE, 'data', 'fixed_width_format.txt')), (pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')), - (pd.read_feather, 'feather', os.path.join(HERE, 'data', - 'feather-0_3_1.feather')), + + # TODO(jreback) gh-18873 + # (pd.read_feather, 'feather', os.path.join(HERE, 'data', + # 'feather-0_3_1.feather')), (pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf', 'datetimetz_object.h5')), (pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')),
https://api.github.com/repos/pandas-dev/pandas/pulls/18879
2017-12-20T19:13:35Z
2017-12-20T22:56:00Z
2017-12-20T22:56:00Z
2017-12-20T22:56:00Z
Fix FY5253 onOffset/apply bug, simplify
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 24f3e4433411e..3a6c4e10eaa97 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -290,6 +290,7 @@ Conversion - Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) - Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) - Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) +- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) Indexing diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index 2dd061dcc6f9e..09206439e9996 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -158,17 +158,6 @@ def test_apply(self): class TestFY5253NearestEndMonth(Base): - def test_get_target_month_end(self): - assert (makeFY5253NearestEndMonth( - startingMonth=8, weekday=WeekDay.SAT).get_target_month_end( - datetime(2013, 1, 1)) == datetime(2013, 8, 31)) - assert (makeFY5253NearestEndMonth( - startingMonth=12, weekday=WeekDay.SAT).get_target_month_end( - datetime(2013, 1, 1)) == datetime(2013, 12, 31)) - assert (makeFY5253NearestEndMonth( - startingMonth=2, weekday=WeekDay.SAT).get_target_month_end( - datetime(2013, 1, 1)) == datetime(2013, 2, 28)) - def test_get_year_end(self): assert (makeFY5253NearestEndMonth( startingMonth=8, weekday=WeekDay.SAT).get_year_end( @@ -625,3 +614,22 @@ def test_bunched_yearends(): assert fy.rollback(dt) == Timestamp('2002-12-28') assert (-fy).apply(dt) == Timestamp('2002-12-28') assert dt - fy == Timestamp('2002-12-28') + + +def test_fy5253_last_onoffset(): 
+ # GH#18877 dates on the year-end but not normalized to midnight + offset = FY5253(n=-5, startingMonth=5, variation="last", weekday=0) + ts = Timestamp('1984-05-28 06:29:43.955911354+0200', + tz='Europe/San_Marino') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow + + +def test_fy5253_nearest_onoffset(): + # GH#18877 dates on the year-end but not normalized to midnight + offset = FY5253(n=3, startingMonth=7, variation="nearest", weekday=2) + ts = Timestamp('2032-07-28 00:12:59.035729419+0000', tz='Africa/Dakar') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 54250bbf903a4..0e6a2259274ed 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1814,13 +1814,6 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, raise ValueError('{variation} is not a valid variation' .format(variation=self.variation)) - @cache_readonly - def _offset_lwom(self): - if self.variation == "nearest": - return None - else: - return LastWeekOfMonth(n=1, weekday=self.weekday) - def isAnchored(self): return (self.n == 1 and self.startingMonth is not None and @@ -1841,6 +1834,8 @@ def onOffset(self, dt): @apply_wraps def apply(self, other): + norm = Timestamp(other).normalize() + n = self.n prev_year = self.get_year_end( datetime(other.year - 1, self.startingMonth, 1)) @@ -1853,32 +1848,26 @@ def apply(self, other): cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo) next_year = tslib._localize_pydatetime(next_year, other.tzinfo) - if other == prev_year: + # Note: next_year.year == other.year + 1, so we will always + # have other < next_year + if norm == prev_year: n -= 1 - elif other == cur_year: + elif norm == cur_year: pass - elif other == next_year: - n += 1 - # TODO: Not hit in tests elif n > 0: - if other < prev_year: + if norm < prev_year: n -= 2 - elif prev_year < other < cur_year: + 
elif prev_year < norm < cur_year: n -= 1 - elif cur_year < other < next_year: + elif cur_year < norm < next_year: pass - else: - assert False else: - if next_year < other: - n += 2 - # TODO: Not hit in tests; UPDATE: looks impossible - elif cur_year < other < next_year: + if cur_year < norm < next_year: n += 1 - elif prev_year < other < cur_year: + elif prev_year < norm < cur_year: pass - elif (other.year == prev_year.year and other < prev_year and - prev_year - other <= timedelta(6)): + elif (norm.year == prev_year.year and norm < prev_year and + prev_year - norm <= timedelta(6)): # GH#14774, error when next_year.year == cur_year.year # e.g. prev_year == datetime(2004, 1, 3), # other == datetime(2004, 1, 1) @@ -1894,35 +1883,30 @@ def apply(self, other): return result def get_year_end(self, dt): - if self.variation == "nearest": - return self._get_year_end_nearest(dt) - else: - return self._get_year_end_last(dt) - - def get_target_month_end(self, dt): - target_month = datetime(dt.year, self.startingMonth, 1, - tzinfo=dt.tzinfo) - return shift_month(target_month, 0, 'end') - # TODO: is this DST-safe? 
+ assert dt.tzinfo is None - def _get_year_end_nearest(self, dt): - target_date = self.get_target_month_end(dt) + dim = ccalendar.get_days_in_month(dt.year, self.startingMonth) + target_date = datetime(dt.year, self.startingMonth, dim) wkday_diff = self.weekday - target_date.weekday() if wkday_diff == 0: + # year_end is the same for "last" and "nearest" cases return target_date - days_forward = wkday_diff % 7 - if days_forward <= 3: - # The upcoming self.weekday is closer than the previous one - return target_date + timedelta(days_forward) - else: - # The previous self.weekday is closer than the upcoming one - return target_date + timedelta(days_forward - 7) + if self.variation == "last": + days_forward = (wkday_diff % 7) - 7 - def _get_year_end_last(self, dt): - current_year = datetime(dt.year, self.startingMonth, 1, - tzinfo=dt.tzinfo) - return current_year + self._offset_lwom + # days_forward is always negative, so we always end up + # in the same year as dt + return target_date + timedelta(days=days_forward) + else: + # variation == "nearest": + days_forward = wkday_diff % 7 + if days_forward <= 3: + # The upcoming self.weekday is closer than the previous one + return target_date + timedelta(days_forward) + else: + # The previous self.weekday is closer than the upcoming one + return target_date + timedelta(days_forward - 7) @property def rule_code(self):
Similar to #18875. The actual bugfix here is just changing the comparison in `apply` from `other < whatever` to `norm < whatever`. The remaining edits to `get_year_end` are orthogonal simplification (which I guess could go in a separate PR). - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18877
2017-12-20T17:59:07Z
2017-12-28T12:28:44Z
2017-12-28T12:28:44Z
2018-02-11T22:00:22Z
ENH: Added a min_count keyword to stat funcs
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index d38b677df321c..16b7cbff44e03 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -36,7 +36,8 @@ def get_dispatch(dtypes): def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=1): """ Only aggregates on axis=0 """ @@ -88,7 +89,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = sumx[i, j] @@ -99,7 +100,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=1): """ Only aggregates on axis=0 """ @@ -147,7 +149,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = prodx[i, j] @@ -159,12 +161,15 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, ct, oldmean ndarray[{{dest_type2}}, ndim=2] nobs, mean + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -208,12 +213,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, 
ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] sumx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -263,7 +271,8 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -272,6 +281,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count Py_ssize_t ngroups = len(counts) + assert min_count == -1, "'min_count' only used in add and prod" + if len(labels) == 0: return @@ -332,7 +343,8 @@ def get_dispatch(dtypes): def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -342,6 +354,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -382,7 +396,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): + ndarray[int64_t] labels, int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -392,6 +407,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, 
ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -455,7 +472,8 @@ def get_dispatch(dtypes): def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -464,6 +482,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] maxx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -526,7 +546,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -535,6 +556,8 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] minx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -686,7 +709,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -695,6 +719,9 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] _counts ndarray data float64_t* ptr + + assert min_count == -1, "'min_count' only used in add and prod" + ngroups = len(counts) N, K = (<object> values).shape diff --git 
a/pandas/core/generic.py b/pandas/core/generic.py index f2dbb3ef4d32a..2acf64f1d9f74 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7322,7 +7322,8 @@ def _add_numeric_operations(cls): @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis", - name1=name, name2=name2, axis_descr=axis_descr) + name1=name, name2=name2, axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: @@ -7363,7 +7364,8 @@ def mad(self, axis=None, skipna=None, level=None): @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis", name1=name, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: @@ -7387,10 +7389,10 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_min_count_stat_function( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', - nanops.nansum) + nanops.nansum, _sum_examples) cls.mean = _make_stat_function( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis', @@ -7406,10 +7408,10 @@ def compound(self, axis=None, skipna=None, level=None): "by N-1\n", nanops.nankurt) cls.kurtosis = cls.kurt - cls.prod = _make_stat_function( + cls.prod = _make_min_count_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis', - nanops.nanprod) + nanops.nanprod, _prod_examples) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, @@ -7540,10 +7542,13 @@ def _doc_parms(cls): numeric_only : boolean, default None Include only float, int, boolean columns. 
If None, will attempt to use everything, then use only numeric data. Not implemented for Series. +%(min_count)s\ Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +%(outname)s : %(name1)s or %(name2)s (if level specified) + +%(examples)s""" _num_ddof_doc = """ @@ -7611,9 +7616,92 @@ def _doc_parms(cls): """ +_sum_examples = """\ +Examples +-------- +By default, the sum of an empty series is ``NaN``. + +>>> pd.Series([]).sum() # min_count=1 is the default +nan + +This can be controlled with the ``min_count`` parameter. For example, if +you'd like the sum of an empty series to be 0, pass ``min_count=0``. + +>>> pd.Series([]).sum(min_count=0) +0.0 + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).sum() +nan + +>>> pd.Series([np.nan]).sum(min_count=0) +0.0 +""" + +_prod_examples = """\ +Examples +-------- +By default, the product of an empty series is ``NaN`` + +>>> pd.Series([]).prod() +nan + +This can be controlled with the ``min_count`` parameter + +>>> pd.Series([]).prod(min_count=0) +1.0 + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).prod() +nan + +>>> pd.Series([np.nan]).sum(min_count=0) +1.0 +""" + + +_min_count_stub = """\ +min_count : int, default 1 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. + + .. versionadded :: 0.21.2 + + Added with the default being 1. This means the sum or product + of an all-NA or empty series is ``NaN``. 
+""" + + +def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, + f, examples): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, min_count=_min_count_stub, + examples=examples) + @Appender(_num_doc) + def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + min_count=1, + **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, min_count=min_count) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=numeric_only, min_count=min_count) + + return set_function_name(stat_func, name, cls) + + def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, min_count='', examples='') @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 47b80c00da4d4..041239ed06d88 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -986,7 +986,8 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -994,7 +995,8 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True): continue try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(obj.values, how, + min_count=min_count) except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -1301,7 +1303,8 @@ 
def _add_numeric_operations(cls): """ add numeric operations to the GroupBy generically """ def groupby_function(name, alias, npfunc, - numeric_only=True, _convert=False): + numeric_only=True, _convert=False, + min_count=-1): _local_template = "Compute %(f)s of group values" @@ -1311,6 +1314,8 @@ def groupby_function(name, alias, npfunc, def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only + if 'min_count' not in kwargs: + kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( @@ -1358,8 +1363,8 @@ def last(x): else: return last(x) - cls.sum = groupby_function('sum', 'add', np.sum) - cls.prod = groupby_function('prod', 'prod', np.prod) + cls.sum = groupby_function('sum', 'add', np.sum, min_count=1) + cls.prod = groupby_function('prod', 'prod', np.prod, min_count=1) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, @@ -2139,7 +2144,7 @@ def get_group_levels(self): 'var': 'group_var', 'first': { 'name': 'group_nth', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) + 'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1) }, 'last': 'group_last', 'ohlc': 'group_ohlc', @@ -2209,7 +2214,7 @@ def wrapper(*args, **kwargs): (how, dtype_str)) return func, dtype_str - def _cython_operation(self, kind, values, how, axis): + def _cython_operation(self, kind, values, how, axis, min_count=-1): assert kind in ['transform', 'aggregate'] # can we do this operation with our cython functions @@ -2294,11 +2299,12 @@ def _cython_operation(self, kind, values, how, axis): counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate( result, counts, values, labels, func, is_numeric, - is_datetimelike) + is_datetimelike, min_count) elif kind == 'transform': result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan) + # TODO: 
min_count result = self._transform( result, values, labels, func, is_numeric, is_datetimelike) @@ -2335,14 +2341,15 @@ def _cython_operation(self, kind, values, how, axis): return result, names - def aggregate(self, values, how, axis=0): - return self._cython_operation('aggregate', values, how, axis) + def aggregate(self, values, how, axis=0, min_count=-1): + return self._cython_operation('aggregate', values, how, axis, + min_count=min_count) def transform(self, values, how, axis=0): return self._cython_operation('transform', values, how, axis) def _aggregate(self, result, counts, values, comp_ids, agg_func, - is_numeric, is_datetimelike): + is_numeric, is_datetimelike, min_count=-1): if values.ndim > 3: # punting for now raise NotImplementedError("number of dimensions is currently " @@ -2351,9 +2358,10 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func, for i, chunk in enumerate(values.transpose(2, 0, 1)): chunk = chunk.squeeze() - agg_func(result[:, :, i], counts, chunk, comp_ids) + agg_func(result[:, :, i], counts, chunk, comp_ids, + min_count) else: - agg_func(result, counts, values, comp_ids) + agg_func(result, counts, values, comp_ids, min_count) return result @@ -3643,9 +3651,10 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): new_items, new_blocks = self._cython_agg_blocks( - how, alt=alt, numeric_only=numeric_only) + how, alt=alt, numeric_only=numeric_only, min_count=min_count) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3671,7 +3680,8 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, alt=None, numeric_only=True): + def _cython_agg_blocks(self, how, alt=None, numeric_only=True, + min_count=-1): # TODO: the actual managing of mgr_locs is a PITA # here, it should happen 
via BlockManager.combine @@ -3688,7 +3698,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True): locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( - block.values, how, axis=agg_axis) + block.values, how, axis=agg_axis, min_count=min_count) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e1c09947ac0b4..88f69f6ff2e14 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -107,21 +107,9 @@ def f(values, axis=None, skipna=True, **kwds): if k not in kwds: kwds[k] = v try: - if values.size == 0: - - # we either return np.nan or pd.NaT - if is_numeric_dtype(values): - values = values.astype('float64') - fill_value = na_value_for_dtype(values.dtype) - - if values.ndim == 1: - return fill_value - else: - result_shape = (values.shape[:axis] + - values.shape[axis + 1:]) - result = np.empty(result_shape, dtype=values.dtype) - result.fill(fill_value) - return result + if values.size == 0 and kwds.get('min_count') is None: + # We are empty, returning NA for our type + return _na_for_min_count(values, axis) if (_USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name)): @@ -292,6 +280,22 @@ def _wrap_results(result, dtype): return result +def _na_for_min_count(values, axis): + # we either return np.nan or pd.NaT + if is_numeric_dtype(values): + values = values.astype('float64') + fill_value = na_value_for_dtype(values.dtype) + + if values.ndim == 1: + return fill_value + else: + result_shape = (values.shape[:axis] + + values.shape[axis + 1:]) + result = np.empty(result_shape, dtype=values.dtype) + result.fill(fill_value) + return result + + def nanany(values, axis=None, skipna=True): values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna) return values.any(axis) @@ -304,7 +308,7 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nansum(values, axis=None, 
skipna=True): +def nansum(values, axis=None, skipna=True, min_count=1): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -312,7 +316,7 @@ def nansum(values, axis=None, skipna=True): elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask) + the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype) @@ -641,13 +645,13 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True): +def nanprod(values, axis=None, skipna=True, min_count=1): mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask) + return _maybe_null_out(result, axis, mask, min_count=min_count) def _maybe_arg_null_out(result, axis, mask, skipna): @@ -683,9 +687,9 @@ def _get_counts(mask, axis, dtype=float): return np.array(count, dtype=dtype) -def _maybe_null_out(result, axis, mask): +def _maybe_null_out(result, axis, mask, min_count=1): if axis is not None and getattr(result, 'ndim', False): - null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 + null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): @@ -698,7 +702,7 @@ def _maybe_null_out(result, axis, mask): result[null_mask] = None elif result is not tslib.NaT: null_mask = mask.size - mask.sum() - if null_mask == 0: + if null_mask < min_count: result = np.nan return result diff --git a/pandas/core/resample.py b/pandas/core/resample.py index c2bf7cff746eb..a30c727ecb87c 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -625,9 +625,20 @@ def size(self): Resampler._deprecated_valids += dir(Resampler) + +# downsample methods +for method in ['sum', 'prod']: + + def f(self, 
_method=method, min_count=1, *args, **kwargs): + nv.validate_resampler_func(_method, args, kwargs) + return self._downsample(_method, min_count=min_count) + f.__doc__ = getattr(GroupBy, method).__doc__ + setattr(Resampler, method, f) + + # downsample methods -for method in ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'ohlc']: +for method in ['min', 'max', 'first', 'last', 'mean', 'sem', + 'median', 'ohlc']: def f(self, _method=method, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 17d711f937bf7..80e9acd0d2281 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -973,6 +973,37 @@ def test_sum_corner(self): assert len(axis0) == 0 assert len(axis1) == 0 + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_sum_prod_nanops(self, method, unit): + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [unit, unit], + "b": [unit, np.nan], + "c": [np.nan, np.nan]}) + + result = getattr(df, method)(min_count=1) + expected = pd.Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=0) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(min_count=1) + expected = pd.Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(min_count=5) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=6) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + def test_sum_object(self): values = self.frame.values.astype(int) frame = DataFrame(values, 
index=self.frame.index, diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 3d27df31cee6e..07ecc085098bf 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -809,26 +809,33 @@ def test__cython_agg_general(self): exc.args += ('operation: %s' % op, ) raise - def test_cython_agg_empty_buckets(self): - ops = [('mean', np.mean), - ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), - ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), - ('min', np.min), - ('max', np.max), ] - + @pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), + ('var', lambda x: np.var(x, ddof=1)), + ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), + ('prod', np.prod), + ('min', np.min), + ('max', np.max), ] + ) + def test_cython_agg_empty_buckets(self, op, targop): df = pd.DataFrame([11, 12, 13]) grps = range(0, 55, 5) - for op, targop in ops: - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op,) - raise + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. 
+ if op in ('add', 'prod'): + min_count = 1 + else: + min_count = -1 + result = df.groupby(pd.cut(df[0], grps))._cython_agg_general( + op, min_count=min_count) + expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise def test_agg_over_numpy_arrays(self): # GH 3788 diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index c73423921898d..5e3d2bb9cf091 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -662,3 +662,48 @@ def test_groupby_categorical_two_columns(self): "C3": [nan, nan, nan, nan, 10, 100, nan, nan, nan, nan, 200, 34]}, index=idx) tm.assert_frame_equal(res, exp) + + def test_empty_sum(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # NA by default + result = df.groupby("A").B.sum() + expected = pd.Series([3, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.sum(min_count=0) + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.sum(min_count=1) + expected = pd.Series([3, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + def test_empty_prod(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # NA by default + result = df.groupby("A").B.prod() + expected = pd.Series([2, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # 
min_count=0 + result = df.groupby("A").B.prod(min_count=0) + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.prod(min_count=1) + expected = pd.Series([2, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 0dae6aa96ced1..cd92edc927173 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -30,38 +30,122 @@ class TestSeriesAnalytics(TestData): @pytest.mark.parametrize("use_bottleneck", [True, False]) - @pytest.mark.parametrize("method", ["sum", "prod"]) - def test_empty(self, method, use_bottleneck): - + @pytest.mark.parametrize("method, unit", [ + ("sum", 0.0), + ("prod", 1.0) + ]) + def test_empty(self, method, unit, use_bottleneck): with pd.option_context("use_bottleneck", use_bottleneck): # GH 9422 - # treat all missing as NaN + # Entirely empty s = Series([]) + # NA by default result = getattr(s, method)() assert isna(result) + # Explict + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) + assert isna(result) + + # Skipna, default result = getattr(s, method)(skipna=True) assert isna(result) + # Skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) + assert isna(result) + + # All-NA s = Series([np.nan]) + # NA by default result = getattr(s, method)() assert isna(result) + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) + assert isna(result) + + # Skipna, default result = getattr(s, method)(skipna=True) assert isna(result) + # skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, 
min_count=1) + assert isna(result) + + # Mix of valid, empty s = Series([np.nan, 1]) + # Default result = getattr(s, method)() assert result == 1.0 - s = Series([np.nan, 1]) + # Explicit + result = getattr(s, method)(min_count=0) + assert result == 1.0 + + result = getattr(s, method)(min_count=1) + assert result == 1.0 + + # Skipna result = getattr(s, method)(skipna=True) assert result == 1.0 + result = getattr(s, method)(skipna=True, min_count=0) + assert result == 1.0 + + result = getattr(s, method)(skipna=True, min_count=1) + assert result == 1.0 + # GH #844 (changed in 9422) df = DataFrame(np.empty((10, 0))) assert (df.sum(1).isnull()).all() + s = pd.Series([1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan, 1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0.0), + ('prod', 1.0), + ]) + def test_empty_multi(self, method, unit): + s = pd.Series([1, np.nan, np.nan, np.nan], + index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) + # NaN by default + result = getattr(s, method)(level=0) + expected = pd.Series([1, np.nan], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(s, method)(level=0, min_count=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(s, method)(level=0, min_count=1) + expected = pd.Series([1, np.nan], index=['a', 'b']) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize( "method", ['sum', 'mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 38f4b8be469a5..4a3c4eff9f8c3 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -4,6 +4,7 @@ from datetime 
import datetime, timedelta from functools import partial from textwrap import dedent +from operator import methodcaller import pytz import pytest @@ -3382,6 +3383,34 @@ def test_aggregate_normal(self): assert_frame_equal(expected, dt_result) """ + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_resample_entirly_nat_window(self, method, unit): + s = pd.Series([0] * 2 + [np.nan] * 2, + index=pd.date_range('2017', periods=4)) + # nan by default + result = methodcaller(method)(s.resample("2d")) + expected = pd.Series([0.0, np.nan], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(s.resample("2d")) + expected = pd.Series([0.0, np.nan], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + def test_aggregate_with_nat(self): # check TimeGrouper's aggregation is identical as normal groupby @@ -3441,3 +3470,29 @@ def test_repr(self): "closed='left', label='left', how='mean', " "convention='e', base=0)") assert result == expected + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_upsample_sum(self, method, unit): + s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H")) + resampled = s.resample("30T") + index = pd.to_datetime(['2017-01-01T00:00:00', + '2017-01-01T00:30:00', + '2017-01-01T01:00:00']) + + # NaN by default + result = methodcaller(method)(resampled) + expected = pd.Series([1, np.nan, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, 
expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(resampled) + expected = pd.Series([1, np.nan, 1], index=index) + tm.assert_series_equal(result, expected)
The current default is 1, reproducing the behavior of pandas 0.21. The current test suite should pass. I'll add additional commits here changing the default to be 0. Currently, only nansum and nanprod actually do anything with `min_count`. It will not be hard to adjust other nan* methods use it if we want. This was just simplest for now. Additional tests for the new behavior have been added. closes #18678
https://api.github.com/repos/pandas-dev/pandas/pulls/18876
2017-12-20T17:36:56Z
2017-12-28T14:44:51Z
2017-12-28T14:44:51Z
2017-12-29T00:58:23Z
Fix bugs in WeekOfMonth.apply, Week.onOffset
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 40e1e2011479c..1a3b3e751190b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -282,6 +282,8 @@ Conversion - Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) - Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) - Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) +- Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) +- Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) Indexing @@ -361,4 +363,3 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) -- Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 5b4c2f9d86674..b304ebff55b6e 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3147,3 +3147,37 @@ def test_require_integers(offset_types): cls = offset_types with pytest.raises(ValueError): cls(n=1.5) + + +def test_weeks_onoffset(): + # GH#18510 Week with weekday = None, normalize = False should always + # be onOffset + offset = Week(n=2, weekday=None) + ts = Timestamp('1862-01-13 09:03:34.873477378+0210', tz='Africa/Lusaka') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset 
== ts + assert fast == slow + + # negative n + offset = Week(n=2, weekday=None) + ts = Timestamp('1856-10-24 16:18:36.556360110-0717', tz='Pacific/Easter') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow + + +def test_weekofmonth_onoffset(): + # GH#18864 + # Make sure that nanoseconds don't trip up onOffset (and with it apply) + offset = WeekOfMonth(n=2, week=2, weekday=0) + ts = Timestamp('1916-05-15 01:14:49.583410462+0422', tz='Asia/Qyzylorda') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow + + # negative n + offset = WeekOfMonth(n=-3, week=1, weekday=0) + ts = Timestamp('1980-12-08 03:38:52.878321185+0500', tz='Asia/Oral') + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 8b12b2f3ad2ce..54250bbf903a4 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -112,6 +112,31 @@ def wrapper(self, other): return wrapper +def shift_day(other, days): + """ + Increment the datetime `other` by the given number of days, retaining + the time-portion of the datetime. For tz-naive datetimes this is + equivalent to adding a timedelta. For tz-aware datetimes it is similar to + dateutil's relativedelta.__add__, but handles pytz tzinfo objects. 
+ + Parameters + ---------- + other : datetime or Timestamp + days : int + + Returns + ------- + shifted: datetime or Timestamp + """ + if other.tzinfo is None: + return other + timedelta(days=days) + + tz = other.tzinfo + naive = other.replace(tzinfo=None) + shifted = naive + timedelta(days=days) + return tslib._localize_pydatetime(shifted, tz) + + # --------------------------------------------------------------------- # DateOffset @@ -1342,6 +1367,8 @@ def apply_index(self, i): def onOffset(self, dt): if self.normalize and not _is_normalized(dt): return False + elif self.weekday is None: + return True return dt.weekday() == self.weekday @property @@ -1361,7 +1388,29 @@ def _from_name(cls, suffix=None): return cls(weekday=weekday) -class WeekOfMonth(DateOffset): +class _WeekOfMonthMixin(object): + """Mixin for methods common to WeekOfMonth and LastWeekOfMonth""" + @apply_wraps + def apply(self, other): + compare_day = self._get_offset_day(other) + + months = self.n + if months > 0 and compare_day > other.day: + months -= 1 + elif months <= 0 and compare_day < other.day: + months += 1 + + shifted = shift_month(other, months, 'start') + to_day = self._get_offset_day(shifted) + return shift_day(shifted, to_day - shifted.day) + + def onOffset(self, dt): + if self.normalize and not _is_normalized(dt): + return False + return dt.day == self._get_offset_day(dt) + + +class WeekOfMonth(_WeekOfMonthMixin, DateOffset): """ Describes monthly dates like "the Tuesday of the 2nd week of each month" @@ -1400,34 +1449,23 @@ def __init__(self, n=1, normalize=False, week=None, weekday=None): self.kwds = {'weekday': weekday, 'week': week} - @apply_wraps - def apply(self, other): - base = other - offsetOfMonth = self.getOffsetOfMonth(other) - - months = self.n - if months > 0 and offsetOfMonth > other: - months -= 1 - elif months <= 0 and offsetOfMonth < other: - months += 1 - - other = self.getOffsetOfMonth(shift_month(other, months, 'start')) - other = datetime(other.year, 
other.month, other.day, base.hour, - base.minute, base.second, base.microsecond) - return other + def _get_offset_day(self, other): + """ + Find the day in the same month as other that has the same + weekday as self.weekday and is the self.week'th such day in the month. - def getOffsetOfMonth(self, dt): - w = Week(weekday=self.weekday) - d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo) - # TODO: Is this DST-safe? - d = w.rollforward(d) - return d + timedelta(weeks=self.week) + Parameters + ---------- + other: datetime - def onOffset(self, dt): - if self.normalize and not _is_normalized(dt): - return False - d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo) - return d == self.getOffsetOfMonth(dt) + Returns + ------- + day: int + """ + mstart = datetime(other.year, other.month, 1) + wday = mstart.weekday() + shift_days = (self.weekday - wday) % 7 + return 1 + shift_days + self.week * 7 @property def rule_code(self): @@ -1448,7 +1486,7 @@ def _from_name(cls, suffix=None): return cls(week=week, weekday=weekday) -class LastWeekOfMonth(DateOffset): +class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): """ Describes monthly dates in last week of month like "the last Tuesday of each month" @@ -1482,31 +1520,24 @@ def __init__(self, n=1, normalize=False, weekday=None): self.kwds = {'weekday': weekday} - @apply_wraps - def apply(self, other): - offsetOfMonth = self.getOffsetOfMonth(other) - - months = self.n - if months > 0 and offsetOfMonth > other: - months -= 1 - elif months <= 0 and offsetOfMonth < other: - months += 1 - - return self.getOffsetOfMonth(shift_month(other, months, 'start')) + def _get_offset_day(self, other): + """ + Find the day in the same month as other that has the same + weekday as self.weekday and is the last such day in the month. - def getOffsetOfMonth(self, dt): - m = MonthEnd() - d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute, - dt.second, dt.microsecond, tzinfo=dt.tzinfo) - eom = m.rollforward(d) - # TODO: Is this DST-safe? 
- w = Week(weekday=self.weekday) - return w.rollback(eom) + Parameters + ---------- + other: datetime - def onOffset(self, dt): - if self.normalize and not _is_normalized(dt): - return False - return dt == self.getOffsetOfMonth(dt) + Returns + ------- + day: int + """ + dim = ccalendar.get_days_in_month(other.year, other.month) + mend = datetime(other.year, other.month, dim) + wday = mend.weekday() + shift_days = (wday - self.weekday) % 7 + return dim - shift_days @property def rule_code(self):
In the process we get rid of `WeekOfMonth.getOffsetOfMonth` and `LastWeekOfMonth.getOffsetOfMonth`, which were idiosyncratic what arguments they passed to `datetime`. The issues this addresses are orthogonal to #18762, but the code affected does overlap. In particular, after this, `roll_monthday` will not be needed in 18762. closes #18864 closes #18672 closes #18510 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18875
2017-12-20T17:26:27Z
2017-12-23T20:34:46Z
2017-12-23T20:34:45Z
2018-02-11T22:00:27Z
TST: xfail conda 3.5 fails
diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 13a393d9109ae..caee8c8d85811 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -144,6 +144,7 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): with pytest.raises(error_class): reader(path) + @pytest.mark.xfail(reason="not working in 3.5 conda build") @pytest.mark.parametrize('reader, module, path', [ (pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')), (pd.read_table, 'os', os.path.join(HERE, 'data', 'iris.csv')),
closes #18870
https://api.github.com/repos/pandas-dev/pandas/pulls/18873
2017-12-20T15:54:18Z
2017-12-20T18:09:57Z
2017-12-20T18:09:57Z
2017-12-20T18:09:57Z
[WIP] Prod/Sum of all-NA / all-empty
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..b8bcdd8ec71d2 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -8,6 +8,103 @@ deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. +.. _whatsnew_0220.na_sum: + +Pandas 0.22.0 changes the handling of empty and all-NA sums and products. The +summary is that + +* The sum of an all-NA or empty series is now 0 +* The product of an all-NA or empty series is now 1 +* We've added an ``empty_is_na`` keyword to the ``sum`` and ``prod`` methods + to control whether the sum or product of an empty series should be NA. The + default is ``False``. To restore the 0.21 behavior, use + ``empty_is_na=True``. + +Some background: In pandas 0.21.1, we fixed a long-standing inconsistency +in the return value of all-NA series depending on whether or not bottleneck +was installed. See :ref:`whatsnew_0210.api_breaking.bottleneck`_. At the same +time, we changed the sum and prod of an empty Series to also be ``NaN``. + +Based on feedback, we've partially reverted those changes. The defualt sum +for all-NA and empty series is now 0 (1 for ``prod``). You can achieve the +pandas 0.21.0 behavior, returning ``NaN``, with the ``empty_is_na`` keyword. + +*pandas 0.21* + +.. code-block:: ipython + + In [1]: import pandas as pd + + In [2]: import numpy as np + + In [3]: pd.Series([]).sum() + Out[3]: nan + + In [4]: pd.Series([np.nan]).sum() + Out[4]: nan + +*pandas 0.22.0* + +.. ipython:: python + + pd.Series([]).sum() + pd.Series([np.nan]).sum() + +To have the sum of an empty series return ``NaN``, use the ``empty_is_na`` +keyword. Thanks to the ``skipna`` parameter, the ``.sum`` on an all-NA +series is conceptually the same as on an empty. The ``empty_is_na`` parameter +controls the return value after removing NAs. + +.. 
ipython:: python + + pd.Series([]).sum(empty_is_na=True) + pd.Series([np.nan]).sum(empty_is_na=True) + +Note that this affects some other places in the library: + +1. Grouping by a Categorical with some unobserved categories + +*pandas 0.21* + +.. code-block:: ipython + + In [3]: grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + + In [4]: pd.Series([1, 2]).groupby(grouper).sum() + Out[4]: + a 3.0 + b NaN + dtype: float64 + +*pandas 0.22* + +.. ipython:: python + + grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + pd.Series([1, 2]).groupby(grouepr).sum() + +2. Upsampling + +*pandas 0.21.0* + +.. code-block:: ipython + + In [5]: idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + + In [6]: pd.Series([1, 2], index=idx).resample('12H').sum() + Out[6]: + 2017-01-01 00:00:00 1.0 + 2017-01-01 12:00:00 NaN + 2017-01-02 00:00:00 2.0 + Freq: 12H, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + pd.Series([1, 2], index=idx).resample("12H").sum() + .. 
_whatsnew_0220.enhancements: New features diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index d38b677df321c..19dec86fd23fa 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -89,7 +89,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = NAN + out[i, j] = 0 else: out[i, j] = sumx[i, j] @@ -148,7 +148,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = NAN + out[i, j] = 1 else: out[i, j] = prodx[i, j] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4eb7865523cc3..aaf35e502c315 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7310,7 +7310,8 @@ def _add_numeric_operations(cls): @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis", - name1=name, name2=name2, axis_descr=axis_descr) + name1=name, name2=name2, axis_descr=axis_descr, + empty_is_na='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: @@ -7351,7 +7352,7 @@ def mad(self, axis=None, skipna=None, level=None): @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis", name1=name, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, empty_is_na='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: @@ -7375,10 +7376,11 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_empty_stat_function( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', - nanops.nansum) + nanops.nansum, + empty_is_na=False) cls.mean = _make_stat_function( cls, 'mean', name, 
name2, axis_descr, 'Return the mean of the values for the requested axis', @@ -7394,10 +7396,11 @@ def compound(self, axis=None, skipna=None, level=None): "by N-1\n", nanops.nankurt) cls.kurtosis = cls.kurt - cls.prod = _make_stat_function( + cls.prod = _make_empty_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis', - nanops.nanprod) + nanops.nanprod, + empty_is_na=False) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, @@ -7520,14 +7523,14 @@ def _doc_parms(cls): ---------- axis : %(axis_descr)s skipna : boolean, default True - Exclude NA/null values. If an entire row/column is NA or empty, the result - will be NA + Exclude NA/null values before computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use - everything, then use only numeric data. Not implemented for Series. + everything, then use only numeric data. Not implemented for + Series.%(empty_is_na)s Returns ------- @@ -7584,7 +7587,7 @@ def _doc_parms(cls): axis : %(axis_descr)s skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be NA + will be NA. Returns ------- @@ -7598,16 +7601,45 @@ def _doc_parms(cls): """ +_empty_is_na_doc = """ +empty_is_na : bool, default False + The result of operating on an empty array should be NA. The default + behavior is for the sum of an empty array to be 0, and the product + of an empty array to be 1. + + When ``skipna=True``, "empty" refers to whether or not the array + is empty after removing NAs. So operating on an all-NA array with + ``skipna=True`` will be NA when ``empty_is_na`` is True. 
+ """ + + +def _make_empty_stat_function(cls, name, name1, name2, axis_descr, desc, f, + empty_is_na=False): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, empty_is_na=_empty_is_na_doc) + @Appender(_num_doc) + def stat_func(self, axis=None, skipna=True, level=None, numeric_only=None, + empty_is_na=empty_is_na, **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, empty_is_na=empty_is_na) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=numeric_only, + empty_is_na=empty_is_na) + + return set_function_name(stat_func, name, cls) + def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, empty_is_na='') @Appender(_num_doc) - def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + def stat_func(self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs): nv.validate_stat_func(tuple(), kwargs, fname=name) - if skipna is None: - skipna = True if axis is None: axis = self._stat_axis_number if level is not None: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e1c09947ac0b4..2302561d303e8 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -107,7 +107,8 @@ def f(values, axis=None, skipna=True, **kwds): if k not in kwds: kwds[k] = v try: - if values.size == 0: + # TODO: NaT + if values.size == 0 and kwds.get('empty_is_na'): # we either return np.nan or pd.NaT if is_numeric_dtype(values): @@ -155,6 +156,7 @@ def _bn_ok_dtype(dt, name): # Bottleneck chokes on datetime64 if (not is_object_dtype(dt) and not is_datetime_or_timedelta_dtype(dt)): + # TODO: handle this overflow # GH 15507 # bottleneck does not properly upcast during the sum # so can overflow @@ -163,6 
+165,9 @@ def _bn_ok_dtype(dt, name): # further we also want to preserve NaN when all elements # are NaN, unlinke bottleneck/numpy which consider this # to be 0 + + # https://github.com/kwgoodman/bottleneck/issues/180 + # No upcast for boolean -> int if name in ['nansum', 'nanprod']: return False @@ -303,8 +308,8 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') -@bottleneck_switch() -def nansum(values, axis=None, skipna=True): +@bottleneck_switch(empty_is_na=False) +def nansum(values, axis=None, skipna=True, empty_is_na=False): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -312,13 +317,12 @@ def nansum(values, axis=None, skipna=True): elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask) + the_sum = _maybe_null_out(the_sum, axis, mask, empty_is_na) return _wrap_results(the_sum, dtype) @disallow('M8') -@bottleneck_switch() def nanmean(values, axis=None, skipna=True): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) @@ -641,13 +645,15 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True): +@bottleneck_switch(empty_is_na=False) +def nanprod(values, axis=None, skipna=True, empty_is_na=False): mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask) + + return _maybe_null_out(result, axis, mask, empty_is_na, unit=1.0) def _maybe_arg_null_out(result, axis, mask, skipna): @@ -683,9 +689,13 @@ def _get_counts(mask, axis, dtype=float): return np.array(count, dtype=dtype) -def _maybe_null_out(result, axis, mask): +def _maybe_null_out(result, axis, mask, empty_is_na=True, unit=0.0): if axis is not None and getattr(result, 'ndim', False): null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 + + if not 
empty_is_na: + null_mask[result == unit] = False + if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): @@ -698,7 +708,7 @@ def _maybe_null_out(result, axis, mask): result[null_mask] = None elif result is not tslib.NaT: null_mask = mask.size - mask.sum() - if null_mask == 0: + if null_mask == 0.0 and empty_is_na: result = np.nan return result diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 17d711f937bf7..a6b5137d49eea 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -478,10 +478,11 @@ def test_nunique(self): Series({0: 1, 1: 3, 2: 2})) def test_sum(self): - self._check_stat_op('sum', np.sum, has_numeric_only=True) + self._check_stat_op('sum', np.nansum, has_numeric_only=True, + no_skipna_alternative=np.sum) # mixed types (with upcasting happening) - self._check_stat_op('sum', np.sum, + self._check_stat_op('sum', np.nansum, frame=self.mixed_float.astype('float32'), has_numeric_only=True, check_dtype=False, check_less_precise=True) @@ -753,7 +754,8 @@ def alt(x): def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False): + check_dates=False, check_less_precise=False, + no_skipna_alternative=None): if frame is None: frame = self.frame # set some NAs @@ -774,14 +776,20 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, assert len(result) if has_skipna: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) + alt = no_skipna_alternative or alternative # e.g. 
sum / nansum + + if no_skipna_alternative: + def skipna_wrapper(x): + return alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alt(nona) def wrapper(x): - return alternative(x.values) + return alt(x.values) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) @@ -793,7 +801,7 @@ def wrapper(x): check_dtype=False, check_less_precise=check_less_precise) else: - skipna_wrapper = alternative + skipna_wrapper =alternative wrapper = alternative result0 = f(axis=0) @@ -834,6 +842,12 @@ def wrapper(x): r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) if name in ['sum', 'prod']: + tm.assert_numpy_array_equal(r0.values, np.zeros_like(r0)) + tm.assert_numpy_array_equal(r1.values, np.zeros_like(r1)) + + if name in ['sum', 'prod']: + r0 = getattr(all_na, name)(axis=0, skipna=False) + r1 = getattr(all_na, name)(axis=1, skipna=False) assert np.isnan(r0).all() assert np.isnan(r1).all() diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 3d27df31cee6e..7d354702b6eca 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -813,8 +813,6 @@ def test_cython_agg_empty_buckets(self): ops = [('mean', np.mean), ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), ('min', np.min), ('max', np.max), ] @@ -830,6 +828,23 @@ def test_cython_agg_empty_buckets(self): exc.args += ('operation: %s' % op,) raise + def test_cython_agg_empty_buckets_nanops(self): + # Bug in python agg func not being evaluated on empty buckets + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + 
index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + def test_agg_over_numpy_arrays(self): # GH 3788 df = pd.DataFrame([[1, np.array([10, 20, 30])], @@ -925,3 +940,17 @@ def test_agg_structs_series(self, structure, expected): result = df.groupby('A')['C'].aggregate(structure) expected.index.name = 'A' assert_series_equal(result, expected) + + @pytest.mark.xfail(reason="agg functions not called on empty groups") + def test_agg_category_nansum(self): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index c73423921898d..1c05860b7f5fd 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -37,7 +37,7 @@ def test_groupby(self): # single grouper gb = df.groupby("A") exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) - expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) + expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) result = gb.sum() tm.assert_frame_equal(result, expected) @@ -662,3 +662,25 @@ def test_groupby_categorical_two_columns(self): "C3": [nan, nan, nan, nan, 10, 100, nan, nan, nan, nan, 200, 34]}, index=idx) tm.assert_frame_equal(res, exp) + + def test_sum_zero(self): + df = pd.DataFrame({"A": pd.Categorical(['a', 'b', 'a'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + result = 
df.groupby("A").B.sum() + expected = pd.Series([2, 2, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + name='A'), + name='B') + tm.assert_series_equal(result, expected) + + def test_prod_one(self): + df = pd.DataFrame({"A": pd.Categorical(['a', 'b', 'a'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + result = df.groupby("A").B.prod() + expected = pd.Series([1, 2, 1], + index=pd.CategoricalIndex(['a', 'b', 'c'], + name='A'), + name='B') + tm.assert_series_equal(result, expected) \ No newline at end of file diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index cf4a6ec1c932a..a13d985ab6974 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2704,7 +2704,7 @@ def h(df, arg3): # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') - expected = pd.Series([-79.5160891089, -78.4839108911, None], + expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) assert_series_equal(expected, result) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index c8503b16a0e16..dedde090f347c 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -41,12 +41,12 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame( - {'Quantity': np.nan}, + {'Quantity': 0}, index=date_range('20130901 13:00:00', '20131205 13:00:00', freq='5D', name='Date', closed='left')) expected.iloc[[0, 6, 18], 0] = np.array( - [24., 6., 9.], dtype='float64') + [24, 6, 9], dtype='int64') result1 = df.resample('5D') .sum() assert_frame_equal(result1, expected) @@ -261,9 +261,10 @@ def test_timegrouper_with_reg_groups(self): for freq in ['D', 'M', 'A', 'Q-APR']: expected = df.groupby('user_id')[ 'whole_cost'].resample( - freq).sum().dropna().reorder_levels( + freq).sum().reorder_levels( ['date', 'user_id']).sort_index().astype('int64') expected.name = 'whole_cost' + 
expected = expected[expected > 0] result1 = df.sort_index().groupby([pd.Grouper(freq=freq), 'user_id'])['whole_cost'].sum() diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 203a0b4a54858..6777bebcc35a1 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -33,26 +33,30 @@ class TestSeriesAnalytics(TestData): @pytest.mark.parametrize("method", ["sum", "prod"]) def test_empty(self, method, use_bottleneck): + if method == "sum": + unit = 0 + else: + unit = 1 with pd.option_context("use_bottleneck", use_bottleneck): - # GH 9422 - # treat all missing as NaN + # GH 9422 / 18678 + # treat all missing as 0 s = Series([]) result = getattr(s, method)() - assert isna(result) + assert result == unit result = getattr(s, method)(skipna=True) - assert isna(result) + assert result == unit s = Series([np.nan]) result = getattr(s, method)() - assert isna(result) + assert result == unit result = getattr(s, method)(skipna=True) - assert isna(result) + assert result == unit s = Series([np.nan, 1]) result = getattr(s, method)() - assert result == 1.0 + assert result == 1 s = Series([np.nan, 1]) result = getattr(s, method)(skipna=True) @@ -60,13 +64,15 @@ def test_empty(self, method, use_bottleneck): # GH #844 (changed in 9422) df = DataFrame(np.empty((10, 0))) - assert (df.sum(1).isnull()).all() + result = df.sum(1) + expected = pd.Series(0, index=df.index, dtype='float64') + tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "method", ['sum', 'mean', 'median', 'std', 'var']) + "method", ['mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): - # GH 7869 + # GH 7869 / 18678 # consistency on empty # float @@ -77,6 +83,19 @@ def test_ops_consistency_on_empty(self, method): result = getattr(Series(dtype='m8[ns]'), method)() assert result is pd.NaT + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def 
test_ops_consistency_on_empty_sum_prod(self, method, unit): + # GH 18678 + result = getattr(Series(dtype=float), method)() + assert result == unit + + if method == 'sum': + result = getattr(Series(dtype='m8[ns]'), method)() + assert result == pd.Timedelta(0) + def test_nansum_buglet(self): s = Series([1.0, np.nan], index=[0, 1]) result = np.nansum(s) @@ -111,7 +130,7 @@ def test_sum_overflow(self, use_bottleneck): assert np.allclose(float(result), v[-1]) def test_sum(self): - self._check_stat_op('sum', np.sum, check_allna=True) + self._check_stat_op('sum', np.nansum, check_allna=False) def test_sum_inf(self): s = Series(np.random.randn(10)) diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index 14a44c36c6a0c..3c93ff1d3f31e 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -38,7 +38,7 @@ def test_quantile(self): # GH7661 result = Series([np.timedelta64('NaT')]).sum() - assert result is pd.NaT + assert result == pd.Timedelta(0) msg = 'percentiles should all be in the interval \\[0, 1\\]' for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..2f084cabd261d 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -181,12 +181,16 @@ def _coerce_tds(targ, res): check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, - targarnanval, check_dtype=True, **kwargs): + targarnanval, check_dtype=True, empty_targfunc=None, + **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval - try: + if skipna and empty_targfunc and pd.isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: targ = targfunc(targartempval, axis=axis, **kwargs) + try: res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, 
@@ -218,7 +222,9 @@ def check_fun_data(self, testfunc, targfunc, testarval, targarval, except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, **kwargs) + targarnanval2, check_dtype=check_dtype, + empty_targfunc=empty_targfunc, + **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, targarnan=None, **kwargs): @@ -328,7 +334,8 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, - allow_date=False, allow_tdelta=True, check_dtype=False) + allow_date=False, allow_tdelta=True, check_dtype=False, + empty_targfunc=np.nansum) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, @@ -462,7 +469,8 @@ def test_nankurt(self): def test_nanprod(self): self.check_funs(nanops.nanprod, np.prod, allow_str=False, - allow_date=False, allow_tdelta=False) + allow_date=False, allow_tdelta=False, + empty_targfunc=np.nanprod) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) @@ -992,6 +1000,50 @@ def test_nans_skipna(self): def prng(self): return np.random.RandomState(1234) + def test_empty_sum(self): + ser = Series(dtype=np.float64) + result = ser.sum() + assert result == 0.0 + + result = ser.sum(empty_is_na=True) + assert pd.isna(result) + + def test_empty_prod(self): + ser = Series(dtype=np.float64) + result = ser.prod() + assert result == 1.0 + + result = ser.prod(empty_is_na=True) + assert pd.isna(result) + + def test_bool_sum(self): + ser = Series([True, True, False]) + result = ser.sum() + assert result == 2 + + @pytest.mark.parametrize('skipna, series, empty_is_na, expected', [ + (True, pd.Series([]), False, 0), + (True, pd.Series([]), True, np.nan), + (True, pd.Series([np.nan]), False, 0), + (True, pd.Series([np.nan]), True, np.nan), + (False, pd.Series([]), False, 0), + (False, pd.Series([]), True, np.nan), + (False, 
pd.Series([np.nan]), False, np.nan), + (False, pd.Series([np.nan]), True, np.nan), + + ]) + def test_sum_table(self, skipna, series, empty_is_na, expected): + # https://github.com/pandas-dev/pandas/issues/18678 + # #issuecomment-351437890 + the_sum = series.sum(skipna=skipna, empty_is_na=empty_is_na) + the_prod = series.prod(skipna=skipna, empty_is_na=empty_is_na) + if np.isnan(expected): + assert np.isnan(the_sum) + assert np.isnan(the_prod) + else: + assert the_sum == 0.0 + assert the_prod == 1.0 + def test_use_bottleneck(): @@ -1003,4 +1055,4 @@ def test_use_bottleneck(): pd.set_option('use_bottleneck', False) assert not pd.get_option('use_bottleneck') - pd.set_option('use_bottleneck', use_bn) + pd.set_option('use_bottleneck', use_bn) \ No newline at end of file diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index f00fa07d868a1..e9b030fe7eb31 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -3385,7 +3385,13 @@ def test_aggregate_with_nat(self): for func in ['min', 'max', 'sum', 'prod']: normal_result = getattr(normal_grouped, func)() dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], + if func == 'sum': + fill_value = 0 + elif func == 'prod': + fill_value = 1 + else: + fill_value = np.nan + pad = DataFrame([[fill_value] * 4], index=[3], columns=['A', 'B', 'C', 'D']) expected = normal_result.append(pad) expected = expected.sort_index()
To make review a bit easier, I've opened this before it's complete. Of course we still need to decide 1. Do we want a. `sum([]) = 0`; `prod([]) = 1` b. `sum([NaN]) = 0`; `prod([NaN])` = 1 2. Do we like the keyword approach (requiring people who want empty to be NA to always specify it) or do we want a new method? 3. Do we like the `empty_is_na` keyword? Alternatives are `fill_value`, ... 4. Does this apply to groupby (unobserved categoricals) 5. Does this apply to resample? There are still some failing tests. I've tried to keep the commits separate: - groupby: https://github.com/pandas-dev/pandas/commit/e96e38649b9e6784fda2eae3de6c72c44f6ca813 - resample: https://github.com/pandas-dev/pandas/commit/20f2ccc4fa2bcb559732e1b37a8671f29d7ef54b closes #18678
https://api.github.com/repos/pandas-dev/pandas/pulls/18871
2017-12-20T13:55:12Z
2017-12-20T17:37:14Z
null
2017-12-29T15:07:25Z
BLD: try try again
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 8cf70e47a4b8f..6946d7dd11870 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -184,7 +184,7 @@ elif [ "$CONDA_BUILD_TEST" ]; then conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test echo "[installing]" - conda install $(conda build ./conda.recipe --numpy 1.13 --python 3.5 --output) --quiet --use-local + conda install pandas --use-local else
https://api.github.com/repos/pandas-dev/pandas/pulls/18868
2017-12-20T11:12:52Z
2017-12-20T12:36:48Z
2017-12-20T12:36:48Z
2017-12-20T12:36:48Z
DOC: Modify astype copy=False example to work across platforms
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 4eb7865523cc3..7fc9a91c83267 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4343,7 +4343,7 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): pandas object may propagate changes: >>> s1 = pd.Series([1,2]) - >>> s2 = s1.astype('int', copy=False) + >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Very minor, just changed `'int'` -> `'int64'`. The example as previously written does not work on Windows, since `astype('int')` converts to `int32` instead of `int64`. The `Series` constructor defaults to `int64`, so on Windows the `astype` ends up making a copy since the dtype changes, and thus the propagation to `s1` doesn't occur. This shouldn't impact Linux or Mac, since `astype('int')` converts to `int64` on those platforms, so `astype('int64')` should be equivalent. I suppose this might not work for people using 32 bit distributions (?), but this should have wider coverage of potential readers of the docs, and is less confusing than using something like `'intp'`.
https://api.github.com/repos/pandas-dev/pandas/pulls/18865
2017-12-20T06:19:09Z
2017-12-21T15:03:20Z
2017-12-21T15:03:20Z
2017-12-21T15:30:11Z
ENH: Let Categorical.rename_categories take a callable
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..fcca50d1acdfd 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -139,6 +139,8 @@ Other Enhancements - :func:`read_excel()` has gained the ``nrows`` parameter (:issue:`16645`) - :func:``DataFrame.to_json`` and ``Series.to_json`` now accept an ``index`` argument which allows the user to exclude the index from the JSON output (:issue:`17394`) - ``IntervalIndex.to_tuples()`` has gained the ``na_tuple`` parameter to control whether NA is returned as a tuple of NA, or NA itself (:issue:`18756`) +- ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` + can now take a callable as their argument (:issue:`18862`) .. _whatsnew_0220.api_breaking: diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 356e76df366b4..f9bd6849c5072 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -844,7 +844,7 @@ def rename_categories(self, new_categories, inplace=False): Parameters ---------- - new_categories : list-like or dict-like + new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. @@ -852,7 +852,14 @@ def rename_categories(self, new_categories, inplace=False): * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are - ignored. *New in version 0.21.0*. + ignored. + + .. versionadded:: 0.21.0 + + * callable : a callable that is called on all items in the old + categories and whose return values comprise the new categories. + + .. versionadded:: 0.22.0 .. 
warning:: @@ -890,6 +897,12 @@ def rename_categories(self, new_categories, inplace=False): >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] + + You may also provide a callable to create the new categories + + >>> c.rename_categories(lambda x: x.upper()) + [A, A, B] + Categories (2, object): [A, B] """ inplace = validate_bool_kwarg(inplace, 'inplace') cat = self if inplace else self.copy() @@ -906,6 +919,8 @@ def rename_categories(self, new_categories, inplace=False): if is_dict_like(new_categories): cat.categories = [new_categories.get(item, item) for item in cat.categories] + elif callable(new_categories): + cat.categories = [new_categories(item) for item in cat.categories] else: cat.categories = new_categories if not inplace: diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py index 7cc0aafaf05b6..12db4a9bea28b 100644 --- a/pandas/tests/categorical/test_api.py +++ b/pandas/tests/categorical/test_api.py @@ -71,9 +71,14 @@ def test_rename_categories(self): exp_cat = Index(["a", "b", "c"]) tm.assert_index_equal(cat.categories, exp_cat) - res = cat.rename_categories([1, 2, 3], inplace=True) + + # GH18862 (let rename_categories take callables) + result = cat.rename_categories(lambda x: x.upper()) + expected = Categorical(["A", "B", "C", "A"]) + tm.assert_categorical_equal(result, expected) # and now inplace + res = cat.rename_categories([1, 2, 3], inplace=True) assert res is None tm.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 543f59013ff12..f7328a99195b9 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -185,6 +185,11 @@ def test_method_delegation(self): tm.assert_index_equal(result, CategoricalIndex( list('ffggef'), categories=list('efg'))) + # GH18862 (let rename_categories take callables) + result = 
ci.rename_categories(lambda x: x.upper()) + tm.assert_index_equal(result, CategoricalIndex( + list('AABBCA'), categories=list('CAB'))) + ci = CategoricalIndex(list('aabbca'), categories=list('cab')) result = ci.add_categories(['d']) tm.assert_index_equal(result, CategoricalIndex( diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 617ca2199f588..a2838f803421c 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -588,6 +588,14 @@ def f(): pytest.raises(Exception, f) # right: s.cat.set_categories([4,3,2,1]) + # GH18862 (let Series.cat.rename_categories take callables) + s = Series(Categorical(["a", "b", "c", "a"], ordered=True)) + result = s.cat.rename_categories(lambda x: x.upper()) + expected = Series(Categorical(["A", "B", "C", "A"], + categories=["A", "B", "C"], + ordered=True)) + tm.assert_series_equal(result, expected) + def test_str_accessor_api_for_categorical(self): # https://github.com/pandas-dev/pandas/issues/10661 from pandas.core.strings import StringMethods
- [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR allows ``Categorical.rename_categories`` to take a callable as its argument. This is useful for quickly changing the categories the same way for all categories, e.g. ```python >>> pd.Categorical(['a', 'b']).rename_categories("cat_{}".format) [cat_a, cat_b] Categories (2, object): [cat_a, cat_b] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/18862
2017-12-20T01:25:03Z
2017-12-21T15:02:36Z
2017-12-21T15:02:36Z
2017-12-26T22:06:02Z
BLD: --use-local on conda
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 67a175268e22e..8cf70e47a4b8f 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -184,7 +184,7 @@ elif [ "$CONDA_BUILD_TEST" ]; then conda build ./conda.recipe --numpy 1.13 --python 3.5 -q --no-test echo "[installing]" - conda install $(conda build ./conda.recipe --numpy 1.13 --python 3.5 --output) --quiet + conda install $(conda build ./conda.recipe --numpy 1.13 --python 3.5 --output) --quiet --use-local else
https://api.github.com/repos/pandas-dev/pandas/pulls/18858
2017-12-19T23:32:26Z
2017-12-20T00:57:01Z
2017-12-20T00:57:01Z
2017-12-20T00:57:01Z
ENH: df.assign accepting dependent **kwargs (#14207)
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index d7650b6b0938f..78e2fdb46f659 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -95,7 +95,7 @@ constructed from the sorted keys of the dict, if possible. NaN (not a number) is the standard missing data marker used in pandas. -**From scalar value** +**From scalar value** If ``data`` is a scalar value, an index must be provided. The value will be repeated to match the length of **index**. @@ -154,7 +154,7 @@ See also the :ref:`section on attribute access<indexing.attribute_access>`. Vectorized operations and label alignment with Series ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When working with raw NumPy arrays, looping through value-by-value is usually +When working with raw NumPy arrays, looping through value-by-value is usually not necessary. The same is true when working with Series in pandas. Series can also be passed into most NumPy methods expecting an ndarray. @@ -324,7 +324,7 @@ From a list of dicts From a dict of tuples ~~~~~~~~~~~~~~~~~~~~~ -You can automatically create a multi-indexed frame by passing a tuples +You can automatically create a multi-indexed frame by passing a tuples dictionary. .. ipython:: python @@ -347,7 +347,7 @@ column name provided). **Missing Data** Much more will be said on this topic in the :ref:`Missing data <missing_data>` -section. To construct a DataFrame with missing data, we use ``np.nan`` to +section. To construct a DataFrame with missing data, we use ``np.nan`` to represent missing values. Alternatively, you may pass a ``numpy.MaskedArray`` as the data argument to the DataFrame constructor, and its masked entries will be considered missing. @@ -370,7 +370,7 @@ set to ``'index'`` in order to use the dict keys as row labels. ``DataFrame.from_records`` takes a list of tuples or an ndarray with structured dtype. 
It works analogously to the normal ``DataFrame`` constructor, except that -the resulting DataFrame index may be a specific field of the structured +the resulting DataFrame index may be a specific field of the structured dtype. For example: .. ipython:: python @@ -506,25 +506,70 @@ to be inserted (for example, a ``Series`` or NumPy array), or a function of one argument to be called on the ``DataFrame``. A *copy* of the original DataFrame is returned, with the new values inserted. +.. versionmodified:: 0.23.0 + +Starting with Python 3.6 the order of ``**kwargs`` is preserved. This allows +for *dependent* assignment, where an expression later in ``**kwargs`` can refer +to a column created earlier in the same :meth:`~DataFrame.assign`. + +.. ipython:: python + + dfa = pd.DataFrame({"A": [1, 2, 3], + "B": [4, 5, 6]}) + dfa.assign(C=lambda x: x['A'] + x['B'], + D=lambda x: x['A'] + x['C']) + +In the second expression, ``x['C']`` will refer to the newly created column, +that's equal to ``dfa['A'] + dfa['B']``. + +To write code compatible with all versions of Python, split the assignment in two. + +.. ipython:: python + + dependent = pd.DataFrame({"A": [1, 1, 1]}) + (dependent.assign(A=lambda x: x['A'] + 1) + .assign(B=lambda x: x['A'] + 2)) + .. warning:: - Since the function signature of ``assign`` is ``**kwargs``, a dictionary, - the order of the new columns in the resulting DataFrame cannot be guaranteed - to match the order you pass in. To make things predictable, items are inserted - alphabetically (by key) at the end of the DataFrame. + Dependent assignment maybe subtly change the behavior of your code between + Python 3.6 and older versions of Python. 
+ + If you wish write code that supports versions of python before and after 3.6, + you'll need to take care when passing ``assign`` expressions that + + * Updating an existing column + * Refering to the newly updated column in the same ``assign`` + + For example, we'll update column "A" and then refer to it when creating "B". + + .. code-block:: python + + >>> dependent = pd.DataFrame({"A": [1, 1, 1]}) + >>> dependent.assign(A=lambda x: x["A"] + 1, + B=lambda x: x["A"] + 2) + + For Python 3.5 and earlier the expression creating ``B`` refers to the + "old" value of ``A``, ``[1, 1, 1]``. The output is then + + .. code-block:: python + + A B + 0 2 3 + 1 2 3 + 2 2 3 + + For Python 3.6 and later, the expression creating ``A`` refers to the + "new" value of ``A``, ``[2, 2, 2]``, which results in + + .. code-block:: python - All expressions are computed first, and then assigned. So you can't refer - to another column being assigned in the same call to ``assign``. For example: + A B + 0 2 4 + 1 2 4 + 2 2 4 - .. ipython:: - :verbatim: - In [1]: # Don't do this, bad reference to `C` - df.assign(C = lambda x: x['A'] + x['B'], - D = lambda x: x['A'] + x['C']) - In [2]: # Instead, break it into two assigns - (df.assign(C = lambda x: x['A'] + x['B']) - .assign(D = lambda x: x['A'] + x['C'])) Indexing / Selection ~~~~~~~~~~~~~~~~~~~~ @@ -914,7 +959,7 @@ For example, using the earlier example data, we could do: Squeezing ~~~~~~~~~ -Another way to change the dimensionality of an object is to ``squeeze`` a 1-len +Another way to change the dimensionality of an object is to ``squeeze`` a 1-len object, similar to ``wp['Item1']``. .. ipython:: python diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index cf5a44442045b..db5c79dcb3c42 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -248,6 +248,46 @@ Current Behavior: pd.RangeIndex(1, 5) / 0 +.. 
_whatsnew_0230.enhancements.assign_dependent: + +``.assign()`` accepts dependent arguments +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 +<https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. See the +:ref:`documentation here <dsintro.chained_assignment>` (:issue:`14207`) + +.. ipython:: python + + df = pd.DataFrame({'A': [1, 2, 3]}) + df + df.assign(B=df.A, C=lambda x:x['A']+ x['B']) + +.. warning:: + + This may subtly change the behavior of your code when you're + using ``.assign()`` to update an existing column. Previously, callables + referring to other variables being updated would get the "old" values + + Previous Behaviour: + + .. code-block:: ipython + + In [2]: df = pd.DataFrame({"A": [1, 2, 3]}) + + In [3]: df.assign(A=lambda df: df.A + 1, C=lambda df: df.A * -1) + Out[3]: + A C + 0 2 -1 + 1 3 -2 + 2 4 -3 + + New Behaviour: + + .. ipython:: python + + df.assign(A=df.A+1, C= lambda df: df.A* -1) + .. _whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6d8dcb8a1ca89..c99c59db1d8cb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2687,12 +2687,17 @@ def assign(self, **kwargs): Notes ----- - For python 3.6 and above, the columns are inserted in the order of - \*\*kwargs. For python 3.5 and earlier, since \*\*kwargs is unordered, - the columns are inserted in alphabetical order at the end of your - DataFrame. Assigning multiple columns within the same ``assign`` - is possible, but you cannot reference other columns created within - the same ``assign`` call. + Assigning multiple columns within the same ``assign`` is possible. 
+ For Python 3.6 and above, later items in '\*\*kwargs' may refer to + newly created or modified columns in 'df'; items are computed and + assigned into 'df' in order. For Python 3.5 and below, the order of + keyword arguments is not specified, you cannot refer to newly created + or modified columns. All items are computed first, and then assigned + in alphabetical order. + + .. versionmodified :: 0.23.0 + + Keyword argument order is maintained for Python 3.6 and later. Examples -------- @@ -2728,22 +2733,34 @@ def assign(self, **kwargs): 7 8 -1.495604 2.079442 8 9 0.549296 2.197225 9 10 -0.758542 2.302585 + + Where the keyword arguments depend on each other + + >>> df = pd.DataFrame({'A': [1, 2, 3]}) + + >>> df.assign(B=df.A, C=lambda x:x['A']+ x['B']) + A B C + 0 1 1 2 + 1 2 2 4 + 2 3 3 6 """ data = self.copy() - # do all calculations first... - results = OrderedDict() - for k, v in kwargs.items(): - results[k] = com._apply_if_callable(v, data) - - # preserve order for 3.6 and later, but sort by key for 3.5 and earlier + # >= 3.6 preserve order of kwargs if PY36: - results = results.items() + for k, v in kwargs.items(): + data[k] = com._apply_if_callable(v, data) else: + # <= 3.5: do all calculations first... + results = OrderedDict() + for k, v in kwargs.items(): + results[k] = com._apply_if_callable(v, data) + + # <= 3.5 and earlier results = sorted(results.items()) - # ... and then assign - for k, v in results: - data[k] = v + # ... 
and then assign + for k, v in results: + data[k] = v return data def _sanitize_column(self, key, value, broadcast=True): diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py index 9acdf2f17d86a..8236a41d00243 100644 --- a/pandas/tests/frame/test_mutate_columns.py +++ b/pandas/tests/frame/test_mutate_columns.py @@ -89,11 +89,35 @@ def test_assign_bad(self): df.assign(lambda x: x.A) with pytest.raises(AttributeError): df.assign(C=df.A, D=df.A + df.C) + + @pytest.mark.skipif(PY36, reason="""Issue #14207: valid for python + 3.6 and above""") + def test_assign_dependent_old_python(self): + df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}) + + # Key C does not exist at defition time of df with pytest.raises(KeyError): - df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C']) + df.assign(C=lambda df: df.A, + D=lambda df: df['A'] + df['C']) with pytest.raises(KeyError): df.assign(C=df.A, D=lambda x: x['A'] + x['C']) + @pytest.mark.skipif(not PY36, reason="""Issue #14207: not valid for + python 3.5 and below""") + def test_assign_dependent(self): + df = DataFrame({'A': [1, 2], 'B': [3, 4]}) + + result = df.assign(C=df.A, D=lambda x: x['A'] + x['C']) + expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], + columns=list('ABCD')) + assert_frame_equal(result, expected) + + result = df.assign(C=lambda df: df.A, + D=lambda df: df['A'] + df['C']) + expected = DataFrame([[1, 3, 1, 2], [2, 4, 2, 4]], + columns=list('ABCD')) + assert_frame_equal(result, expected) + def test_insert_error_msmgs(self): # GH 7432
Specifically, 'df.assign(b=1, c=lambda x:x['b'])' does not throw an exception in python 3.6 and above. Further details are discussed in Issues #14207 and #18797. closes #14207 closes #18797 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18852
2017-12-19T20:12:25Z
2018-02-10T16:20:19Z
2018-02-10T16:20:18Z
2018-02-10T17:08:25Z
CLN: Drop compact_ints/use_unsigned from read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index d51307081b17f..2584941ac14d2 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -199,21 +199,6 @@ low_memory : boolean, default ``True`` Note that the entire file is read into a single DataFrame regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in chunks. (Only valid with C parser) -compact_ints : boolean, default False - .. deprecated:: 0.19.0 - - Argument moved to ``pd.to_numeric`` - - If ``compact_ints`` is ``True``, then for any column that is of integer dtype, the - parser will attempt to cast it as the smallest integer ``dtype`` possible, either - signed or unsigned depending on the specification from the ``use_unsigned`` parameter. -use_unsigned : boolean, default False - .. deprecated:: 0.18.2 - - Argument moved to ``pd.to_numeric`` - - If integer columns are being compacted (i.e. ``compact_ints=True``), specify whether - the column should be compacted to the smallest signed or unsigned integer dtype. memory_map : boolean, default False If a filepath is provided for ``filepath_or_buffer``, map the file object directly onto memory and access the data directly from there. Using this diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 0579a80aad28e..0e1577c1d9e29 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -233,6 +233,7 @@ Removal of prior version deprecations/changes - :func:`read_csv` has dropped the ``skip_footer`` parameter (:issue:`13386`) - :func:`read_csv` has dropped the ``as_recarray`` parameter (:issue:`13373`) - :func:`read_csv` has dropped the ``buffer_lines`` parameter (:issue:`13360`) +- :func:`read_csv` has dropped the ``compact_ints`` and ``use_unsigned`` parameters (:issue:`13323`) .. 
_whatsnew_0220.performance: diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index f01068ae2e538..1f7c359b519a5 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -305,7 +305,6 @@ cdef class TextReader: object index_col object low_memory object skiprows - object compact_ints, use_unsigned object dtype object encoding object compression @@ -355,10 +354,7 @@ cdef class TextReader: na_fvalues=None, true_values=None, false_values=None, - - compact_ints=False, allow_leading_cols=True, - use_unsigned=False, low_memory=False, skiprows=None, skipfooter=0, @@ -482,10 +478,7 @@ cdef class TextReader: self.false_set = kset_from_list(self.false_values) self.converters = converters - self.na_filter = na_filter - self.compact_ints = compact_ints - self.use_unsigned = use_unsigned self.verbose = verbose self.low_memory = low_memory @@ -1122,11 +1115,6 @@ cdef class TextReader: if upcast_na and na_count > 0: col_res = _maybe_upcast(col_res) - if issubclass(col_res.dtype.type, - np.integer) and self.compact_ints: - col_res = lib.downcast_int64(col_res, na_values, - self.use_unsigned) - if col_res is None: raise ParserError('Unable to parse column %d' % i) diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 8bfed4fe60fed..5ed8828a0f122 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -1657,74 +1657,3 @@ def fast_multiget(dict mapping, ndarray keys, default=np.nan): output[i] = default return maybe_convert_objects(output) - - -def downcast_int64(ndarray[int64_t] arr, object na_values, - bint use_unsigned=0): - cdef: - Py_ssize_t i, n = len(arr) - int64_t mx = INT64_MIN + 1, mn = INT64_MAX - int64_t NA = na_values[np.int64] - int64_t val - ndarray[uint8_t] mask - int na_count = 0 - - _mask = np.empty(n, dtype=bool) - mask = _mask.view(np.uint8) - - for i in range(n): - val = arr[i] - - if val == NA: - mask[i] = 1 - na_count += 1 - continue - - # not NA - mask[i] = 0 - - if val > mx: 
- mx = val - - if val < mn: - mn = val - - if mn >= 0 and use_unsigned: - if mx <= UINT8_MAX - 1: - result = arr.astype(np.uint8) - if na_count: - np.putmask(result, _mask, na_values[np.uint8]) - return result - - if mx <= UINT16_MAX - 1: - result = arr.astype(np.uint16) - if na_count: - np.putmask(result, _mask, na_values[np.uint16]) - return result - - if mx <= UINT32_MAX - 1: - result = arr.astype(np.uint32) - if na_count: - np.putmask(result, _mask, na_values[np.uint32]) - return result - - else: - if mn >= INT8_MIN + 1 and mx <= INT8_MAX: - result = arr.astype(np.int8) - if na_count: - np.putmask(result, _mask, na_values[np.int8]) - return result - - if mn >= INT16_MIN + 1 and mx <= INT16_MAX: - result = arr.astype(np.int16) - if na_count: - np.putmask(result, _mask, na_values[np.int16]) - return result - - if mn >= INT32_MIN + 1 and mx <= INT32_MAX: - result = arr.astype(np.int32) - if na_count: - np.putmask(result, _mask, na_values[np.int32]) - return result - - return arr diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 3d07b0e6cbdfd..92f58db775423 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -273,21 +273,6 @@ Note that the entire file is read into a single DataFrame regardless, use the `chunksize` or `iterator` parameter to return the data in chunks. (Only valid with C parser) -compact_ints : boolean, default False - .. deprecated:: 0.19.0 - Argument moved to ``pd.to_numeric`` - - If compact_ints is True, then for any column that is of integer dtype, - the parser will attempt to cast it as the smallest integer dtype possible, - either signed or unsigned depending on the specification from the - `use_unsigned` parameter. -use_unsigned : boolean, default False - .. deprecated:: 0.19.0 - Argument moved to ``pd.to_numeric`` - - If integer columns are being compacted (i.e. `compact_ints=True`), specify - whether the column should be compacted to the smallest signed or unsigned - integer dtype. 
memory_map : boolean, default False If a filepath is provided for `filepath_or_buffer`, map the file object directly onto memory and access the data directly from there. Using this @@ -496,8 +481,6 @@ def _read(filepath_or_buffer, kwds): _c_parser_defaults = { 'delim_whitespace': False, 'na_filter': True, - 'compact_ints': False, - 'use_unsigned': False, 'low_memory': True, 'memory_map': False, 'error_bad_lines': True, @@ -518,13 +501,9 @@ def _read(filepath_or_buffer, kwds): } _deprecated_defaults = { - 'compact_ints': None, - 'use_unsigned': None, 'tupleize_cols': None } _deprecated_args = { - 'compact_ints', - 'use_unsigned', 'tupleize_cols', } @@ -596,8 +575,6 @@ def parser_f(filepath_or_buffer, # Internal doublequote=True, delim_whitespace=False, - compact_ints=None, - use_unsigned=None, low_memory=_c_parser_defaults['low_memory'], memory_map=False, float_precision=None): @@ -662,8 +639,6 @@ def parser_f(filepath_or_buffer, float_precision=float_precision, na_filter=na_filter, - compact_ints=compact_ints, - use_unsigned=use_unsigned, delim_whitespace=delim_whitespace, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, @@ -1569,11 +1544,6 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, if cast_type and not is_dtype_equal(cvals, cast_type): cvals = self._cast_types(cvals, cast_type, c) - if issubclass(cvals.dtype.type, np.integer) and self.compact_ints: - cvals = lib.downcast_int64( - cvals, parsers.na_values, - self.use_unsigned) - result[c] = cvals if verbose and na_count: print('Filled %d NA values in column %s' % (na_count, str(c))) @@ -2064,8 +2034,6 @@ def __init__(self, f, **kwds): self.converters = kwds['converters'] self.dtype = kwds['dtype'] - self.compact_ints = kwds['compact_ints'] - self.use_unsigned = kwds['use_unsigned'] self.thousands = kwds['thousands'] self.decimal = kwds['decimal'] diff --git a/pandas/tests/dtypes/test_io.py b/pandas/tests/dtypes/test_io.py index ae92e9ecca681..06b61371c9a0b 100644 --- 
a/pandas/tests/dtypes/test_io.py +++ b/pandas/tests/dtypes/test_io.py @@ -71,39 +71,3 @@ def test_convert_sql_column_decimals(self): result = lib.convert_sql_column(arr) expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8') tm.assert_numpy_array_equal(result, expected) - - def test_convert_downcast_int64(self): - from pandas._libs.parsers import na_values - - arr = np.array([1, 2, 7, 8, 10], dtype=np.int64) - expected = np.array([1, 2, 7, 8, 10], dtype=np.int8) - - # default argument - result = lib.downcast_int64(arr, na_values) - tm.assert_numpy_array_equal(result, expected) - - result = lib.downcast_int64(arr, na_values, use_unsigned=False) - tm.assert_numpy_array_equal(result, expected) - - expected = np.array([1, 2, 7, 8, 10], dtype=np.uint8) - result = lib.downcast_int64(arr, na_values, use_unsigned=True) - tm.assert_numpy_array_equal(result, expected) - - # still cast to int8 despite use_unsigned=True - # because of the negative number as an element - arr = np.array([1, 2, -7, 8, 10], dtype=np.int64) - expected = np.array([1, 2, -7, 8, 10], dtype=np.int8) - result = lib.downcast_int64(arr, na_values, use_unsigned=True) - tm.assert_numpy_array_equal(result, expected) - - arr = np.array([1, 2, 7, 8, 300], dtype=np.int64) - expected = np.array([1, 2, 7, 8, 300], dtype=np.int16) - result = lib.downcast_int64(arr, na_values) - tm.assert_numpy_array_equal(result, expected) - - int8_na = na_values[np.int8] - int64_na = na_values[np.int64] - arr = np.array([int64_na, 2, 3, 10, 15], dtype=np.int64) - expected = np.array([int8_na, 2, 3, 10, 15], dtype=np.int8) - result = lib.downcast_int64(arr, na_values) - tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 8a1f23d203a32..8525cb42c2455 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -1371,49 +1371,6 @@ def test_raise_on_no_columns(self): data = "\n\n\n" pytest.raises(EmptyDataError, self.read_csv, 
StringIO(data)) - def test_compact_ints_use_unsigned(self): - # see gh-13323 - data = 'a,b,c\n1,9,258' - - # sanity check - expected = DataFrame({ - 'a': np.array([1], dtype=np.int64), - 'b': np.array([9], dtype=np.int64), - 'c': np.array([258], dtype=np.int64), - }) - out = self.read_csv(StringIO(data)) - tm.assert_frame_equal(out, expected) - - expected = DataFrame({ - 'a': np.array([1], dtype=np.int8), - 'b': np.array([9], dtype=np.int8), - 'c': np.array([258], dtype=np.int16), - }) - - # default behaviour for 'use_unsigned' - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - out = self.read_csv(StringIO(data), compact_ints=True) - tm.assert_frame_equal(out, expected) - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - out = self.read_csv(StringIO(data), compact_ints=True, - use_unsigned=False) - tm.assert_frame_equal(out, expected) - - expected = DataFrame({ - 'a': np.array([1], dtype=np.uint8), - 'b': np.array([9], dtype=np.uint8), - 'c': np.array([258], dtype=np.uint16), - }) - - with tm.assert_produces_warning( - FutureWarning, check_stacklevel=False): - out = self.read_csv(StringIO(data), compact_ints=True, - use_unsigned=True) - tm.assert_frame_equal(out, expected) - def test_memory_map(self): mmap_file = os.path.join(self.dirpath, 'test_mmap.csv') expected = DataFrame({ diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 30dcc3e5731aa..3117f6fae55da 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -128,20 +128,12 @@ def read(self): class TestDeprecatedFeatures(object): @pytest.mark.parametrize("engine", ["c", "python"]) - @pytest.mark.parametrize("kwargs", [{"compact_ints": True}, - {"compact_ints": False}, - {"use_unsigned": True}, - {"use_unsigned": False}, - {"tupleize_cols": True}, + @pytest.mark.parametrize("kwargs", [{"tupleize_cols": True}, {"tupleize_cols": False}]) def 
test_deprecated_args(self, engine, kwargs): data = "1,2,3" arg, _ = list(kwargs.items())[0] - if engine == "python" and arg == "buffer_lines": - # unsupported --> exception is raised - return - with tm.assert_produces_warning( FutureWarning, check_stacklevel=False): read_csv(StringIO(data), engine=engine, **kwargs)
Deprecated in v0.19.0 xref #13323
https://api.github.com/repos/pandas-dev/pandas/pulls/18851
2017-12-19T17:34:31Z
2017-12-21T15:01:25Z
2017-12-21T15:01:25Z
2017-12-22T05:05:45Z
BUG: DatetimeIndex + arraylike of DateOffsets
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 709009542e160..ff041a4849138 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -291,6 +291,7 @@ Conversion - Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) - Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) +- Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) Indexing diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index a441e6c3fd36a..40c07376d2522 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -18,6 +18,7 @@ is_list_like, is_scalar, is_bool_dtype, + is_offsetlike, is_categorical_dtype, is_datetime_or_timedelta_dtype, is_float_dtype, @@ -649,6 +650,14 @@ def _sub_datelike(self, other): def _sub_period(self, other): return NotImplemented + def _add_offset_array(self, other): + # Array/Index of DateOffset objects + return NotImplemented + + def _sub_offset_array(self, other): + # Array/Index of DateOffset objects + return NotImplemented + @classmethod def _add_datetimelike_methods(cls): """ @@ -671,7 +680,12 @@ def __add__(self, other): return self._add_delta(other) elif is_integer(other): return self.shift(other) - elif isinstance(other, (Index, datetime, np.datetime64)): + elif isinstance(other, (datetime, np.datetime64)): + return self._add_datelike(other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + return self._add_offset_array(other) 
+ elif isinstance(other, Index): return self._add_datelike(other) else: # pragma: no cover return NotImplemented @@ -692,10 +706,6 @@ def __sub__(self, other): return self._add_delta(-other) elif isinstance(other, DatetimeIndex): return self._sub_datelike(other) - elif isinstance(other, Index): - raise TypeError("cannot subtract {typ1} and {typ2}" - .format(typ1=type(self).__name__, - typ2=type(other).__name__)) elif isinstance(other, (DateOffset, timedelta)): return self._add_delta(-other) elif is_integer(other): @@ -704,6 +714,14 @@ def __sub__(self, other): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + return self._sub_offset_array(other) + elif isinstance(other, Index): + raise TypeError("cannot subtract {typ1} and {typ2}" + .format(typ1=type(self).__name__, + typ2=type(other).__name__)) + else: # pragma: no cover return NotImplemented cls.__sub__ = __sub__ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9e804b6575c47..321d59eb0e35f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -893,6 +893,32 @@ def _add_offset(self, offset): "or DatetimeIndex", PerformanceWarning) return self.astype('O') + offset + def _add_offset_array(self, other): + # Array/Index of DateOffset objects + if isinstance(other, ABCSeries): + return NotImplemented + elif len(other) == 1: + return self + other[0] + else: + warnings.warn("Adding/subtracting array of DateOffsets to " + "{} not vectorized".format(type(self)), + PerformanceWarning) + return self.astype('O') + np.array(other) + # TODO: This works for __add__ but loses dtype in __sub__ + + def _sub_offset_array(self, other): + # Array/Index of DateOffset objects + if isinstance(other, ABCSeries): + return NotImplemented + elif len(other) == 1: + return self - other[0] + else: + warnings.warn("Adding/subtracting array of DateOffsets to " + 
"{} not vectorized".format(type(self)), + PerformanceWarning) + res_values = self.astype('O').values - np.array(other) + return self.__class__(res_values, freq='infer') + def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(self, date_format) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 05ec7f41b0c66..3a7a5e44d5a88 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -757,7 +757,10 @@ def wrapper(left, right, name=name, na_op=na_op): rvalues = getattr(rvalues, 'values', rvalues) # _Op aligns left and right else: - name = left.name + if isinstance(rvalues, pd.Index): + name = _maybe_match_name(left, rvalues) + else: + name = left.name if (hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex)): lvalues = lvalues.values diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index a46462e91a866..6cfa083172921 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -363,6 +363,51 @@ def test_datetimeindex_sub_timestamp_overflow(self): with pytest.raises(OverflowError): dtimin - variant + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_dti_add_offset_array(self, tz, box): + # GH#18849 + dti = pd.date_range('2017-01-01', periods=2, tz=tz) + other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + res = dti + other + expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))], + name=dti.name, freq='infer') + tm.assert_index_equal(res, expected) + + res2 = other + dti + tm.assert_index_equal(res2, expected) + + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_dti_sub_offset_array(self, tz, box): + # GH#18824 + dti = pd.date_range('2017-01-01', periods=2, tz=tz) + other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + res = 
dti - other + expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))], + name=dti.name, freq='infer') + tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_dti_with_offset_series(self, tz, names): + # GH#18849 + dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0]) + other = pd.Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], + name=names[1]) + + expected_add = pd.Series([dti[n] + other[n] for n in range(len(dti))], + name=names[2]) + res = dti + other + tm.assert_series_equal(res, expected_add) + res2 = other + dti + tm.assert_series_equal(res2, expected_add) + + expected_sub = pd.Series([dti[n] - other[n] for n in range(len(dti))], + name=names[2]) + + res3 = dti - other + tm.assert_series_equal(res3, expected_sub) + # GH 10699 @pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex], diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 66aa5d2db6569..b64f9074c3cf0 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -12,6 +12,32 @@ class TestPeriodIndexArithmetic(object): + def test_pi_add_offset_array(self): + # GH#18849 + pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')]) + offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) + res = pi + offs + expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')]) + tm.assert_index_equal(res, expected) + + unanchored = np.array([pd.offsets.Hour(n=1), + pd.offsets.Minute(n=-2)]) + with pytest.raises(period.IncompatibleFrequency): + pi + unanchored + with pytest.raises(TypeError): + unanchored + pi + + @pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case') + def test_pi_radd_offset_array(self): + # GH#18849 + pi = pd.PeriodIndex([pd.Period('2015Q1'), 
pd.Period('2016Q2')]) + offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) + res = offs + pi + expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')]) + tm.assert_index_equal(res, expected) + def test_add_iadd(self): rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 087567354d32d..3c567e52cccb5 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -28,6 +28,24 @@ def freq(request): class TestTimedeltaIndexArithmetic(object): _holder = TimedeltaIndex + @pytest.mark.xfail(reason='GH#18824 ufunc add cannot use operands...') + def test_tdi_with_offset_array(self): + # GH#18849 + tdi = pd.TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) + offs = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + expected = pd.TimedeltaIndex(['1 days 01:00:00', '3 days 04:02:00']) + + res = tdi + offs + tm.assert_index_equal(res, expected) + + res2 = offs + tdi + tm.assert_index_equal(res2, expected) + + anchored = np.array([pd.offsets.QuarterEnd(), + pd.offsets.Week(weekday=2)]) + with pytest.raises(TypeError): + tdi + anchored + # TODO: Split by ops, better name def test_numeric_compat(self): idx = self._holder(np.arange(5, dtype='int64'))
Before: ``` >>> dti = pd.date_range('2017-01-01', periods=2) >>> other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) >>> dti + other Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: ufunc add cannot use operands with types dtype('<M8[ns]') and dtype('O') # Same for `dti - other`, `dti + pd.Index(other)`, `dti - pd.Index(other)` >>> dti + pd.Series(other) 0 DatetimeIndex(['2017-01-31', '2017-01-31'], dt... 1 DatetimeIndex(['2017-01-03', '2017-01-04'], dt... dtype: object # yikes. ``` After: ``` >>> dti + other pandas/core/indexes/datetimelike.py:677: PerformanceWarning: Adding/subtracting array of DateOffsets to <class 'pandas.core.indexes.datetimes.DatetimeIndex'> not vectorized PerformanceWarning) DatetimeIndex(['2017-01-31', '2017-01-04'], dtype='datetime64[ns]', freq=None) >>> dti - pd.Index(other) DatetimeIndex(['2016-12-31', '2016-12-31'], dtype='datetime64[ns]', freq=None) >>> dti + pd.Series(other) 0 2017-01-31 1 2017-01-04 dtype: datetime64[ns] ``` <b>Caveat</b> This will need a follow-up to make sure `name` attribute is propogated correctly. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18849
2017-12-19T17:22:14Z
2017-12-29T00:25:53Z
2017-12-29T00:25:53Z
2018-01-05T19:23:01Z
Make DatetimeIndex iterator pickleable by dill
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index ec5c20d341b50..bec26ef72d63a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1251,8 +1251,7 @@ def __iter__(self): converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp") - for v in converted: - yield v + return iter(converted) def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None
Currently, dill (https://github.com/uqfoundation/dill) cannot pickle iterators over DatetimeIndex because they are generators. This simple change removes that limitation. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18848
2017-12-19T16:26:47Z
2017-12-21T15:03:47Z
2017-12-21T15:03:47Z
2018-05-30T21:01:38Z
Fixed typo in test_eval arguments
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 90f197738543a..9c3572f9ffe72 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1798,7 +1798,7 @@ def test_invalid_parser(): 'pandas': PandasExprVisitor} -@pytest.mark.parametrize('engine', _parsers) +@pytest.mark.parametrize('engine', _engines) @pytest.mark.parametrize('parser', _parsers) def test_disallowed_nodes(engine, parser): VisitorClass = _parsers[parser]
- [X] closes #18821 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18847
2017-12-19T16:22:59Z
2017-12-21T15:04:15Z
2017-12-21T15:04:14Z
2017-12-21T15:05:02Z
Dec cleanup
diff --git a/pandas/conftest.py b/pandas/conftest.py index b09119895617c..4cf5c9da44697 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -3,7 +3,6 @@ from distutils.version import LooseVersion import numpy import pandas -import pandas.util.testing as tm import dateutil @@ -51,7 +50,6 @@ def add_imports(doctest_namespace): @pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']) def spmatrix(request): - tm._skip_if_no_scipy() from scipy import sparse return getattr(sparse, request.param + '_matrix') diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index d63764e90d26e..47be8d115a07e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -718,8 +718,6 @@ def test_put_compression(self): @td.skip_if_windows_python_3 def test_put_compression_blosc(self): - tm.skip_if_no_package('tables', min_version='2.2', - app='blosc support') df = tm.makeTimeDataFrame() with ensure_clean_store(self.path) as store: diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index bb5dbdcaaa7c4..4b9d6621a20fb 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -7,6 +7,7 @@ from numpy import nan import numpy as np import pandas as pd +from distutils.version import LooseVersion from pandas import Series, DataFrame, bdate_range, Panel from pandas.core.dtypes.common import ( @@ -20,6 +21,7 @@ from pandas.compat import lrange from pandas import compat from pandas.core.sparse import frame as spf +import pandas.util._test_decorators as td from pandas._libs.sparse import BlockIndex, IntIndex from pandas.core.sparse.api import SparseSeries, SparseDataFrame, SparseArray @@ -1169,14 +1171,13 @@ def test_notna(self): tm.assert_frame_equal(res.to_dense(), exp) +@td.skip_if_no_scipy @pytest.mark.parametrize('index', [None, list('abc')]) # noqa: F811 @pytest.mark.parametrize('columns', [None, list('def')]) @pytest.mark.parametrize('fill_value', 
[None, 0, np.nan]) @pytest.mark.parametrize('dtype', [bool, int, float, np.uint16]) def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): # GH 4343 - tm.skip_if_no_package('scipy') - # Make one ndarray and from it one sparse matrix, both to be used for # constructing frames and comparing results arr = np.eye(3, dtype=dtype) @@ -1225,13 +1226,17 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): assert sdf.to_coo().dtype == np.object_ +@td.skip_if_no_scipy @pytest.mark.parametrize('fill_value', [None, 0, np.nan]) # noqa: F811 def test_from_to_scipy_object(spmatrix, fill_value): # GH 4343 dtype = object columns = list('cd') index = list('ab') - tm.skip_if_no_package('scipy', max_version='0.19.0') + import scipy + if (spmatrix is scipy.sparse.dok_matrix and LooseVersion( + scipy.__version__) >= LooseVersion('0.19.0')): + pytest.skip("dok_matrix from object does not work in SciPy >= 0.19") # Make one ndarray and from it one sparse matrix, both to be used for # constructing frames and comparing results @@ -1270,10 +1275,9 @@ def test_from_to_scipy_object(spmatrix, fill_value): assert sdf.to_coo().dtype == res_dtype +@td.skip_if_no_scipy def test_from_scipy_correct_ordering(spmatrix): # GH 16179 - tm.skip_if_no_package('scipy') - arr = np.arange(1, 5).reshape(2, 2) try: spm = spmatrix(arr) @@ -1290,10 +1294,9 @@ def test_from_scipy_correct_ordering(spmatrix): tm.assert_frame_equal(sdf.to_dense(), expected.to_dense()) +@td.skip_if_no_scipy def test_from_scipy_fillna(spmatrix): # GH 16112 - tm.skip_if_no_package('scipy') - arr = np.eye(3) arr[1:, 0] = np.nan diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..d03ecb9f9b5b7 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -12,6 +12,7 @@ from pandas.core.dtypes.common import is_integer_dtype import pandas.core.nanops as nanops import pandas.util.testing as tm +import pandas.util._test_decorators as td use_bn = 
nanops._USE_BOTTLENECK @@ -381,8 +382,8 @@ def test_nanstd(self): allow_str=False, allow_date=False, allow_tdelta=True, allow_obj='convert') + @td.skip_if_no('scipy', min_version='0.17.0') def test_nansem(self): - tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import sem with np.errstate(invalid='ignore'): self.check_funs_ddof(nanops.nansem, sem, allow_complex=False, @@ -441,8 +442,8 @@ def _skew_kurt_wrap(self, values, axis=None, func=None): return 0. return result + @td.skip_if_no('scipy', min_version='0.17.0') def test_nanskew(self): - tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import skew func = partial(self._skew_kurt_wrap, func=skew) with np.errstate(invalid='ignore'): @@ -450,8 +451,8 @@ def test_nanskew(self): allow_str=False, allow_date=False, allow_tdelta=False) + @td.skip_if_no('scipy', min_version='0.17.0') def test_nankurt(self): - tm.skip_if_no_package('scipy', min_version='0.17.0') from scipy.stats import kurtosis func1 = partial(kurtosis, fisher=True) func = partial(self._skew_kurt_wrap, func=func1) @@ -549,8 +550,8 @@ def test_nancorr_pearson(self): self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='pearson') + @td.skip_if_no_scipy def test_nancorr_kendall(self): - tm.skip_if_no_package('scipy.stats') from scipy.stats import kendalltau targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] @@ -561,8 +562,8 @@ def test_nancorr_kendall(self): self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method='kendall') + @td.skip_if_no_scipy def test_nancorr_spearman(self): - tm.skip_if_no_package('scipy.stats') from scipy.stats import spearmanr targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 131d470053a79..4e9282c3bd031 100644 --- a/pandas/util/testing.py +++ 
b/pandas/util/testing.py @@ -15,7 +15,6 @@ from datetime import datetime from functools import wraps, partial from contextlib import contextmanager -from distutils.version import LooseVersion from numpy.random import randn, rand import numpy as np @@ -317,35 +316,6 @@ def close(fignum=None): _close(fignum) -def _skip_if_mpl_1_5(): - import matplotlib as mpl - - v = mpl.__version__ - if LooseVersion(v) > LooseVersion('1.4.3') or str(v)[0] == '0': - import pytest - pytest.skip("matplotlib 1.5") - else: - mpl.use("Agg", warn=False) - - -def _skip_if_no_scipy(): - import pytest - - pytest.importorskip("scipy.stats") - pytest.importorskip("scipy.sparse") - pytest.importorskip("scipy.interpolate") - - -def _skip_if_no_mock(): - try: - import mock # noqa - except ImportError: - try: - from unittest import mock # noqa - except ImportError: - import pytest - raise pytest.skip("mock is not installed") - # ----------------------------------------------------------------------------- # locale utilities @@ -1979,62 +1949,6 @@ def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) -# Dependency checker when running tests. -# -# Copied this from nipy/nipype -# Copyright of respective developers, License: BSD-3 -def skip_if_no_package(pkg_name, min_version=None, max_version=None, - app='pandas', checker=LooseVersion): - """Check that the min/max version of the required package is installed. - - If the package check fails, the test is automatically skipped. - - Parameters - ---------- - pkg_name : string - Name of the required package. - min_version : string, optional - Minimal version number for required package. - max_version : string, optional - Max version number for required package. - app : string, optional - Application that is performing the check. For instance, the - name of the tutorial being executed that depends on specific - packages. - checker : object, optional - The class that will perform the version checking. 
Default is - distutils.version.LooseVersion. - - Examples - -------- - package_check('numpy', '1.3') - - """ - - import pytest - if app: - msg = '{app} requires {pkg_name}'.format(app=app, pkg_name=pkg_name) - else: - msg = 'module requires {pkg_name}'.format(pkg_name=pkg_name) - if min_version: - msg += ' with version >= {min_version}'.format(min_version=min_version) - if max_version: - msg += ' with version < {max_version}'.format(max_version=max_version) - try: - mod = __import__(pkg_name) - except ImportError: - mod = None - try: - have_version = mod.__version__ - except AttributeError: - pytest.skip('Cannot find version for {pkg_name}' - .format(pkg_name=pkg_name)) - if min_version and checker(have_version) < checker(min_version): - pytest.skip(msg) - if max_version and checker(have_version) >= checker(max_version): - pytest.skip(msg) - - def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that
- [X] closes #18190 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This should be the last commit to close out the issue referenced above. I think there's further opportunity to convert some of the imperative ``pytest.skip`` calls over to the new decorator methodology (especially Ibn ``pandas/tests/io``) but to keep the scope clean I would rather open a new issue for those rather than continually update #18190
https://api.github.com/repos/pandas-dev/pandas/pulls/18844
2017-12-19T15:48:10Z
2017-12-21T15:05:14Z
2017-12-21T15:05:14Z
2017-12-22T09:42:33Z
BLD: fix conda install - try again
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index e350dd95d9d7e..ba1de3dd0397e 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -48,9 +48,7 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 - -conda install conda=4.3.30 -# conda update -q conda +conda update -q conda if [ "$CONDA_BUILD_TEST" ]; then echo @@ -58,7 +56,6 @@ if [ "$CONDA_BUILD_TEST" ]; then conda install conda-build fi - echo echo "[add channels]" conda config --remove channels defaults || exit 1 @@ -125,7 +122,7 @@ if [ "$COVERAGE" ]; then fi echo -if [ -z "$PIP_BUILD_TEST" ] and [ -z "$CONDA_BUILD_TEST" ]; then +if [ -z "$PIP_BUILD_TEST" ] && [ -z "$CONDA_BUILD_TEST" ]; then # build but don't install echo "[build em]"
https://api.github.com/repos/pandas-dev/pandas/pulls/18841
2017-12-19T13:27:26Z
2017-12-19T14:08:23Z
2017-12-19T14:08:23Z
2017-12-19T14:08:24Z
BLD: fix conda version to 4.3.30
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 90b9bf3f3186e..e350dd95d9d7e 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -48,7 +48,9 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 -conda update -q conda + +conda install conda=4.3.30 +# conda update -q conda if [ "$CONDA_BUILD_TEST" ]; then echo
https://api.github.com/repos/pandas-dev/pandas/pulls/18838
2017-12-19T12:06:34Z
2017-12-19T12:48:00Z
2017-12-19T12:48:00Z
2017-12-19T12:48:00Z
DEPR: Deprecate skip_footer in read_excel
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae6d0816abc41..58e80361a4fba 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -208,6 +208,7 @@ Deprecations that is the actual tuple, instead of treating the tuple as multiple keys. To retain the previous behavior, use a list instead of a tuple (:issue:`18314`) - ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`). +- :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`) .. _whatsnew_0220.prior_deprecations: diff --git a/pandas/io/excel.py b/pandas/io/excel.py index a1dcd52b61270..2dbfeab9cc331 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -149,6 +149,10 @@ any numeric columns will automatically be parsed, regardless of display format. skip_footer : int, default 0 + + .. deprecated:: 0.22.0 + Pass in `skipfooter` instead. +skipfooter : int, default 0 Rows at the end to skip (0-indexed) convert_float : boolean, default True convert integral floats to int (i.e., 1.0 --> 1). 
If False, all numeric @@ -200,6 +204,7 @@ def get_writer(engine_name): @Appender(_read_excel_doc) @deprecate_kwarg("parse_cols", "usecols") +@deprecate_kwarg("skip_footer", "skipfooter") def read_excel(io, sheet_name=0, header=0, @@ -218,7 +223,7 @@ def read_excel(io, parse_dates=False, date_parser=None, thousands=None, - skip_footer=0, + skipfooter=0, convert_float=True, **kwds): @@ -251,7 +256,7 @@ def read_excel(io, parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, - skip_footer=skip_footer, + skipfooter=skipfooter, convert_float=convert_float, **kwds) @@ -333,7 +338,7 @@ def parse(self, parse_dates=False, date_parser=None, thousands=None, - skip_footer=0, + skipfooter=0, convert_float=True, **kwds): """ @@ -358,7 +363,7 @@ def parse(self, parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, - skip_footer=skip_footer, + skipfooter=skipfooter, convert_float=convert_float, **kwds) @@ -412,14 +417,10 @@ def _parse_excel(self, parse_dates=False, date_parser=None, thousands=None, - skip_footer=0, + skipfooter=0, convert_float=True, **kwds): - skipfooter = kwds.pop('skipfooter', None) - if skipfooter is not None: - skip_footer = skipfooter - _validate_header_arg(header) if 'chunksize' in kwds: @@ -590,7 +591,7 @@ def _parse_cell(cell_contents, cell_typ): parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, - skipfooter=skip_footer, + skipfooter=skipfooter, **kwds) output[asheetname] = parser.read(nrows=nrows) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 274d60c40e83f..71677322329f5 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -286,14 +286,14 @@ def test_excel_table_sheet_by_index(self): tm.assert_frame_equal(df2, dfref, check_names=False) df3 = read_excel(excel, 0, index_col=0, skipfooter=1) - df4 = read_excel(excel, 0, index_col=0, skip_footer=1) tm.assert_frame_equal(df3, df1.iloc[:-1]) - tm.assert_frame_equal(df3, df4) + + with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + df4 = read_excel(excel, 0, index_col=0, skip_footer=1) + tm.assert_frame_equal(df3, df4) df3 = excel.parse(0, index_col=0, skipfooter=1) - df4 = excel.parse(0, index_col=0, skip_footer=1) tm.assert_frame_equal(df3, df1.iloc[:-1]) - tm.assert_frame_equal(df3, df4) import xlrd with pytest.raises(xlrd.XLRDError): @@ -311,10 +311,7 @@ def test_excel_table(self): df3 = self.get_exceldf('test1', 'Sheet1', index_col=0, skipfooter=1) - df4 = self.get_exceldf('test1', 'Sheet1', index_col=0, - skip_footer=1) tm.assert_frame_equal(df3, df1.iloc[:-1]) - tm.assert_frame_equal(df3, df4) def test_reader_special_dtypes(self):
For consistency with `read_csv`, which uses `skipfooter`.
https://api.github.com/repos/pandas-dev/pandas/pulls/18836
2017-12-19T06:38:00Z
2017-12-19T11:01:02Z
2017-12-19T11:01:02Z
2017-12-19T11:12:03Z
CLN: Drop the buffer_lines parameter in read_csv
diff --git a/doc/source/io.rst b/doc/source/io.rst index 184767015bf93..d51307081b17f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -199,11 +199,6 @@ low_memory : boolean, default ``True`` Note that the entire file is read into a single DataFrame regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in chunks. (Only valid with C parser) -buffer_lines : int, default None - .. deprecated:: 0.19.0 - - Argument removed because its value is not respected by the parser - compact_ints : boolean, default False .. deprecated:: 0.19.0 diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae6d0816abc41..24867ca17141f 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -227,6 +227,7 @@ Removal of prior version deprecations/changes - ``DatetimeIndex.to_datetime``, ``Timestamp.to_datetime``, ``PeriodIndex.to_datetime``, and ``Index.to_datetime`` have been removed (:issue:`8254`, :issue:`14096`, :issue:`14113`) - :func:`read_csv` has dropped the ``skip_footer`` parameter (:issue:`13386`) - :func:`read_csv` has dropped the ``as_recarray`` parameter (:issue:`13373`) +- :func:`read_csv` has dropped the ``buffer_lines`` parameter (:issue:`13360`) .. _whatsnew_0220.performance: diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index c6899fa527b6e..f01068ae2e538 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -360,7 +360,6 @@ cdef class TextReader: allow_leading_cols=True, use_unsigned=False, low_memory=False, - buffer_lines=None, skiprows=None, skipfooter=0, verbose=False, @@ -557,7 +556,7 @@ cdef class TextReader: if not self.table_width: raise EmptyDataError("No columns to parse from file") - # compute buffer_lines as function of table width + # Compute buffer_lines as function of table width. 
heuristic = 2**20 // self.table_width self.buffer_lines = 1 while self.buffer_lines * 2 < heuristic: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c2fca1f961222..3d07b0e6cbdfd 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -273,9 +273,6 @@ Note that the entire file is read into a single DataFrame regardless, use the `chunksize` or `iterator` parameter to return the data in chunks. (Only valid with C parser) -buffer_lines : int, default None - .. deprecated:: 0.19.0 - This argument is not respected by the parser compact_ints : boolean, default False .. deprecated:: 0.19.0 Argument moved to ``pd.to_numeric`` @@ -503,7 +500,6 @@ def _read(filepath_or_buffer, kwds): 'use_unsigned': False, 'low_memory': True, 'memory_map': False, - 'buffer_lines': None, 'error_bad_lines': True, 'warn_bad_lines': True, 'tupleize_cols': False, @@ -518,18 +514,15 @@ def _read(filepath_or_buffer, kwds): _c_unsupported = {'skipfooter'} _python_unsupported = { 'low_memory', - 'buffer_lines', 'float_precision', } _deprecated_defaults = { - 'buffer_lines': None, 'compact_ints': None, 'use_unsigned': None, 'tupleize_cols': None } _deprecated_args = { - 'buffer_lines', 'compact_ints', 'use_unsigned', 'tupleize_cols', @@ -606,7 +599,6 @@ def parser_f(filepath_or_buffer, compact_ints=None, use_unsigned=None, low_memory=_c_parser_defaults['low_memory'], - buffer_lines=None, memory_map=False, float_precision=None): @@ -676,7 +668,6 @@ def parser_f(filepath_or_buffer, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, low_memory=low_memory, - buffer_lines=buffer_lines, mangle_dupe_cols=mangle_dupe_cols, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format, diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index b944322b1ed40..30dcc3e5731aa 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -128,9 +128,7 @@ def read(self): class 
TestDeprecatedFeatures(object): @pytest.mark.parametrize("engine", ["c", "python"]) - @pytest.mark.parametrize("kwargs", [{"buffer_lines": True}, - {"buffer_lines": False}, - {"compact_ints": True}, + @pytest.mark.parametrize("kwargs", [{"compact_ints": True}, {"compact_ints": False}, {"use_unsigned": True}, {"use_unsigned": False},
Deprecated back in 0.19.0 xref #13360.
https://api.github.com/repos/pandas-dev/pandas/pulls/18835
2017-12-19T05:39:41Z
2017-12-19T11:01:58Z
2017-12-19T11:01:58Z
2017-12-19T11:11:49Z
DOC: Rewritten style.py docstrings: #12148
diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index ae6d0816abc41..b57cf4ae8fa52 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -250,8 +250,7 @@ Performance Improvements Documentation Changes ~~~~~~~~~~~~~~~~~~~~~ - -- +- Docstring documentation for io/formats/style.py has been overhauled to more accurately reflect the functioning of the `Styler` class. (:issue:`12148`) - - @@ -296,7 +295,7 @@ I/O - Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) - Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) -- +- - Plotting diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 65934494b321b..3f69d06376964 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -678,7 +678,7 @@ def _repr_html_(self): def style(self): """ Property returning a Styler object containing methods for - building a styled HTML representation fo the DataFrame. + building a styled HTML representation of the DataFrame. See Also -------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index daf05bb80d7ca..4ccf1e2ac47c1 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1,6 +1,5 @@ """ -Module for applying conditional formatting to -DataFrames and Series. +Module for applying data-dependent formatting to DataFrames and Series. """ from functools import partial from itertools import product @@ -49,19 +48,21 @@ def _mpl(func): class Styler(object): """ - Helps style a DataFrame or Series according to the - data with HTML and CSS. + A decorator class for a `DataFrame` object to provide an + HTML renderer configurable by data-dependent parameters. 
Parameters ---------- - data: Series or DataFrame - precision: int + data : Series or DataFrame + the object to which the Styler is attached + precision : int precision to round floats to, defaults to pd.options.display.precision - table_styles: list-like, default None - list of {selector: (attr, value)} dicts; see Notes - uuid: str, default None + table_styles : list-like, default None + list of ``{selector: (attr, value)}`` dicts; see docs for the + `set_table_styles` method + uuid : str, default None a unique identifier to avoid CSS collisons; generated automatically - caption: str, default None + caption : str, default None caption to attach to the table Attributes @@ -72,32 +73,47 @@ class Styler(object): Notes ----- - Most styling will be done by passing style functions into - ``Styler.apply`` or ``Styler.applymap``. Style functions should - return values with strings containing CSS ``'attr: value'`` that will - be applied to the indicated cells. - If using in the Jupyter notebook, Styler has defined a ``_repr_html_`` - to automatically render itself. Otherwise call Styler.render to get - the genterated HTML. - - CSS classes are attached to the generated HTML - - * Index and Column names include ``index_name`` and ``level<k>`` - where `k` is its level in a MultiIndex - * Index label cells include - - * ``row_heading`` - * ``row<n>`` where `n` is the numeric position of the row - * ``level<k>`` where `k` is the level in a MultiIndex - - * Column label cells include - * ``col_heading`` - * ``col<n>`` where `n` is the numeric position of the column - * ``evel<k>`` where `k` is the level in a MultiIndex - - * Blank cells include ``blank`` - * Data cells include ``data`` + `Styler` renders an HTML representation of a `DataFrame` using its + own `Styler.render` method; in particular, the Styler class will not + affect, and is independent from, its `data` object's `DataFrame.to_html` + rendering method. 
+ + `Styler.render` returns an HTML string based on attributes which are set + using methods of `Styler`, and are stored as attributes of a `Styler` + instance. + + Some of these attributes are _data-dependent_ functions: they affect the + rendered HTML corresponding to an entry, row, or column of `data` based on + its contents. + + Others are _data-independent_: they affect the rendering of the entire + table, or only affect entries of the table based on their position. + + Some, like the table values set by `set_table_values`, are _semi-data- + dependent_: for instance, HTML table entries corresponding to empty + `DataFrame` entries are endowed with CSS selectors which indicate this + fact, so their styles can be set with a data-independent global attribute. + See the notes for `Styler.render` for details. + + `Styler.render` first constructs a table of strings corresponding + to each entry of `data`, possibly with the help of the data-dependent + function passed into `Styler.format`. It then constructs dict + with general formatting determined by the data-independent properties set, + for instance, by `Styler.set_caption` and `Styler.set_uuid`; and CSS + attributes applied to cells, rows, and columns by data-dependent functions + passed into `Styler.apply`, `Styler.applymap`, and `Styler.where`. + Currently, you cannot apply data-dependent styling to column or row + headers. It then passes this dict to Jinja2's render function with a + fixed template file. + + `Styler also has a `_repr_html_` function which calls + `Styler.render`, so when a Styler object is the value of the last line + in a Jupyter cell, the Styler is automatically rendered to the ``Out`` + field of the cell. + + Style data can be transferred between `Styler` instances with the + `Styler.use` and `Styler.export` methods. 
See Also -------- @@ -120,7 +136,7 @@ def __init__(self, data, precision=None, table_styles=None, uuid=None, if data.ndim == 1: data = data.to_frame() if not data.index.is_unique or not data.columns.is_unique: - raise ValueError("style is not supported for non-unique indicies.") + raise ValueError("style is not supported for non-unique indices.") self.data = data self.index = data.index @@ -147,7 +163,7 @@ def default_display_func(x): self._display_funcs = defaultdict(lambda: default_display_func) def _repr_html_(self): - """Hooks into Jupyter notebook rich display system.""" + """Hooks into Jupyter notebook's rich display system.""" return self.render() @Appender(_shared_docs['to_excel'] % dict( @@ -162,22 +178,35 @@ def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf', verbose=True, freeze_panes=None): - - from pandas.io.formats.excel import ExcelFormatter - formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns, - header=header, - float_format=float_format, index=index, - index_label=index_label, - merge_cells=merge_cells, - inf_rep=inf_rep) - formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow, - startcol=startcol, freeze_panes=freeze_panes, - engine=engine) + """ + Returns an Excel representation of the DataFrame. + Currently, `to_excel` creates a + `pandas.io.formats.excel.ExcelFormatter` object and passes + all parameters to (depending on the parameter) its + constructor, or its `write` method; all keyword arguments + are described in `ExcelFormatter`'s documentation. 
+ + See Also + -------- + pandas.io.formats.excel.ExcelFormatter + """ + from pandas.io.formats.excel import ExcelFormatter + formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns, + header=header, + float_format=float_format, + index=index, + index_label=index_label, + merge_cells=merge_cells, + inf_rep=inf_rep) + formatter.write(excel_writer, sheet_name=sheet_name, + startrow=startrow, + startcol=startcol, freeze_panes=freeze_panes, + engine=engine) def _translate(self): """ - Convert the DataFrame in `self.data` and the attrs from `_build_styles` - into a dictionary of {head, body, uuid, cellstyle} + Builds a dictionary representation out of the data processed with + `_compute()` to be passed to `self.template.render()`. """ table_styles = self.table_styles or [] caption = self.caption @@ -330,16 +359,18 @@ def format_attr(pair): def format(self, formatter, subset=None): """ - Format the text display value of cells. + Specify the text rendered for each table entry based on a formatter. .. versionadded:: 0.18.0 Parameters ---------- formatter: str, callable, or dict + A `callable` must return a `str`, and a `dict`'s values + must be either `str`'s or `callable`'s which return a `str`. subset: IndexSlice - An argument to ``DataFrame.loc`` that restricts which elements - ``formatter`` is applied to. + An argument to `DataFrame.loc` that restricts which elements + `formatter` is applied to. Returns ------- @@ -347,15 +378,31 @@ def format(self, formatter, subset=None): Notes ----- - - ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where - ``a`` is one of - - - str: this will be wrapped in: ``a.format(x)`` - - callable: called with the value of an individual cell - - The default display value for numeric values is the "general" (``g``) - format with ``pd.options.display.precision`` precision. + A `DataFrame` entry with contents `x` will be formatted to the + string representation: + * `formatter.format(x)` if `formatter` is a `str`. 
+ * `formatter(x)` if `formatter` is a `callable`. + * `formatter[<col_name>].format(x)` if `formatter` is a + `dict`, `x` is in the column defined by `<col_name>`, + and `formatter[<col_name>]` is a `str`. + * `formatter[<col_name>].(x)` if `formatter` is a `dict`, + `x` is in the column defined by `<col_name>`, and + `formatter[<col_name>]` is a `callable` which returns + a `str`. + + If no formatter is specified for an entry, the default display value + for numeric values is the "general" (``g``) format with + `pd.options.display.precision` precision. + + Unlike the methods which apply CSS to table entries, each `Styler` + instance can have at most one `formatter` method specified at any + point; calling `format` a second time with a different `formatter` + parameter replaces the old `formatter` method with the newly + specified parameter. + + Note that because `Styler`'s current implementation does not sanitize + HTML tags, you can insert arbitrary HTML code/javascript into your + format strings, and they will render/execute. See examples below. Examples -------- @@ -364,6 +411,12 @@ def format(self, formatter, subset=None): >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'C': str.upper}) + >>> df.style.format("<b>{}</b>") # Make everything bold + >>> df.style.format(\"\"\" + ... <script> + ... document.removeChild(document.documentElement)) + ... </script> + ... \"\"\") # You probably don't want to do this """ if subset is None: row_locs = range(len(self.data)) @@ -394,13 +447,15 @@ def format(self, formatter, subset=None): return self def render(self, **kwargs): - """Render the built up styles to HTML + """ + Render the DataFrame `data` as HTML, as described in the Notes + for the class. Parameters ---------- - `**kwargs`: - Any additional keyword arguments are passed through - to ``self.template.render``. 
This is useful when you + `**kwargs` : + Any keyword arguments are passed through to the default renderer + provided by `jinja2.Environment`. This is useful when you need to provide additional variables for a custom template. @@ -408,21 +463,39 @@ def render(self, **kwargs): Returns ------- - rendered: str + rendered : str the rendered HTML Notes ----- - ``Styler`` objects have defined the ``_repr_html_`` method - which automatically calls ``self.render()`` when it's the - last item in a Notebook cell. When calling ``Styler.render()`` - directly, wrap the result in ``IPython.display.HTML`` to view - the rendered HTML in the notebook. - - Pandas uses the following keys in render. Arguments passed - in ``**kwargs`` take precedence, so think carefuly if you want - to override them: - + `Styler._repr_html_` calls render to do HTML rendering. This means + that `Styler.render` is automatically invoked to display the output + in a Jupyter Notebook cell if the value of the last item is a + `Styler`. + + In order to render HTML into the Out field of a Notebook cell when + a `Styler` is not the last value, invoke + `IPython.display.HTML(self.render())`. + + Entries are rendered with CSS selectors according to gross properties + of their position and content: + * Index and Column names include ``index_name`` and ``level<k>`` + where `k` is its level in a MultiIndex + * Index label cells include + * ``row_heading`` + * ``row<n>`` where `n` is the numeric position of the row + * ``level<k>`` where `k` is the level in a MultiIndex + * Column label cells include + * ``col_heading`` + * ``col<n>`` where `n` is the numeric position of the column + * ``level<k>`` where `k` is the level in a MultiIndex + * Blank cells include ``blank`` + * Data cells include ``data`` + This means that `set_table_values` can be used to set semi-data- + dependent style attributes globally. 
+ + + `Styler` renders HTML by creating a dictionary with the entries: * head * cellstyle * body @@ -431,6 +504,22 @@ def render(self, **kwargs): * table_styles * caption * table_attributes + based on the attributes already assigned to the `Styler` instance, + and passes this dictionary to the `render` method determined by + `jinja2.Environment`. + + This dictionary is the penultimate result of the process embodied by + `Styler` (the final product being, of course, the rendered HTML). + Dictionary values can be over-ridden by keyword arguments passed to + `Styler.render`; this accesses the underlying rendering engine + directly, however, so is unstable: the behavior keywords could change + if the backend changes. Therefore, unless you have both a really good + reason, and intimate knowledge of the rendering backend, you probably + should not pass keyword arguments to `render`. + + See also: + --------- + Styler.apply, Styler.applymap, Styler.where, Styler.set_table_values """ self._compute() # TODO: namespace all the pandas keys @@ -446,13 +535,14 @@ def render(self, **kwargs): def _update_ctx(self, attrs): """ - update the state of the Styler. Collects a mapping - of {index_label: ['<property>: <value>']} + Update the state of the Styler. Collects a dict of the form + {index_label: ['<property>: <value>']} + Parameters + ---------- attrs: Series or DataFrame - should contain strings of '<property>: <value>;<prop2>: <val2>' - Whitespace shouldn't matter and the final trailing ';' shouldn't - matter. + Should contain strings of the form '<property>: <value>; <prop2>: + <val2>; ...'. Neither whitespace nor the final semicolon matters. """ for row_label, v in attrs.iterrows(): for col_label, col in v.iteritems(): @@ -475,7 +565,7 @@ def _copy(self, deepcopy=False): def __copy__(self): """ - Deep copy by default. + Shallow copy by default. 
""" return self._copy(deepcopy=False) @@ -483,7 +573,8 @@ def __deepcopy__(self, memo): return self._copy(deepcopy=True) def clear(self): - """"Reset" the styler, removing any previously applied styles. + """ + "Reset" the styler, removing any previously applied styles. Returns None. """ self.ctx.clear() @@ -491,12 +582,11 @@ def clear(self): def _compute(self): """ - Execute the style functions built up in `self._todo`. + Execute the style methods enumerated in `self._todo`, + which usually got there because they were appended by + `Styler.apply`, `Styler.applymap`, and `Styler.where`. - Relies on the conventions that all style functions go through - .apply or .applymap. The append styles to apply as tuples of - - (application method, *args, **kwargs) + Returns none """ r = self for func, args, kwargs in self._todo: @@ -536,25 +626,27 @@ def _apply(self, func, axis=0, subset=None, **kwargs): def apply(self, func, axis=0, subset=None, **kwargs): """ - Apply a function column-wise, row-wise, or table-wase, - updating the HTML representation with the result. + Assure that when `Styler.render` is called, CSS attributes + determined by the data-dependent function `func` are added + to cells, rows or entries. Parameters ---------- func : function - ``func`` should take a Series or DataFrame (depending - on ``axis``), and return an object with the same shape. + `func` should take a Series or DataFrame (depending + on `axis`), and return an object with the same shape. 
Must return a DataFrame with identical index and - column labels when ``axis=None`` + column labels to those of `data` when `axis=None` axis : int, str or None - apply to each column (``axis=0`` or ``'index'``) - or to each row (``axis=1`` or ``'columns'``) or - to the entire DataFrame at once with ``axis=None`` + determine whether to add attributes to each row + (`axis=0` or `'index'`), to each column (`axis=1` or + `'columns'`) or to the entire DataFrame at once with + `axis=None` subset : IndexSlice - a valid indexer to limit ``data`` to *before* applying the - function. Consider using a pandas.IndexSlice + a valid indexer to limit `data` to *before* applying `func`. + Consider using a pandas.IndexSlice kwargs : dict - pass along to ``func`` + pass along to `func` Returns ------- @@ -562,13 +654,13 @@ def apply(self, func, axis=0, subset=None, **kwargs): Notes ----- - The output shape of ``func`` should match the input, i.e. if - ``x`` is the input row, column, or table (depending on ``axis``), - then ``func(x.shape) == x.shape`` should be true. + The output shape of `func` should match the input, i.e., if + `x` is the input row, column, or table (depending on `axis`), + then `func(x.shape) == x.shape` should be true. - This is similar to ``DataFrame.apply``, except that ``axis=None`` - applies the function to the entire DataFrame at once, - rather than column-wise or row-wise. + In this way, `Styler.apply` handles its parameters similarly to + `DataFrame.apply`, except that `axis=None` applies `func` to + the entire DataFrame at once, rather than column-wise or row-wise. Examples -------- @@ -584,7 +676,7 @@ def apply(self, func, axis=0, subset=None, **kwargs): return self def _applymap(self, func, subset=None, **kwargs): - func = partial(func, **kwargs) # applymap doesn't take kwargs? 
+ func = partial(func, **kwargs) if subset is None: subset = pd.IndexSlice[:] subset = _non_reducing_slice(subset) @@ -594,18 +686,18 @@ def _applymap(self, func, subset=None, **kwargs): def applymap(self, func, subset=None, **kwargs): """ - Apply a function elementwise, updating the HTML - representation with the result. + Assure that when `Styler.render` is called, CSS attributes + determined by the data-dependent function parameter `func` + are added to individual table entries. Parameters ---------- func : function - ``func`` should take a scalar and return a scalar + `func` should take a scalar and return a scalar subset : IndexSlice - a valid indexer to limit ``data`` to *before* applying the - function. Consider using a pandas.IndexSlice + determines to which cells `func` will apply kwargs : dict - pass along to ``func`` + parameters to pass to `func` Returns ------- @@ -622,25 +714,26 @@ def applymap(self, func, subset=None, **kwargs): def where(self, cond, value, other=None, subset=None, **kwargs): """ - Apply a function elementwise, updating the HTML - representation with a style which is selected in - accordance with the return value of a function. + Assure that when `Styler.render` is called, the CSS attribute + `value` is added to an entry when `cond` of that entry is True, + and that the CSS attribute `other` is added when `cond` is False. + are added to individual table entries .. versionadded:: 0.21.0 Parameters ---------- cond : callable - ``cond`` should take a scalar and return a boolean + `cond` should take a scalar and return a boolean value : str - applied when ``cond`` returns true + applied when `cond` returns true other : str - applied when ``cond`` returns false + applied when `cond` returns false subset : IndexSlice - a valid indexer to limit ``data`` to *before* applying the - function. Consider using a pandas.IndexSlice + a valid indexer to limit `data` to *before* applying `cond`. 
+ Consider using a pandas.IndexSlice kwargs : dict - pass along to ``cond`` + pass along to `cond` Returns ------- @@ -676,8 +769,8 @@ def set_precision(self, precision): def set_table_attributes(self, attributes): """ Set the table attributes. These are the items - that show up in the opening ``<table>`` tag in addition - to to automatic (by default) id. + that show up in the opening `<table>` tag in addition + to to automatic (by default) id. Data-independent attribute. Parameters ---------- @@ -699,7 +792,7 @@ def set_table_attributes(self, attributes): def export(self): """ Export the styles to applied to the current Styler. - Can be applied to a second style with ``Styler.use``. + Can be applied to a second style with `Styler.use`. Returns ------- @@ -714,7 +807,7 @@ def export(self): def use(self, styles): """ Set the styles on the current Styler, possibly using styles - from ``Styler.export``. + from `Styler.export`. Parameters ---------- @@ -764,22 +857,26 @@ def set_caption(self, caption): def set_table_styles(self, table_styles): """ - Set the table styles on a Styler. These are placed in a - ``<style>`` tag before the generated HTML table. + Set table styles. These are placed in a `<style>` tag before the + generated HTML table. Parameters ---------- table_styles: list - Each individual table_style should be a dictionary with - ``selector`` and ``props`` keys. ``selector`` should be a CSS + Each list item should be a dictionary with + `selector` and `props` keys. `selector` should be a CSS selector that the style will be applied to (automatically - prefixed by the table's UUID) and ``props`` should be a list of - tuples with ``(attribute, value)``. + prefixed by the table's UUID) and `props` should be a list of + tuples with `(attribute, value)`. Returns ------- self : Styler + Notes + ----- + See the notes for `Styler.render`. 
+ Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) @@ -813,7 +910,7 @@ def hide_columns(self, subset): Parameters ---------- subset: IndexSlice - An argument to ``DataFrame.loc`` that identifies which columns + An argument to `DataFrame.loc` that identifies which columns are hidden. Returns @@ -836,7 +933,7 @@ def _highlight_null(v, null_color): def highlight_null(self, null_color='red'): """ - Shade the background ``null_color`` for missing values. + Set the background color of table entries with missing values. Parameters ---------- @@ -852,7 +949,7 @@ def highlight_null(self, null_color='red'): def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, subset=None): """ - Color the background in a gradient according to + Color the background of table entries in a gradient according to the data in each column (optionally row). Requires matplotlib. @@ -865,7 +962,7 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, axis: int or str 1 or 'columns' for columnwise, 0 or 'index' for rowwise subset: IndexSlice - a valid slice for ``data`` to limit the style application to + a valid slice for `data` to limit the style application to Returns ------- @@ -873,10 +970,10 @@ def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, Notes ----- - Tune ``low`` and ``high`` to keep the text legible by + Tune `low` and `high` to keep the text legible by not using the entire range of the color map. These extend - the range of the data by ``low * (x.max() - x.min())`` - and ``high * (x.max() - x.min())`` before normalizing. + the range of the data by `low * (x.max() - x.min())` + and `high * (x.max() - x.min())` before normalizing. 
""" subset = _maybe_numeric_slice(self.data, subset) subset = _non_reducing_slice(subset) @@ -902,12 +999,12 @@ def _background_gradient(s, cmap='PuBu', low=0, high=0): def set_properties(self, subset=None, **kwargs): """ Convenience method for setting one or more non-data dependent - properties or each cell. + properties for each cell. Parameters ---------- subset: IndexSlice - a valid slice for ``data`` to limit the style application to + a valid slice for `data` to limit the style application to kwargs: dict property: value pairs to be set for each cell @@ -932,13 +1029,13 @@ def _bar_left(s, color, width, base): The minimum value is aligned at the left of the cell Parameters ---------- - color: 2-tuple/list, of [``color_negative``, ``color_positive``] + color: 2-tuple/list, of [`color_negative`, `color_positive`] width: float - A number between 0 or 100. The largest value will cover ``width`` + A number between 0 or 100. The largest value will cover `width` percent of the cell's width base: str The base css format of the cell, e.g.: - ``base = 'width: 10em; height: 80%;'`` + `base = 'width: 10em; height: 80%;'` Returns ------- self : Styler @@ -959,13 +1056,13 @@ def _bar_center_zero(s, color, width, base): Creates a bar chart where the zero is centered in the cell Parameters ---------- - color: 2-tuple/list, of [``color_negative``, ``color_positive``] + color: 2-tuple/list, of [`color_negative`, `color_positive`] width: float - A number between 0 or 100. The largest value will cover ``width`` + A number between 0 or 100. 
The largest value will cover `width` percent of the cell's width base: str The base css format of the cell, e.g.: - ``base = 'width: 10em; height: 80%;'`` + `base = 'width: 10em; height: 80%;'` Returns ------- self : Styler @@ -995,13 +1092,13 @@ def _bar_center_mid(s, color, width, base): Creates a bar chart where the midpoint is centered in the cell Parameters ---------- - color: 2-tuple/list, of [``color_negative``, ``color_positive``] + color: 2-tuple/list, of [`color_negative`, `color_positive`] width: float - A number between 0 or 100. The largest value will cover ``width`` + A number between 0 or 100. The largest value will cover `width` percent of the cell's width base: str The base css format of the cell, e.g.: - ``base = 'width: 10em; height: 80%;'`` + `base = 'width: 10em; height: 80%;'` Returns ------- self : Styler @@ -1038,13 +1135,14 @@ def _bar_center_mid(s, color, width, base): def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left'): """ - Color the background ``color`` proptional to the values in each column. - Excludes non-numeric data by default. + Color the background of each cell with a bar of color `color`, + and of length proportional to the value of the cell relative to those + in its column. Excludes non-numeric data by default. Parameters ---------- subset: IndexSlice, default None - a valid slice for ``data`` to limit the style application to + a valid slice for `data` to limit the style application to axis: int color: str or 2-tuple/list If a str is passed, the color is the same for both @@ -1052,7 +1150,7 @@ def bar(self, subset=None, axis=0, color='#d65f5f', width=100, first element is the color_negative and the second is the color_positive (eg: ['#d65f5f', '#5fba7d']) width: float - A number between 0 or 100. The largest value will cover ``width`` + A number between 0 or 100. 
The largest value will cover `width` percent of the cell's width align : {'left', 'zero',' mid'}, default 'left' - 'left' : the min value starts at the left of the cell @@ -1099,16 +1197,17 @@ def bar(self, subset=None, axis=0, color='#d65f5f', width=100, def highlight_max(self, subset=None, color='yellow', axis=0): """ - Highlight the maximum by shading the background + Set the color to shade the background of an entry + containing the maximum value in a column, row, or table. Parameters ---------- subset: IndexSlice, default None - a valid slice for ``data`` to limit the style application to + a valid slice for `data` to limit the style application to color: str, default 'yellow' axis: int, str, or None; default 0 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise, - or ``None`` for tablewise + or `None` for tablewise Returns ------- @@ -1119,16 +1218,17 @@ def highlight_max(self, subset=None, color='yellow', axis=0): def highlight_min(self, subset=None, color='yellow', axis=0): """ - Highlight the minimum by shading the background + Set the color to shade the background of an entry + containing the maximum value in a column, row, or table. Parameters ---------- subset: IndexSlice, default None - a valid slice for ``data`` to limit the style application to + a valid slice for `data` to limit the style application to color: str, default 'yellow' axis: int, str, or None; default 0 0 or 'index' for columnwise (default), 1 or 'columns' for rowwise, - or ``None`` for tablewise + or `None` for tablewise Returns ------- @@ -1165,7 +1265,7 @@ def _highlight_extrema(data, color='yellow', max_=True): @classmethod def from_custom_template(cls, searchpath, name): """ - Factory function for creating a subclass of ``Styler`` + Factory function for creating a subclass of `Styler` with a custom template and Jinja environment. 
Parameters @@ -1178,7 +1278,7 @@ def from_custom_template(cls, searchpath, name): Returns ------- MyStyler : subclass of Styler - has the correct ``env`` and ``template`` class attributes set. + has the correct `env` and `template` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), @@ -1201,11 +1301,12 @@ def _is_visible(idx_row, idx_col, lengths): def _get_level_lengths(index, hidden_elements=None): """ - Given an index, find the level lenght for each element. + Given an index, find the level length for each element. Optional argument is a list of index positions which should not be visible. - Result is a dictionary of (level, inital_position): span + Returns: + (level, inital_position): span """ sentinel = sentinel_factory() levels = index.format(sparsify=sentinel, adjoin=False, names=False)
Docstrings for pandas/io/formats/style.py now accurately reflects the behavior of the ``Styler`` class. Fixed a typo in the docstring of the `style` property of ``DataFrame``. - [x] closes #12148 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/18834
2017-12-19T05:34:43Z
2018-02-10T18:41:17Z
null
2018-02-10T18:41:17Z