title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
REF: simplify ensure_index
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index f91b96dc1b1dc..0c8f2baabc804 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -190,11 +190,7 @@ def maybe_indices_to_slice( max_len: int, ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] -def clean_index_list(obj: list) -> tuple[ - list | np.ndarray, # np.ndarray[object | np.int64 | np.uint64] - bool, -]: ... - +def is_all_arraylike(obj: list) -> bool: ... # ----------------------------------------------------------------- # Functions which in reality take memoryviews diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4b5ef3e909a00..1a07b76583fca 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -740,19 +740,15 @@ cpdef ndarray[object] ensure_string_array( return result -@cython.wraparound(False) -@cython.boundscheck(False) -def clean_index_list(obj: list): +def is_all_arraylike(obj: list) -> bool: """ - Utility used in ``pandas.core.indexes.api.ensure_index``. + Should we treat these as levels of a MultiIndex, as opposed to Index items? 
""" cdef: Py_ssize_t i, n = len(obj) object val bint all_arrays = True - # First check if we have a list of arraylikes, in which case we will - # pass them to MultiIndex.from_arrays for i in range(n): val = obj[i] if not (isinstance(val, list) or @@ -762,31 +758,7 @@ def clean_index_list(obj: list): all_arrays = False break - if all_arrays: - return obj, all_arrays - - # don't force numpy coerce with nan's - inferred = infer_dtype(obj, skipna=False) - if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']: - return np.asarray(obj, dtype=object), 0 - elif inferred in ['integer']: - # we infer an integer but it *could* be a uint64 - - arr = np.asarray(obj) - if arr.dtype.kind not in ["i", "u"]: - # eg [0, uint64max] gets cast to float64, - # but then we know we have either uint64 or object - if (arr < 0).any(): - # TODO: similar to maybe_cast_to_integer_array - return np.asarray(obj, dtype="object"), 0 - - # GH#35481 - guess = np.asarray(obj, dtype="uint64") - return guess, 0 - - return arr, 0 - - return np.asarray(obj), 0 + return all_arrays # ------------------------------------------------------------------------------ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 124903446220d..96278f5686b57 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,6 +1,5 @@ from __future__ import annotations -from copy import copy as copy_func from datetime import datetime import functools from itertools import zip_longest @@ -6312,21 +6311,15 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind # check in clean_index_list index_like = list(index_like) - converted, all_arrays = lib.clean_index_list(index_like) - - if len(converted) > 0 and all_arrays: + if len(index_like) and lib.is_all_arraylike(index_like): from pandas.core.indexes.multi import MultiIndex - return MultiIndex.from_arrays(converted) + return MultiIndex.from_arrays(index_like) else: - index_like = converted + return 
Index(index_like, copy=copy, tupleize_cols=False) else: - # clean_index_list does the equivalent of copying - # so only need to do this if not list instance - if copy: - index_like = copy_func(index_like) - return Index(index_like) + return Index(index_like, copy=copy) def ensure_has_len(seq): diff --git a/pandas/tests/indexes/numeric/test_numeric.py b/pandas/tests/indexes/numeric/test_numeric.py index c796a25faf0a6..9572aeaf41c91 100644 --- a/pandas/tests/indexes/numeric/test_numeric.py +++ b/pandas/tests/indexes/numeric/test_numeric.py @@ -531,6 +531,14 @@ def test_constructor(self, dtype): res = Index([1, 2 ** 63 + 1], dtype=dtype) tm.assert_index_equal(res, idx) + @pytest.mark.xfail(reason="https://github.com/numpy/numpy/issues/19146") + def test_constructor_does_not_cast_to_float(self): + # https://github.com/numpy/numpy/issues/19146 + values = [0, np.iinfo(np.uint64).max] + + result = UInt64Index(values) + assert list(result) == values + @pytest.mark.parametrize( "box", diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f75e4af888643..d7abaf0b5dfbe 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1622,6 +1622,18 @@ def test_ensure_index_mixed_closed_intervals(self): expected = Index(intervals, dtype=object) tm.assert_index_equal(result, expected) + def test_ensure_index_uint64(self): + # with both 0 and a large-uint64, np.array will infer to float64 + # https://github.com/numpy/numpy/issues/19146 + # but a more accurate choice would be uint64 + values = [0, np.iinfo(np.uint64).max] + + result = ensure_index(values) + assert list(result) == values + + expected = Index(values, dtype="uint64") + tm.assert_index_equal(result, expected) + def test_get_combined_index(self): result = _get_combined_index([]) expected = Index([]) diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 0b1f807f2da63..5b7e90fe16d8f 100644 --- a/pandas/tests/libs/test_lib.py +++ 
b/pandas/tests/libs/test_lib.py @@ -206,15 +206,3 @@ def test_no_default_pickle(): # GH#40397 obj = tm.round_trip_pickle(lib.no_default) assert obj is lib.no_default - - -def test_clean_index_list(): - # with both 0 and a large-uint64, np.array will infer to float64 - # https://github.com/numpy/numpy/issues/19146 - # but a more accurate choice would be uint64 - values = [0, np.iinfo(np.uint64).max] - - result, _ = lib.clean_index_list(values) - - expected = np.array(values, dtype="uint64") - tm.assert_numpy_array_equal(result, expected, check_dtype=True)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41793
2021-06-03T01:14:12Z
2021-06-03T17:15:18Z
2021-06-03T17:15:18Z
2021-06-03T18:34:17Z
REF: de-duplicate ExtensionIndex methods
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7c76a04a605e3..a8626158d48b2 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -346,6 +346,36 @@ def where( res_values = np.where(mask, self._ndarray, value) return self._from_backing_data(res_values) + # ------------------------------------------------------------------------ + # Index compat methods + + def insert( + self: NDArrayBackedExtensionArrayT, loc: int, item + ) -> NDArrayBackedExtensionArrayT: + """ + Make new ExtensionArray inserting new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + type(self) + """ + code = self._validate_scalar(item) + + new_vals = np.concatenate( + ( + self._ndarray[:loc], + np.asarray([code], dtype=self._ndarray.dtype), + self._ndarray[loc:], + ) + ) + return self._from_backing_data(new_vals) + # ------------------------------------------------------------------------ # Additional array methods # These are not part of the EA API, but we implement them because diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 8c2cff21c114e..2cbf1a8063a92 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -37,7 +37,11 @@ is_string_dtype, needs_i8_conversion, ) -from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, @@ -630,7 +634,13 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: # This is needed for Categorical, but is kind of weird return True - # must be PeriodDType + elif isinstance(dtype, PeriodDtype): + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + elif isinstance(dtype, IntervalDtype): + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + # 
fallback, default to allowing NaN, None, NA, NaT return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 066fa1f547328..b1cabf92bf985 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -13,17 +13,12 @@ from pandas._typing import ArrayLike from pandas.compat.numpy import function as nv -from pandas.errors import AbstractMethodError from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.util._exceptions import rewrite_exception -from pandas.core.dtypes.cast import ( - find_common_type, - infer_dtype_from, -) from pandas.core.dtypes.common import ( is_dtype_equal, is_object_dtype, @@ -34,6 +29,7 @@ ABCSeries, ) +from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import ( Categorical, DatetimeArray, @@ -297,6 +293,21 @@ def searchsorted(self, value, side="left", sorter=None) -> np.ndarray: # overriding IndexOpsMixin improves performance GH#38083 return self._data.searchsorted(value, side=side, sorter=sorter) + def putmask(self, mask, value) -> Index: + mask, noop = validate_putmask(self._data, mask) + if noop: + return self.copy() + + try: + self._validate_fill_value(value) + except (ValueError, TypeError): + dtype = self._find_common_type_compat(value) + return self.astype(dtype).putmask(mask, value) + + arr = self._data.copy() + arr.putmask(mask, value) + return type(self)._simple_new(arr, name=self.name) + # --------------------------------------------------------------------- def _get_engine_target(self) -> np.ndarray: @@ -323,9 +334,30 @@ def repeat(self, repeats, axis=None): result = self._data.repeat(repeats, axis=axis) return type(self)._simple_new(result, name=self.name) - def insert(self, loc: int, item): - # ExtensionIndex subclasses must override Index.insert - raise AbstractMethodError(self) + def insert(self, loc: int, item) -> Index: + """ + Make new Index inserting 
new item at location. Follows + Python list.append semantics for negative values. + + Parameters + ---------- + loc : int + item : object + + Returns + ------- + new_index : Index + """ + try: + result = self._data.insert(loc, item) + except (ValueError, TypeError): + # e.g. trying to insert an integer into a DatetimeIndex + # We cannot keep the same dtype, so cast to the (often object) + # minimal shared dtype before doing the insert. + dtype = self._find_common_type_compat(item) + return self.astype(dtype).insert(loc, item) + else: + return type(self)._simple_new(result, name=self.name) def _validate_fill_value(self, value): """ @@ -426,60 +458,3 @@ def _get_engine_target(self) -> np.ndarray: def _from_join_target(self, result: np.ndarray) -> ArrayLike: assert result.dtype == self._data._ndarray.dtype return self._data._from_backing_data(result) - - def insert(self: _T, loc: int, item) -> Index: - """ - Make new Index inserting new item at location. Follows - Python list.append semantics for negative values. - - Parameters - ---------- - loc : int - item : object - - Returns - ------- - new_index : Index - - Raises - ------ - ValueError if the item is not valid for this dtype. - """ - arr = self._data - try: - code = arr._validate_scalar(item) - except (ValueError, TypeError): - # e.g. trying to insert an integer into a DatetimeIndex - # We cannot keep the same dtype, so cast to the (often object) - # minimal shared dtype before doing the insert. 
- dtype, _ = infer_dtype_from(item, pandas_dtype=True) - dtype = find_common_type([self.dtype, dtype]) - return self.astype(dtype).insert(loc, item) - else: - new_vals = np.concatenate( - ( - arr._ndarray[:loc], - np.asarray([code], dtype=arr._ndarray.dtype), - arr._ndarray[loc:], - ) - ) - new_arr = arr._from_backing_data(new_vals) - return type(self)._simple_new(new_arr, name=self.name) - - def putmask(self, mask, value) -> Index: - res_values = self._data.copy() - try: - res_values.putmask(mask, value) - except (TypeError, ValueError): - return self.astype(object).putmask(mask, value) - - return type(self)._simple_new(res_values, name=self.name) - - # error: Argument 1 of "_wrap_joined_index" is incompatible with supertype - # "Index"; supertype defines the argument type as "Union[ExtensionArray, ndarray]" - def _wrap_joined_index( # type: ignore[override] - self: _T, joined: NDArrayBackedExtensionArray, other: _T - ) -> _T: - name = get_op_result_name(self, other) - - return type(self)._simple_new(joined, name=name) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6dcb2a44e7d3d..06ab7fdbcf872 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -67,7 +67,6 @@ take_nd, unique, ) -from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays.interval import ( IntervalArray, _interval_shared_docs, @@ -854,46 +853,6 @@ def mid(self) -> Index: def length(self) -> Index: return Index(self._data.length, copy=False) - def putmask(self, mask, value) -> Index: - mask, noop = validate_putmask(self._data, mask) - if noop: - return self.copy() - - try: - self._validate_fill_value(value) - except (ValueError, TypeError): - dtype = self._find_common_type_compat(value) - return self.astype(dtype).putmask(mask, value) - - arr = self._data.copy() - arr.putmask(mask, value) - return type(self)._simple_new(arr, name=self.name) - - def insert(self, loc: int, item): - """ - Return a new 
IntervalIndex inserting new item at location. Follows - Python list.append semantics for negative values. Only Interval - objects and NA can be inserted into an IntervalIndex - - Parameters - ---------- - loc : int - item : object - - Returns - ------- - IntervalIndex - """ - try: - result = self._data.insert(loc, item) - except (ValueError, TypeError): - # e.g trying to insert a string - dtype, _ = infer_dtype_from_scalar(item, pandas_dtype=True) - dtype = find_common_type([self.dtype, dtype]) - return self.astype(dtype).insert(loc, item) - - return type(self)._simple_new(result, name=self.name) - # -------------------------------------------------------------------- # Rendering Methods # __repr__ associated methods are based on MultiIndex diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 778373fc7f0df..92ef388d73fde 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -24,6 +24,7 @@ ) from pandas.core.dtypes.missing import ( array_equivalent, + is_valid_na_for_dtype, isna, isnull, na_value_for_dtype, @@ -729,3 +730,12 @@ def test_is_matching_na_nan_matches_none(self): assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True) assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True) + + +class TestIsValidNAForDtype: + def test_is_valid_na_for_dtype_interval(self): + dtype = IntervalDtype("int64", "left") + assert not is_valid_na_for_dtype(NaT, dtype) + + dtype = IntervalDtype("datetime64[ns]", "both") + assert not is_valid_na_for_dtype(NaT, dtype)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41791
2021-06-02T23:34:38Z
2021-06-03T00:51:15Z
2021-06-03T00:51:15Z
2021-06-03T00:54:56Z
REF: de-duplicate _validate_fill_value/_validate_scalar
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 7c76a04a605e3..0bb6b2fa4b703 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -94,7 +94,7 @@ def take( axis: int = 0, ) -> NDArrayBackedExtensionArrayT: if allow_fill: - fill_value = self._validate_fill_value(fill_value) + fill_value = self._validate_scalar(fill_value) new_data = take( self._ndarray, @@ -107,25 +107,6 @@ def take( ) return self._from_backing_data(new_data) - def _validate_fill_value(self, fill_value): - """ - If a fill_value is passed to `take` convert it to a representation - suitable for self._ndarray, raising TypeError if this is not possible. - - Parameters - ---------- - fill_value : object - - Returns - ------- - fill_value : native representation - - Raises - ------ - TypeError - """ - raise AbstractMethodError(self) - # ------------------------------------------------------------------------ def equals(self, other) -> bool: @@ -194,7 +175,7 @@ def shift(self, periods=1, fill_value=None, axis=0): def _validate_shift_value(self, fill_value): # TODO: after deprecation in datetimelikearraymixin is enforced, # we can remove this and ust validate_fill_value directly - return self._validate_fill_value(fill_value) + return self._validate_scalar(fill_value) def __setitem__(self, key, value): key = check_array_indexer(self, key) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 47779dd6dba25..068f5703649fa 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1407,7 +1407,7 @@ def _validate_searchsorted_value(self, value): codes = np.array(locs, dtype=self.codes.dtype) # type: ignore[assignment] return codes - def _validate_fill_value(self, fill_value): + def _validate_scalar(self, fill_value): """ Convert a user-facing fill_value to a representation to use with our underlying ndarray, raising TypeError if this is not possible. 
@@ -1436,8 +1436,6 @@ def _validate_fill_value(self, fill_value): ) return fill_value - _validate_scalar = _validate_fill_value - # ------------------------------------------------------------- def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ff46715d0a527..ba5be03b93490 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -557,27 +557,8 @@ def _validate_comparison_value(self, other): return other - def _validate_fill_value(self, fill_value): - """ - If a fill_value is passed to `take` convert it to an i8 representation, - raising TypeError if this is not possible. - - Parameters - ---------- - fill_value : object - - Returns - ------- - fill_value : np.int64, np.datetime64, or np.timedelta64 - - Raises - ------ - TypeError - """ - return self._validate_scalar(fill_value) - def _validate_shift_value(self, fill_value): - # TODO(2.0): once this deprecation is enforced, use _validate_fill_value + # TODO(2.0): once this deprecation is enforced, use _validate_scalar if is_valid_na_for_dtype(fill_value, self.dtype): fill_value = NaT elif isinstance(fill_value, self._recognized_scalars): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4aa3bab168ac6..8836695efcbcb 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -801,7 +801,7 @@ def fillna( if limit is not None: raise TypeError("limit is not supported for IntervalArray.") - value_left, value_right = self._validate_fill_value(value) + value_left, value_right = self._validate_scalar(value) left = self.left.fillna(value=value_left) right = self.right.fillna(value=value_right) @@ -1000,7 +1000,7 @@ def take( fill_left = fill_right = fill_value if allow_fill: - fill_left, fill_right = self._validate_fill_value(fill_value) + fill_left, fill_right = self._validate_scalar(fill_value) left_take = take( self._left, 
indices, allow_fill=allow_fill, fill_value=fill_left @@ -1037,6 +1037,7 @@ def _validate_scalar(self, value): if isinstance(value, Interval): self._check_closed_matches(value, name="value") left, right = value.left, value.right + # TODO: check subdtype match like _validate_setitem_value? elif is_valid_na_for_dtype(value, self.left.dtype): # GH#18295 left = right = value @@ -1046,9 +1047,6 @@ def _validate_scalar(self, value): ) return left, right - def _validate_fill_value(self, value): - return self._validate_scalar(value) - def _validate_setitem_value(self, value): needs_float_conversion = False diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index e9d554200805e..dc592f205b3ea 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -190,7 +190,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): def isna(self) -> np.ndarray: return isna(self._ndarray) - def _validate_fill_value(self, fill_value): + def _validate_scalar(self, fill_value): if fill_value is None: # Primarily for subclasses fill_value = self.dtype.na_value
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41790
2021-06-02T23:28:12Z
2021-06-03T00:51:45Z
2021-06-03T00:51:45Z
2021-06-03T00:56:17Z
Bug in xs raising KeyError for MultiIndex columns with droplevel False and list indexe
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 500030e1304c6..42c71acdee7e0 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -21,6 +21,13 @@ Fixed regressions .. --------------------------------------------------------------------------- +.. _whatsnew_125.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- Deprecated passing lists as ``key`` to :meth:`DataFrame.xs` and :meth:`Series.xs` (:issue:`41760`) + .. _whatsnew_125.bug_fixes: Bug fixes diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 99816237155b3..35e10b27e978c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3756,6 +3756,15 @@ class animal locomotion """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) + + if isinstance(key, list): + warnings.warn( + "Passing lists as key for xs is deprecated and will be removed in a " + "future version. Pass key as a tuple instead.", + FutureWarning, + stacklevel=2, + ) + if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index 57ea6a63f86e8..ccd989e2de411 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -106,7 +106,8 @@ def test_xs_keep_level(self): expected = df[:1] tm.assert_frame_equal(result, expected) - result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False) + with tm.assert_produces_warning(FutureWarning): + result = df.xs([2008, "sat"], level=["year", "day"], drop_level=False) tm.assert_frame_equal(result, expected) def test_xs_view(self, using_array_manager): @@ -187,7 +188,11 @@ def test_xs_with_duplicates(self, key, level, multiindex_dataframe_random_data): assert df.index.is_unique is False expected = concat([frame.xs("one", level="second")] * 2) - result = df.xs(key, level=level) + if isinstance(key, list): + with 
tm.assert_produces_warning(FutureWarning): + result = df.xs(key, level=level) + else: + result = df.xs(key, level=level) tm.assert_frame_equal(result, expected) def test_xs_missing_values_in_index(self): @@ -358,3 +363,11 @@ def test_xs_droplevel_false_view(self, using_array_manager): df.iloc[0, 0] = 2 expected = DataFrame({"a": [1]}) tm.assert_frame_equal(result, expected) + + def test_xs_list_indexer_droplevel_false(self): + # GH#41760 + mi = MultiIndex.from_tuples([("x", "m", "a"), ("x", "n", "b"), ("y", "o", "c")]) + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=mi) + with tm.assert_produces_warning(FutureWarning): + with pytest.raises(KeyError, match="y"): + df.xs(["x", "y"], drop_level=False, axis=1) diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index 932295c28c8cf..a99f09143e282 100644 --- a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -69,7 +69,8 @@ def test_xs_partial( ) df = DataFrame(np.random.randn(8, 4), index=index, columns=list("abcd")) - result = df.xs(["foo", "one"]) + with tm.assert_produces_warning(FutureWarning): + result = df.xs(["foo", "one"]) expected = df.loc["foo", "one"] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_xs.py b/pandas/tests/series/indexing/test_xs.py index b6351e970222f..9a277783a1b3d 100644 --- a/pandas/tests/series/indexing/test_xs.py +++ b/pandas/tests/series/indexing/test_xs.py @@ -69,3 +69,13 @@ def test_series_xs_droplevel_false(self): ), ) tm.assert_series_equal(result, expected) + + def test_xs_key_as_list(self): + # GH#41760 + mi = MultiIndex.from_tuples([("a", "x")], names=["level1", "level2"]) + ser = Series([1], index=mi) + with tm.assert_produces_warning(FutureWarning): + ser.xs(["a", "x"], axis=0, drop_level=False) + + with tm.assert_produces_warning(FutureWarning): + ser.xs(["a"], axis=0, drop_level=False)
- [x] closes #41760 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry cc @simonjayhawkins Should we include in 1.2.5 or push to 1.3?
https://api.github.com/repos/pandas-dev/pandas/pulls/41789
2021-06-02T20:36:06Z
2021-06-04T00:20:06Z
2021-06-04T00:20:05Z
2021-06-04T10:59:48Z
BLD: Adjust Numpy Minimum Versions for aarch64/arm64 compatibility
diff --git a/pyproject.toml b/pyproject.toml index 3947856d94d01..86b255ab6bf58 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,9 +5,17 @@ requires = [ "setuptools>=38.6.0", "wheel", "Cython>=0.29.21,<3", # Note: sync with setup.py - "numpy==1.17.3; python_version=='3.7'", - "numpy==1.18.3; python_version=='3.8'", - "numpy; python_version>='3.9'", + # Numpy requirements for different OS/architectures + # Copied from https://github.com/scipy/scipy/blob/master/pyproject.toml (which is also licensed under BSD) + "numpy==1.17.3; python_version=='3.7' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + "numpy==1.18.3; python_version=='3.8' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + "numpy==1.19.3; python_version>='3.9' and (platform_machine!='arm64' or platform_system!='Darwin') and platform_machine!='aarch64'", + # Aarch64(Python 3.9 requirements are the same as AMD64) + "numpy==1.19.2; python_version=='3.7' and platform_machine=='aarch64'", + "numpy==1.19.2; python_version=='3.8' and platform_machine=='aarch64'", + # Darwin Arm64 + "numpy>=1.20.0; python_version=='3.8' and platform_machine=='arm64' and platform_system=='Darwin'", + "numpy>=1.20.0; python_version=='3.9' and platform_machine=='arm64' and platform_system=='Darwin'" ] # uncomment to enable pep517 after versioneer problem is fixed. # https://github.com/python-versioneer/python-versioneer/issues/193
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41788
2021-06-02T20:29:46Z
2021-06-21T13:07:35Z
2021-06-21T13:07:35Z
2021-09-23T18:52:57Z
REF: de-duplicate nested-dict handling in DataFrame construction
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..c1caf474b2020 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -52,8 +52,6 @@ def is_float_array(values: np.ndarray, skipna: bool = False): ... def is_integer_array(values: np.ndarray, skipna: bool = False): ... def is_bool_array(values: np.ndarray, skipna: bool = False): ... -def fast_multiget(mapping: dict, keys: np.ndarray, default=np.nan) -> np.ndarray: ... - def fast_unique_multiple_list_gen(gen: Generator, sort: bool = True) -> list: ... def fast_unique_multiple_list(lists: list, sort: bool = True) -> list: ... def fast_unique_multiple(arrays: list, sort: bool = True) -> list: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..352f50df01dc9 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2841,25 +2841,3 @@ def to_object_array_tuples(rows: object) -> np.ndarray: result[i, j] = row[j] return result - - -@cython.wraparound(False) -@cython.boundscheck(False) -def fast_multiget(dict mapping, ndarray keys, default=np.nan) -> np.ndarray: - cdef: - Py_ssize_t i, n = len(keys) - object val - ndarray[object] output = np.empty(n, dtype='O') - - if n == 0: - # kludge, for Series - return np.empty(0, dtype='f8') - - for i in range(n): - val = keys[i] - if val in mapping: - output[i] = mapping[val] - else: - output[i] = default - - return maybe_convert_objects(output) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5c7211a5d1852..161572f3f1ac3 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -780,22 +780,6 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, return dtype, val -def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: - """ - Convert datetimelike-keyed dicts to a Timestamp-keyed dict. 
- - Parameters - ---------- - d: dict-like object - - Returns - ------- - dict - - """ - return {maybe_box_datetimelike(key): value for key, value in d.items()} - - def infer_dtype_from_array( arr, pandas_dtype: bool = False ) -> tuple[DtypeObj, ArrayLike]: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 46eb138dc74d1..270eddf2bd3a5 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -27,7 +27,6 @@ from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na, - dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, @@ -61,6 +60,7 @@ TimedeltaArray, ) from pandas.core.construction import ( + create_series_with_explicit_dtype, ensure_wrapped_if_datetimelike, extract_array, range_to_ndarray, @@ -68,9 +68,7 @@ ) from pandas.core.indexes import base as ibase from pandas.core.indexes.api import ( - DatetimeIndex, Index, - TimedeltaIndex, ensure_index, get_objs_combined_axis, union_indexes, @@ -566,7 +564,6 @@ def convert(v): def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: - oindex = None homogenized = [] for val in data: @@ -581,16 +578,10 @@ def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: val = val._values else: if isinstance(val, dict): - if oindex is None: - oindex = index.astype("O") - - if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - # see test_constructor_dict_datetime64_index - val = dict_compat(val) - else: - # see test_constructor_subclass_dict - val = dict(val) - val = lib.fast_multiget(val, oindex._values, default=np.nan) + # see test_constructor_subclass_dict + # test_constructor_dict_datetime64_index + val = create_series_with_explicit_dtype(val, index=index)._values + val = sanitize_array( val, index, dtype=dtype, copy=False, raise_cast_failure=False ) diff --git a/pandas/tests/dtypes/cast/test_dict_compat.py 
b/pandas/tests/dtypes/cast/test_dict_compat.py deleted file mode 100644 index 13dc82d779f95..0000000000000 --- a/pandas/tests/dtypes/cast/test_dict_compat.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np - -from pandas.core.dtypes.cast import dict_compat - -from pandas import Timestamp - - -def test_dict_compat(): - data_datetime64 = {np.datetime64("1990-03-15"): 1, np.datetime64("2015-03-15"): 2} - data_unchanged = {1: 2, 3: 4, 5: 6} - expected = {Timestamp("1990-3-15"): 1, Timestamp("2015-03-15"): 2} - assert dict_compat(data_datetime64) == expected - assert dict_compat(expected) == expected - assert dict_compat(data_unchanged) == data_unchanged
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry This was the motivation for #41707. This will introduce some overhead, but will also prevent the behaviors from getting out of sync again.
https://api.github.com/repos/pandas-dev/pandas/pulls/41785
2021-06-02T14:33:22Z
2021-06-02T18:40:04Z
2021-06-02T18:40:04Z
2021-06-26T07:54:47Z
BUG: clean_index_list handle uint64 case
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..92daad2d6a5d7 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -185,7 +185,7 @@ def maybe_indices_to_slice( ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] def clean_index_list(obj: list) -> tuple[ - list | np.ndarray, # np.ndarray[object] | np.ndarray[np.int64] + list | np.ndarray, # np.ndarray[object | np.int64 | np.uint64] bool, ]: ... diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..cbe5a556d55b0 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -747,10 +747,14 @@ def clean_index_list(obj: list): object val bint all_arrays = True + # First check if we have a list of arraylikes, in which case we will + # pass them to MultiIndex.from_arrays for i in range(n): val = obj[i] if not (isinstance(val, list) or util.is_array(val) or hasattr(val, '_data')): + # TODO: EA? + # exclude tuples, frozensets as they may be contained in an Index all_arrays = False break @@ -762,11 +766,21 @@ def clean_index_list(obj: list): if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']: return np.asarray(obj, dtype=object), 0 elif inferred in ['integer']: - # TODO: we infer an integer but it *could* be a uint64 - try: - return np.asarray(obj, dtype='int64'), 0 - except OverflowError: - return np.asarray(obj, dtype='object'), 0 + # we infer an integer but it *could* be a uint64 + + arr = np.asarray(obj) + if arr.dtype.kind not in ["i", "u"]: + # eg [0, uint64max] gets cast to float64, + # but then we know we have either uint64 or object + if (arr < 0).any(): + # TODO: similar to maybe_cast_to_integer_array + return np.asarray(obj, dtype="object"), 0 + + # GH#35481 + guess = np.asarray(obj, dtype="uint64") + return guess, 0 + + return arr, 0 return np.asarray(obj), 0 @@ -1552,9 +1566,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: for i in range(n): val = values[i] - if (util.is_integer_object(val) and - not 
util.is_timedelta64_object(val) and - not util.is_datetime64_object(val)): + if util.is_integer_object(val): return "mixed-integer" return "mixed" diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 02fd680775141..14ec3d6009b61 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6299,27 +6299,18 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind if copy: index_like = index_like.copy() return index_like - if hasattr(index_like, "name"): - # https://github.com/python/mypy/issues/1424 - # error: Item "ExtensionArray" of "Union[ExtensionArray, - # Sequence[Any]]" has no attribute "name" - # error: Item "Sequence[Any]" of "Union[ExtensionArray, Sequence[Any]]" - # has no attribute "name" - # error: "Sequence[Any]" has no attribute "name" - # error: Item "Sequence[Any]" of "Union[Series, Sequence[Any]]" has no - # attribute "name" - # error: Item "Sequence[Any]" of "Union[Any, Sequence[Any]]" has no - # attribute "name" - name = index_like.name # type: ignore[union-attr, attr-defined] + + if isinstance(index_like, ABCSeries): + name = index_like.name return Index(index_like, name=name, copy=copy) if is_iterator(index_like): index_like = list(index_like) - # must check for exactly list here because of strict type - # check in clean_index_list if isinstance(index_like, list): - if type(index_like) != list: + if type(index_like) is not list: + # must check for exactly list here because of strict type + # check in clean_index_list index_like = list(index_like) converted, all_arrays = lib.clean_index_list(index_like) @@ -6329,13 +6320,6 @@ def ensure_index(index_like: AnyArrayLike | Sequence, copy: bool = False) -> Ind return MultiIndex.from_arrays(converted) else: - if isinstance(converted, np.ndarray) and converted.dtype == np.int64: - # Check for overflows if we should actually be uint64 - # xref GH#35481 - alt = np.asarray(index_like) - if alt.dtype == np.uint64: - converted = alt - 
index_like = converted else: # clean_index_list does the equivalent of copying diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index be5b89f08b5ca..d5555561088eb 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1934,7 +1934,9 @@ def _setitem_with_indexer_missing(self, indexer, value): # e.g. 0.0 -> 0 # GH#12246 if index.is_unique: - new_indexer = index.get_indexer([new_index[-1]]) + # pass new_index[-1:] instead if [new_index[-1]] + # so that we retain dtype + new_indexer = index.get_indexer(new_index[-1:]) if (new_indexer != -1).any(): # We get only here with loc, so can hard code return self._setitem_with_indexer(new_indexer, value, "loc") diff --git a/pandas/tests/libs/test_lib.py b/pandas/tests/libs/test_lib.py index 5b7e90fe16d8f..0b1f807f2da63 100644 --- a/pandas/tests/libs/test_lib.py +++ b/pandas/tests/libs/test_lib.py @@ -206,3 +206,15 @@ def test_no_default_pickle(): # GH#40397 obj = tm.round_trip_pickle(lib.no_default) assert obj is lib.no_default + + +def test_clean_index_list(): + # with both 0 and a large-uint64, np.array will infer to float64 + # https://github.com/numpy/numpy/issues/19146 + # but a more accurate choice would be uint64 + values = [0, np.iinfo(np.uint64).max] + + result, _ = lib.clean_index_list(values) + + expected = np.array(values, dtype="uint64") + tm.assert_numpy_array_equal(result, expected, check_dtype=True)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41784
2021-06-02T13:47:10Z
2021-06-02T15:16:26Z
2021-06-02T15:16:26Z
2021-06-02T16:58:01Z
TYP: remove future import from pyi file
diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi index 346cc34576184..a631191f8b005 100644 --- a/pandas/_libs/tslibs/timezones.pyi +++ b/pandas/_libs/tslibs/timezones.pyi @@ -1,5 +1,3 @@ -from __future__ import annotations - from datetime import ( datetime, tzinfo, diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index e3d5acdfe2577..2a23289cdf61b 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -2,8 +2,6 @@ For cython types that cannot be represented precisely, closest-available python equivalents are used, and the precise types kept as adjacent comments. """ -from __future__ import annotations - from datetime import tzinfo import numpy as np
followup #41769 #41774 xref #41771
https://api.github.com/repos/pandas-dev/pandas/pulls/41783
2021-06-02T12:12:11Z
2021-06-02T15:14:32Z
2021-06-02T15:14:32Z
2021-06-18T02:24:25Z
Backport PR #41739: TST: Make ARM build work (not in the CI)
diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/circle-37-arm64.yaml similarity index 100% rename from ci/deps/travis-37-arm64.yaml rename to ci/deps/circle-37-arm64.yaml diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..e6bd9950331ca 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -12,41 +12,30 @@ if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then echo fi -MINICONDA_DIR="$HOME/miniconda3" - - -if [ -d "$MINICONDA_DIR" ]; then - echo - echo "rm -rf "$MINICONDA_DIR"" - rm -rf "$MINICONDA_DIR" -fi echo "Install Miniconda" -UNAME_OS=$(uname) -if [[ "$UNAME_OS" == 'Linux' ]]; then +DEFAULT_CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest" +if [[ "$(uname -m)" == 'aarch64' ]]; then + CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.10.1-4/Miniforge3-4.10.1-4-Linux-aarch64.sh" +elif [[ "$(uname)" == 'Linux' ]]; then if [[ "$BITS32" == "yes" ]]; then - CONDA_OS="Linux-x86" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86.sh" else - CONDA_OS="Linux-x86_64" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86_64.sh" fi -elif [[ "$UNAME_OS" == 'Darwin' ]]; then - CONDA_OS="MacOSX-x86_64" +elif [[ "$(uname)" == 'Darwin' ]]; then + CONDA_URL="$DEFAULT_CONDA_URL-MacOSX-x86_64.sh" else - echo "OS $UNAME_OS not supported" + echo "OS $(uname) not supported" exit 1 fi - -if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh" -else - CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" -fi +echo "Downloading $CONDA_URL" wget -q $CONDA_URL -O miniconda.sh chmod +x miniconda.sh -# Installation path is required for ARM64 platform as miniforge script installs in path $HOME/miniforge3. 
+MINICONDA_DIR="$HOME/miniconda3" +rm -rf $MINICONDA_DIR ./miniconda.sh -b -p $MINICONDA_DIR - export PATH=$MINICONDA_DIR/bin:$PATH echo diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 2ac9b9e2c875c..9aa261fd745d5 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -77,6 +77,18 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" +def is_platform_arm() -> bool: + """ + Checking if he running platform use ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("arm64", "aarch64") + + def import_lzma(): """ Importing the `lzma` module. diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index e2cdf76d038ec..218d247a25380 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -3,6 +3,7 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm from pandas.errors import UnsupportedFunctionCall from pandas import ( @@ -891,6 +892,7 @@ def test_rolling_sem(frame_or_series): tm.assert_series_equal(result, expected) +@pytest.mark.xfail(is_platform_arm(), reason="GH 41740") @pytest.mark.parametrize( ("func", "third_value", "values"), [
Backport PR #41739
https://api.github.com/repos/pandas-dev/pandas/pulls/41781
2021-06-02T09:08:31Z
2021-06-02T10:26:55Z
2021-06-02T10:26:54Z
2021-06-02T10:26:59Z
DOC: 1.2.5 release date
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 5b8b5eb9e651c..d3ceb2b919b5d 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -1,7 +1,7 @@ .. _whatsnew_125: -What's new in 1.2.5 (May ??, 2021) ----------------------------------- +What's new in 1.2.5 (June 22, 2021) +----------------------------------- These are the changes in pandas 1.2.5. See :ref:`release` for a full changelog including other versions of pandas. @@ -14,36 +14,15 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :func:`concat` between two :class:`DataFrame` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) -- Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) -- Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) -- Regression in :func:`ExcelFile` when a corrupt file is opened but not closed (:issue:`41778`) +- Fixed regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) +- Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) +- Fixed regression in :func:`ExcelFile` when a corrupt file is opened but not closed (:issue:`41778`) - Fixed regression in :meth:`DataFrame.astype` with ``dtype=str`` failing to convert ``NaN`` in categorical columns (:issue:`41797`) .. 
--------------------------------------------------------------------------- - -.. _whatsnew_125.bug_fixes: - -Bug fixes -~~~~~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - -.. _whatsnew_125.other: - -Other -~~~~~ - -- -- - -.. --------------------------------------------------------------------------- - .. _whatsnew_125.contributors: Contributors
https://api.github.com/repos/pandas-dev/pandas/pulls/41780
2021-06-02T07:57:22Z
2021-06-21T18:07:01Z
2021-06-21T18:07:01Z
2021-06-22T08:33:28Z
BUG: Series.loc[-1] with UInt64Index
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b36499c340fd9..cac7b9d8677b0 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -945,6 +945,7 @@ Indexing - Bug in :meth:`DataFrame.loc` returning :class:`MultiIndex` in wrong order if indexer has duplicates (:issue:`40978`) - Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) - Bug in :meth:`PeriodIndex.get_loc` failing to raise ``KeyError`` when given a :class:`Period` with a mismatched ``freq`` (:issue:`41670`) +- Bug ``.loc.__getitem__`` with a :class:`UInt64Index` and negative-integer keys raising ``OverflowError`` instead of ``KeyError`` in some cases, wrapping around to positive integers in others (:issue:`41777`) Missing ^^^^^^^ diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f7cec262ca302..3351bb7cac7d6 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -106,7 +106,8 @@ cdef class IndexEngine: try: return self.mapping.get_item(val) - except (TypeError, ValueError): + except (TypeError, ValueError, OverflowError): + # GH#41775 OverflowError e.g. if we are uint64 and val is -1 raise KeyError(val) cdef inline _get_loc_duplicates(self, object val): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 124903446220d..db718916d7fd7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5410,6 +5410,7 @@ def _find_common_type_compat(self, target) -> DtypeObj: return np.dtype("object") dtype = find_common_type([self.dtype, target_dtype]) + if dtype.kind in ["i", "u"]: # TODO: what about reversed with self being categorical? 
if ( diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5f24eb0cfaad6..3dc46f04d1d45 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -49,7 +49,6 @@ TimedeltaArray, ) from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin -import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import ( Index, @@ -599,7 +598,7 @@ def _convert_arr_indexer(self, keyarr): try: return self._data._validate_listlike(keyarr, allow_object=True) except (ValueError, TypeError): - return com.asarray_tuplesafe(keyarr) + return super()._convert_arr_indexer(keyarr) class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index de7c522b4fbec..e6526bd0eaf2f 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -37,7 +37,6 @@ ) from pandas.core.dtypes.generic import ABCSeries -import pandas.core.common as com from pandas.core.indexes.base import ( Index, maybe_extract_name, @@ -250,21 +249,6 @@ def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default): # we will try to coerce to integers return self._maybe_cast_indexer(label) - @doc(Index._convert_arr_indexer) - def _convert_arr_indexer(self, keyarr) -> np.ndarray: - if not is_unsigned_integer_dtype(self.dtype): - return super()._convert_arr_indexer(keyarr) - - # Cast the indexer to uint64 if possible so that the values returned - # from indexing are also uint64. 
- dtype = None - if is_integer_dtype(keyarr) or ( - lib.infer_dtype(keyarr, skipna=False) == "integer" - ): - dtype = np.dtype(np.uint64) - - return com.asarray_tuplesafe(keyarr, dtype=dtype) - # ---------------------------------------------------------------- @doc(Index._shallow_copy) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index ab868a3d3713d..dcccd42c52c8c 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1010,18 +1010,32 @@ def test_loc_copy_vs_view(self): def test_loc_uint64(self): # GH20722 # Test whether loc accept uint64 max value as index. - s = Series([1, 2], index=[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]) + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) - result = s.loc[np.iinfo("uint64").max - 1] - expected = s.iloc[0] + result = ser.loc[umax - 1] + expected = ser.iloc[0] assert result == expected - result = s.loc[[np.iinfo("uint64").max - 1]] - expected = s.iloc[[0]] + result = ser.loc[[umax - 1]] + expected = ser.iloc[[0]] tm.assert_series_equal(result, expected) - result = s.loc[[np.iinfo("uint64").max - 1, np.iinfo("uint64").max]] - tm.assert_series_equal(result, s) + result = ser.loc[[umax - 1, umax]] + tm.assert_series_equal(result, ser) + + def test_loc_uint64_disallow_negative(self): + # GH#41775 + umax = np.iinfo("uint64").max + ser = Series([1, 2], index=[umax - 1, umax]) + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[-1] + + with pytest.raises(KeyError, match="-1"): + # don't wrap around + ser.loc[[-1]] def test_loc_setitem_empty_append_expands_rows(self): # GH6173, various appends to an empty dataframe
- [x] closes #41775 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41777
2021-06-02T05:09:17Z
2021-06-03T17:15:56Z
2021-06-03T17:15:56Z
2021-06-03T18:35:02Z
ENH: maybe_convert_objects handle IntervalArray
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1ebcdb347c428..4d184ee13e3db 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1187,6 +1187,7 @@ cdef class Seen: bint timedelta_ # seen_timedelta bint datetimetz_ # seen_datetimetz bint period_ # seen_period + bint interval_ # seen_interval def __cinit__(self, bint coerce_numeric=False): """ @@ -1212,6 +1213,7 @@ cdef class Seen: self.timedelta_ = False self.datetimetz_ = False self.period_ = False + self.interval_ = False self.coerce_numeric = coerce_numeric cdef inline bint check_uint64_conflict(self) except -1: @@ -2035,7 +2037,6 @@ cpdef bint is_interval_array(ndarray values): """ Is this an ndarray of Interval (or np.nan) with a single dtype? """ - cdef: Py_ssize_t i, n = len(values) str closed = None @@ -2320,6 +2321,7 @@ def maybe_convert_objects(ndarray[object] objects, bint convert_datetime=False, bint convert_timedelta=False, bint convert_period=False, + bint convert_interval=False, bint convert_to_nullable_integer=False) -> "ArrayLike": """ Type inference function-- convert object array to proper dtype @@ -2343,6 +2345,9 @@ def maybe_convert_objects(ndarray[object] objects, convert_period : bool, default False If an array-like object contains only (homogeneous-freq) Period values or NaT, whether to convert and return a PeriodArray. + convert_interval : bool, default False + If an array-like object contains only Interval objects (with matching + dtypes and closedness) or NaN, whether to convert to IntervalArray. convert_to_nullable_integer : bool, default False If an array-like object contains only integer values (and NaN) is encountered, whether to convert and return an IntegerArray. 
@@ -2473,6 +2478,13 @@ def maybe_convert_objects(ndarray[object] objects, except (ValueError, TypeError): seen.object_ = True break + elif is_interval(val): + if convert_interval: + seen.interval_ = True + break + else: + seen.object_ = True + break else: seen.object_ = True break @@ -2494,6 +2506,17 @@ def maybe_convert_objects(ndarray[object] objects, # unbox to PeriodArray return pi._data + seen.object_ = True + + if seen.interval_: + if is_interval_array(objects): + from pandas import IntervalIndex + ii = IntervalIndex(objects) + + # unbox to IntervalArray + return ii._data + + seen.object_ = True if not seen.object_: result = None diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index cd5e28baef16b..7e0b26391e132 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -813,6 +813,22 @@ def test_mixed_dtypes_remain_object_array(self): result = lib.maybe_convert_objects(arr, convert_datetime=True) tm.assert_numpy_array_equal(result, arr) + @pytest.mark.parametrize( + "idx", + [ + pd.IntervalIndex.from_breaks(range(5), closed="both"), + pd.period_range("2016-01-01", periods=3, freq="D"), + ], + ) + def test_maybe_convert_objects_ea(self, idx): + + result = lib.maybe_convert_objects( + np.array(idx, dtype=object), + convert_period=True, + convert_interval=True, + ) + tm.assert_extension_array_equal(result, idx._data) + class TestTypeInference:
Inching towards getting rid of special-casing interval+period in sanitize_array
https://api.github.com/repos/pandas-dev/pandas/pulls/41776
2021-06-02T04:40:03Z
2021-06-02T13:05:19Z
2021-06-02T13:05:19Z
2021-06-02T13:51:52Z
TYP: use type annotations in vectorized.pyi
diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index 6ed1e10ef2353..e3d5acdfe2577 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -2,11 +2,9 @@ For cython types that cannot be represented precisely, closest-available python equivalents are used, and the precise types kept as adjacent comments. """ +from __future__ import annotations + from datetime import tzinfo -from typing import ( - Optional, - Union, -) import numpy as np @@ -16,32 +14,24 @@ from pandas._libs.tslibs.offsets import BaseOffset def dt64arr_to_periodarr( stamps: np.ndarray, # const int64_t[:] freq: int, - tz: Optional[tzinfo], + tz: tzinfo | None, ) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] - - def is_date_array_normalized( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo] = None, + tz: tzinfo | None = None, ) -> bool: ... - - def normalize_i8_timestamps( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo], + tz: tzinfo | None, ) -> np.ndarray: ... # np.ndarray[np.int64] - - def get_resolution( stamps: np.ndarray, # const int64_t[:] - tz: Optional[tzinfo] = None, + tz: tzinfo | None = None, ) -> Resolution: ... - - def ints_to_pydatetime( arr: np.ndarray, # const int64_t[:}] - tz: Optional[tzinfo] = None, - freq: Optional[Union[str, BaseOffset]] = None, + tz: tzinfo | None = None, + freq: str | BaseOffset | None = None, fold: bool = False, box: str = "datetime", ) -> np.ndarray: ... # np.ndarray[object]
https://api.github.com/repos/pandas-dev/pandas/pulls/41774
2021-06-02T00:38:40Z
2021-06-02T01:42:10Z
2021-06-02T01:42:10Z
2021-06-18T02:24:32Z
REF: Simplify Index.union
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 14ec3d6009b61..124903446220d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -77,7 +77,6 @@ is_float_dtype, is_hashable, is_integer, - is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, @@ -2963,20 +2962,7 @@ def union(self, other, sort=None): stacklevel=2, ) - dtype = find_common_type([self.dtype, other.dtype]) - if self._is_numeric_dtype and other._is_numeric_dtype: - # Right now, we treat union(int, float) a bit special. - # See https://github.com/pandas-dev/pandas/issues/26778 for discussion - # We may change union(int, float) to go to object. - # float | [u]int -> float (the special case) - # <T> | <T> -> T - # <T> | <U> -> object - if not (is_integer_dtype(self.dtype) and is_integer_dtype(other.dtype)): - dtype = np.dtype("float64") - else: - # one is int64 other is uint64 - dtype = np.dtype("object") - + dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) @@ -5410,6 +5396,19 @@ def _find_common_type_compat(self, target) -> DtypeObj: return IntervalDtype(np.float64, closed=self.closed) target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) + + # special case: if one dtype is uint64 and the other a signed int, return object + # See https://github.com/pandas-dev/pandas/issues/26778 for discussion + # Now it's: + # * float | [u]int -> float + # * uint64 | signed int -> object + # We may change union(float | [u]int) to go to object. + if self.dtype == "uint64" or target_dtype == "uint64": + if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( + target_dtype + ): + return np.dtype("object") + dtype = find_common_type([self.dtype, target_dtype]) if dtype.kind in ["i", "u"]: # TODO: what about reversed with self being categorical? 
diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 62c07f4306a96..087ccbef7b778 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pandas.core.dtypes.common import is_dtype_equal +from pandas.core.dtypes.cast import find_common_type from pandas import ( CategoricalIndex, @@ -25,6 +25,7 @@ import pandas._testing as tm from pandas.api.types import ( is_datetime64tz_dtype, + is_signed_integer_dtype, pandas_dtype, ) @@ -48,7 +49,11 @@ def test_union_different_types(index_flat, index_flat2): idx1 = index_flat idx2 = index_flat2 - type_pair = tuple(sorted([idx1.dtype.type, idx2.dtype.type], key=lambda x: str(x))) + common_dtype = find_common_type([idx1.dtype, idx2.dtype]) + + any_uint64 = idx1.dtype == np.uint64 or idx2.dtype == np.uint64 + idx1_signed = is_signed_integer_dtype(idx1.dtype) + idx2_signed = is_signed_integer_dtype(idx2.dtype) # Union with a non-unique, non-monotonic index raises error # This applies to the boolean index @@ -58,23 +63,12 @@ def test_union_different_types(index_flat, index_flat2): res1 = idx1.union(idx2) res2 = idx2.union(idx1) - if is_dtype_equal(idx1.dtype, idx2.dtype): - assert res1.dtype == idx1.dtype - assert res2.dtype == idx1.dtype - - elif type_pair not in COMPATIBLE_INCONSISTENT_PAIRS: - # A union with a CategoricalIndex (even as dtype('O')) and a - # non-CategoricalIndex can only be made if both indices are monotonic. - # This is true before this PR as well. + if any_uint64 and (idx1_signed or idx2_signed): assert res1.dtype == np.dtype("O") assert res2.dtype == np.dtype("O") - - elif idx1.dtype.kind in ["f", "i", "u"] and idx2.dtype.kind in ["f", "i", "u"]: - assert res1.dtype == np.dtype("f8") - assert res2.dtype == np.dtype("f8") - else: - raise NotImplementedError + assert res1.dtype == common_dtype + assert res2.dtype == common_dtype @pytest.mark.parametrize(
This is the `Index.union` part of #41153. This helps simplify that PR. The special casing in `Index.union` is currently active if both are numeric. After #41153 it should only be special cased if one dtype is uint64 and other a signed int. So after this and #41153: * int8 & uint32 -> int64 * [u]int64 & float64 -> float64 * int64 & uint64 -> object * int8 & uint64 -> object The first and second case is handled correctly by `find_common_type`, but the others aren't currently. This PR changes no functionality itself, but prepares for the changes in #41153, where we want e.g. `NumericIndex[int8] .union(NumericIndex[uint32])` to give `NumericIndex[int64]` and not `Index[object]`.
https://api.github.com/repos/pandas-dev/pandas/pulls/41773
2021-06-02T00:07:30Z
2021-06-02T21:40:04Z
2021-06-02T21:40:04Z
2022-02-23T00:28:12Z
CLN: assorted follow-ups
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 409125b6d6691..1556c88aaecc6 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -643,6 +643,7 @@ Other API changes - Partially initialized :class:`CategoricalDtype` (i.e. those with ``categories=None`` objects will no longer compare as equal to fully initialized dtype objects. - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) - Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) +- Removed redundant ``freq`` from :class:`PeriodIndex` string representation (:issue:`41653`) Build ===== diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 6a270c0a55638..e2883dbf4c76b 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1461,7 +1461,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: for i in range(n): val = values[i] - # do not use is_nul_datetimelike to keep + # do not use is_null_datetimelike to keep # np.datetime64('nat') and np.timedelta64('nat') if val is None or util.is_nan(val): pass diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f8f5e5e05bc35..30f42435ad177 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1266,14 +1266,14 @@ def compute(self, method: str) -> Series: return dropped.sort_values(ascending=ascending).head(n) # fast method - arr, pandas_dtype = _ensure_data(dropped.values) + arr, new_dtype = _ensure_data(dropped.values) if method == "nlargest": arr = -arr - if is_integer_dtype(pandas_dtype): + if is_integer_dtype(new_dtype): # GH 21426: ensure reverse ordering at boundaries arr -= 1 
- elif is_bool_dtype(pandas_dtype): + elif is_bool_dtype(new_dtype): # GH 26154: ensure False is smaller than True arr = 1 - (-arr) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ec69d9ccbdd90..020f708606353 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2104,7 +2104,6 @@ def sequence_to_dt64ns( result = data.view(DT64NS_DTYPE) if copy: - # TODO: should this be deepcopy? result = result.copy() assert isinstance(result, np.ndarray), type(result) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index c2323c8697eee..d8c1b9cef468a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -866,7 +866,7 @@ def start_time(self) -> DatetimeArray: def end_time(self) -> DatetimeArray: return self.to_timestamp(how="end") - def _require_matching_freq(self, other, base=False): + def _require_matching_freq(self, other, base: bool = False) -> None: # See also arrays.period.raise_on_incompatible if isinstance(other, BaseOffset): other_freq = other @@ -1057,7 +1057,7 @@ def dt64arr_to_periodarr(data, freq, tz=None): Returns ------- - ordinals : ndarray[int] + ordinals : ndarray[int64] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 30215b40593d3..eb203d349b4e7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -776,6 +776,7 @@ def _engine(self) -> libindex.IndexEngine: target_values = self._get_engine_target() return self._engine_type(lambda: target_values, len(self)) + @final @cache_readonly def _dir_additions_for_owner(self) -> set[str_t]: """ @@ -6209,6 +6210,7 @@ def shape(self) -> Shape: # See GH#27775, GH#27384 for history/reasoning in how this is defined. 
return (len(self),) + @final def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None: """ Issue a FutureWarning if the arg/kwarg is not no_default.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41772
2021-06-01T21:15:53Z
2021-06-02T01:38:46Z
2021-06-02T01:38:46Z
2021-06-02T02:31:33Z
TYP: use type annotations in tzconversion.pyi
diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi index f47885a2e3306..1cbe55320099b 100644 --- a/pandas/_libs/tslibs/tzconversion.pyi +++ b/pandas/_libs/tslibs/tzconversion.pyi @@ -2,11 +2,7 @@ from datetime import ( timedelta, tzinfo, ) -from typing import ( - Iterable, - Optional, - Union, -) +from typing import Iterable import numpy as np @@ -14,12 +10,10 @@ def tz_convert_from_utc( vals: np.ndarray, # const int64_t[:] tz: tzinfo, ) -> np.ndarray: ... # np.ndarray[np.int64] - def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... - def tz_localize_to_utc( vals: np.ndarray, # np.ndarray[np.int64] - tz: Optional[tzinfo], - ambiguous: Optional[Union[str, bool, Iterable[bool]]] = None, - nonexistent: Optional[Union[str, timedelta, np.timedelta64]] = None, + tz: tzinfo | None, + ambiguous: str | bool | Iterable[bool] | None = None, + nonexistent: str | timedelta | np.timedelta64 | None = None, ) -> np.ndarray: ... # np.ndarray[np.int64]
https://api.github.com/repos/pandas-dev/pandas/pulls/41771
2021-06-01T19:18:35Z
2021-06-02T13:05:51Z
2021-06-02T13:05:51Z
2021-06-18T02:25:40Z
DEPR: DataFrame(floaty, dtype=inty) match Series
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 760da36a30075..c32eda4928da7 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -652,7 +652,9 @@ class Rank: ] def setup(self, dtype): - self.df = DataFrame(np.random.randn(10000, 10), columns=range(10), dtype=dtype) + self.df = DataFrame( + np.random.randn(10000, 10).astype(dtype), columns=range(10), dtype=dtype + ) def time_rank(self, dtype): self.df.rank() diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index b36499c340fd9..0bca312c0bdce 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -700,6 +700,7 @@ Deprecations - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) - Deprecated behavior of :class:`Series` construction with large-integer values and small-integer dtype silently overflowing; use ``Series(data).astype(dtype)`` instead (:issue:`41734`) +- Deprecated behavior of :class:`DataFrame` construction with floating data and integer dtype casting even when lossy; in a future version this will remain floating, matching :class:`Series` behavior (:issue:`41770`) - Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). 
To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index edaa53cd55042..c877d27fd2392 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -24,6 +24,7 @@ Dtype, DtypeObj, ) +from pandas.errors import IntCastingNaNError from pandas.core.dtypes.base import ( ExtensionDtype, @@ -511,7 +512,24 @@ def sanitize_array( # possibility of nan -> garbage try: subarr = _try_cast(data, dtype, copy, True) + except IntCastingNaNError: + subarr = np.array(data, copy=copy) except ValueError: + if not raise_cast_failure: + # i.e. called via DataFrame constructor + warnings.warn( + "In a future version, passing float-dtype values and an " + "integer dtype to DataFrame will retain floating dtype " + "if they cannot be cast losslessly (matching Series behavior). " + "To retain the old behavior, use DataFrame(data).astype(dtype)", + FutureWarning, + stacklevel=4, + ) + # GH#40110 until the deprecation is enforced, we _dont_ + # ignore the dtype for DataFrame, and _do_ cast even though + # it is lossy. 
+ dtype = cast(np.dtype, dtype) + return np.array(data, dtype=dtype, copy=copy) subarr = np.array(data, copy=copy) else: # we will try to copy by-definition here diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 161572f3f1ac3..177b1ccd166cb 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2088,7 +2088,13 @@ def maybe_cast_to_integer_array( if is_unsigned_integer_dtype(dtype) and (arr < 0).any(): raise OverflowError("Trying to coerce negative values to unsigned integers") - if is_float_dtype(arr.dtype) or is_object_dtype(arr.dtype): + if is_float_dtype(arr.dtype): + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if is_object_dtype(arr.dtype): raise ValueError("Trying to coerce float values to integers") if casted.dtype < arr.dtype: @@ -2102,6 +2108,17 @@ def maybe_cast_to_integer_array( ) return casted + if arr.dtype.kind in ["m", "M"]: + # test_constructor_maskedarray_nonfloat + warnings.warn( + f"Constructing Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is deprecated and will raise in a future version. " + "Use values.view(dtype) instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + return casted + # No known cases that get here, but raising explicitly to cover our bases. 
raise ValueError(f"values cannot be losslessly cast to {dtype}") diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 270eddf2bd3a5..81bf3ca4ba07a 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -22,11 +22,9 @@ DtypeObj, Manager, ) -from pandas.errors import IntCastingNaNError from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, - construct_1d_ndarray_preserving_na, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, @@ -303,22 +301,12 @@ def ndarray_to_mgr( shape = values.shape flat = values.ravel() - if not is_integer_dtype(dtype): - # TODO: skipping integer_dtype is needed to keep the tests passing, - # not clear it is correct - # Note: we really only need _try_cast, but keeping to exposed funcs - values = sanitize_array( - flat, None, dtype=dtype, copy=copy, raise_cast_failure=True - ) - else: - try: - values = construct_1d_ndarray_preserving_na( - flat, dtype=dtype, copy=False - ) - except IntCastingNaNError: - # following Series, we ignore the dtype and retain floating - # values instead of casting nans to meaningless ints - pass + # GH#40110 see similar check inside sanitize_array + rcf = not (is_integer_dtype(dtype) and values.dtype.kind == "f") + + values = sanitize_array( + flat, None, dtype=dtype, copy=copy, raise_cast_failure=rcf + ) values = values.reshape(shape) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 6e176310da6b4..dac3c0382df01 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -603,7 +603,7 @@ def test_sort_index_level_large_cardinality(self): # GH#2684 (int64) index = MultiIndex.from_arrays([np.arange(4000)] * 3) - df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64) + df = DataFrame(np.random.randn(4000).astype("int64"), index=index) # it works! 
result = df.sort_index(level=0) @@ -611,7 +611,7 @@ def test_sort_index_level_large_cardinality(self): # GH#2684 (int32) index = MultiIndex.from_arrays([np.arange(4000)] * 3) - df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32) + df = DataFrame(np.random.randn(4000).astype("int32"), index=index) # it works! result = df.sort_index(level=0) diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index 769b08373b890..5156d0371e9b7 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -714,7 +714,9 @@ def create_cols(name): np.random.randn(100, 5), dtype="float64", columns=create_cols("float") ) df_int = DataFrame( - np.random.randn(100, 5), dtype="int64", columns=create_cols("int") + np.random.randn(100, 5).astype("int64"), + dtype="int64", + columns=create_cols("int"), ) df_bool = DataFrame(True, index=df_float.index, columns=create_cols("bool")) df_object = DataFrame( @@ -765,7 +767,7 @@ def test_to_csv_dups_cols(self): tm.assert_frame_equal(result, df) df_float = DataFrame(np.random.randn(1000, 3), dtype="float64") - df_int = DataFrame(np.random.randn(1000, 3), dtype="int64") + df_int = DataFrame(np.random.randn(1000, 3)).astype("int64") df_bool = DataFrame(True, index=df_float.index, columns=range(3)) df_object = DataFrame("foo", index=df_float.index, columns=range(3)) df_dt = DataFrame(Timestamp("20010101"), index=df_float.index, columns=range(3)) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 784969c199c9f..6e0013c196760 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -10,6 +10,7 @@ import functools import itertools import re +import warnings import numpy as np import numpy.ma as ma @@ -999,7 +1000,17 @@ def test_constructor_maskedarray_nonfloat(self): assert isna(frame).values.all() # cast type - frame = DataFrame(mat, columns=["A", "B", "C"], 
index=[1, 2], dtype=np.int64) + msg = r"datetime64\[ns\] values and dtype=int64" + with tm.assert_produces_warning(FutureWarning, match=msg): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="elementwise comparison failed", + ) + frame = DataFrame( + mat, columns=["A", "B", "C"], index=[1, 2], dtype=np.int64 + ) assert frame.values.dtype == np.int64 # Check non-masked values @@ -2484,6 +2495,27 @@ def test_nested_list_columns(self): tm.assert_frame_equal(result, expected) +class TestDataFrameConstructorWithDtypeCoercion: + def test_floating_values_integer_dtype(self): + # GH#40110 make DataFrame behavior with arraylike floating data and + # inty dtype match Series behavior + + arr = np.random.randn(10, 5) + + msg = "if they cannot be cast losslessly" + with tm.assert_produces_warning(FutureWarning, match=msg): + DataFrame(arr, dtype="i8") + + with tm.assert_produces_warning(None): + # if they can be cast losslessly, no warning + DataFrame(arr.round(), dtype="i8") + + # with NaNs, we already have the correct behavior, so no warning + arr[0, 0] = np.nan + with tm.assert_produces_warning(None): + DataFrame(arr, dtype="i8") + + class TestDataFrameConstructorWithDatetimeTZ: @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_construction_preserves_tzaware_dtypes(self, tz): diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index c9a39eb460cf4..d010426bee53e 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -294,7 +294,7 @@ def test_multi_dtype2(self): def test_dups_across_blocks(self, using_array_manager): # dups across blocks df_float = DataFrame(np.random.randn(10, 3), dtype="float64") - df_int = DataFrame(np.random.randn(10, 3), dtype="int64") + df_int = DataFrame(np.random.randn(10, 3).astype("int64")) df_bool = DataFrame(True, index=df_float.index, 
columns=df_float.columns) df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns) df_dt = DataFrame( diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 6f4949267c00c..26f2ba577d184 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -134,7 +134,10 @@ def test_setitem_series_int8(self, val, exp_dtype, request): ) request.node.add_marker(mark) - exp = pd.Series([1, val, 3, 4], dtype=np.int8) + warn = None if exp_dtype is np.int8 else FutureWarning + msg = "Values are too large to be losslessly cast to int8" + with tm.assert_produces_warning(warn, match=msg): + exp = pd.Series([1, val, 3, 4], dtype=np.int8) self._assert_setitem_series_conversion(obj, val, exp, exp_dtype) @pytest.mark.parametrize(
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry xref #40110, though we may still deprecate the Series behavior too, in which case we'll need to update the deprecation message here
https://api.github.com/repos/pandas-dev/pandas/pulls/41770
2021-06-01T18:37:02Z
2021-06-03T17:40:20Z
2021-06-03T17:40:20Z
2021-06-03T18:35:29Z
TYP: use type annotations in timezones.pyi
diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi index 04a1b391dc30a..346cc34576184 100644 --- a/pandas/_libs/tslibs/timezones.pyi +++ b/pandas/_libs/tslibs/timezones.pyi @@ -1,32 +1,25 @@ +from __future__ import annotations + from datetime import ( datetime, tzinfo, ) -from typing import ( - Callable, - Optional, - Union, -) +from typing import Callable import numpy as np # imported from dateutil.tz dateutil_gettz: Callable[[str], tzinfo] - def tz_standardize(tz: tzinfo) -> tzinfo: ... - -def tz_compare(start: Optional[tzinfo], end: Optional[tzinfo]) -> bool: ... - +def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ... def infer_tzinfo( - start: Optional[datetime], end: Optional[datetime], -) -> Optional[tzinfo]: ... + start: datetime | None, + end: datetime | None, +) -> tzinfo | None: ... # ndarrays returned are both int64_t def get_dst_info(tz: tzinfo) -> tuple[np.ndarray, np.ndarray, str]: ... - -def maybe_get_tz(tz: Optional[Union[str, int, np.int64, tzinfo]]) -> Optional[tzinfo]: ... - -def get_timezone(tz: tzinfo) -> Union[tzinfo, str]: ... - -def is_utc(tz: Optional[tzinfo]) -> bool: ... +def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ... +def get_timezone(tz: tzinfo) -> tzinfo | str: ... +def is_utc(tz: tzinfo | None) -> bool: ...
https://api.github.com/repos/pandas-dev/pandas/pulls/41769
2021-06-01T18:32:29Z
2021-06-01T23:16:21Z
2021-06-01T23:16:21Z
2022-11-18T02:20:24Z
BUG: Improve memory usage with openpyxl
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 2071076d04a24..20557be81a6e6 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -29,12 +29,17 @@ class OpenpyxlWriter(ExcelWriter): engine = "openpyxl" supported_extensions = (".xlsx", ".xlsm") + # Because this is used a few times in the class, + # it's declared as a variable + # Maybe move to within __init__? + def __init__( self, path, engine=None, date_format=None, datetime_format=None, + write_only=True, mode: str = "w", storage_options: StorageOptions = None, if_sheet_exists: str | None = None, @@ -51,6 +56,9 @@ def __init__( engine_kwargs=engine_kwargs, ) + from openpyxl import LXML + self.LXML_EXISTS = True if (LXML is True) else False + # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from # the file and later write to it if "r+" in self.mode: # Load from existing workbook @@ -61,16 +69,36 @@ def __init__( self.sheets = {name: self.book[name] for name in self.book.sheetnames} else: - # Create workbook object with default optimized_write=True. - self.book = Workbook() + if self.LXML_EXISTS: + # Sheets are not automatically created in the workbook + self.book = Workbook(write_only=True) + else: + + import warnings + """ + This would ideally be in the ImportWarning category, + but ImportWarnings are ignored by default in Python. - if self.book.worksheets: - self.book.remove(self.book.worksheets[0]) + Not sure it's worth changing the default filter + when we can just use the Warning category instead. + """ + + warnings.warn( + "lxml is not installed" + "Memory usage may be much higher", + Warning, + stacklevel=2 + ) + self.book = Workbook() + + if self.book.worksheets: + self.book.remove(self.book.worksheets[0]) def save(self): """ Save workbook to disk. 
""" + self.book.save(self.handles.handle) if "r+" in self.mode and not isinstance(self.handles.handle, mmap.mmap): # truncate file to the written content @@ -410,6 +438,17 @@ def _convert_to_protection(cls, protection_dict): return Protection(**protection_dict) + @staticmethod + def _convert_to_excel_format(row, col): + col_name = "" + + while col > 0: + mod = (col - 1) % 26 + col_name = str(chr(65 + mod)) + col_name + col = (col - mod) // 26 + + return "%s%d" % (col_name, row) + def write_cells( self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None ): @@ -448,52 +487,128 @@ def write_cells( row=freeze_panes[0] + 1, column=freeze_panes[1] + 1 ) - for cell in cells: - xcell = wks.cell( - row=startrow + cell.row + 1, column=startcol + cell.col + 1 - ) - xcell.value, fmt = self._value_with_fmt(cell.val) - if fmt: - xcell.number_format = fmt - - style_kwargs: dict[str, Serialisable] | None = {} - if cell.style: - key = str(cell.style) - style_kwargs = _style_cache.get(key) - if style_kwargs is None: - style_kwargs = self._convert_to_style_kwargs(cell.style) - _style_cache[key] = style_kwargs - - if style_kwargs: - for k, v in style_kwargs.items(): - setattr(xcell, k, v) - - if cell.mergestart is not None and cell.mergeend is not None: - - wks.merge_cells( - start_row=startrow + cell.row + 1, - start_column=startcol + cell.col + 1, - end_column=startcol + cell.mergeend + 1, - end_row=startrow + cell.mergestart + 1, + if self.book.write_only and self.LXML_EXISTS: + + from openpyxl.cell import WriteOnlyCell + + for cell in cells: + xval, fmt = self._value_with_fmt(cell.val) + xcell = WriteOnlyCell(wks, value=xval) + + if fmt: + xcell.number_format = fmt + + if cell.style: + key = str(cell.style) + style_kwargs = _style_cache.get(key) + if style_kwargs is None: + style_kwargs = self._convert_to_style_kwargs(cell.style) + _style_cache[key] = style_kwargs + + # This is primarily the issue. Setting cell attributes arbitrarily. 
+ # Might not actually be an issue, as long as we set THEN append + if style_kwargs: + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + """ + + The way that merged cells work in write_only worksheets is as follows: + + You append the data you want first, then you append the merge over it. + It must be in excel notation (e.g. "A1:C1") rather than by index + (e.g. 11:14) + + I think creating a helper function to turn the indexes into excel + notation could be useful + + """ + if cell.mergestart is not None and cell.mergeend is not None: + + wks.merged_cells.ranges.append( + "%s:%s" % ( + self._convert_to_excel_format( + row=startrow + cell.row + 1, + col=startcol + cell.col + 1), + + self._convert_to_excel_format( + row=startrow + cell.mergestart + 1, + col=startcol + cell.mergeend + 1 + ) + ) + ) + + # When cells are merged only the top-left cell is preserved + # The behaviour of the other cells in a merged range is + # undefined + """ + + Commenting out to test if the code otherwise works. + + if style_kwargs: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. It is already handled. 
+ continue + xcell = wks.cell(column=col, row=row) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + """ + + wks.append([xcell]) + else: + for cell in cells: + xcell = wks.cell( + row=startrow + cell.row + 1, column=startcol + cell.col + 1 ) + xcell.value, fmt = self._value_with_fmt(cell.val) + if fmt: + xcell.number_format = fmt + + style_kwargs: dict[str, Serialisable] | None = {} + if cell.style: + key = str(cell.style) + style_kwargs = _style_cache.get(key) + if style_kwargs is None: + style_kwargs = self._convert_to_style_kwargs(cell.style) + _style_cache[key] = style_kwargs - # When cells are merged only the top-left cell is preserved - # The behaviour of the other cells in a merged range is - # undefined if style_kwargs: - first_row = startrow + cell.row + 1 - last_row = startrow + cell.mergestart + 1 - first_col = startcol + cell.col + 1 - last_col = startcol + cell.mergeend + 1 - - for row in range(first_row, last_row + 1): - for col in range(first_col, last_col + 1): - if row == first_row and col == first_col: - # Ignore first cell. It is already handled. - continue - xcell = wks.cell(column=col, row=row) - for k, v in style_kwargs.items(): - setattr(xcell, k, v) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + if cell.mergestart is not None and cell.mergeend is not None: + + wks.merge_cells( + start_row=startrow + cell.row + 1, + start_column=startcol + cell.col + 1, + end_column=startcol + cell.mergeend + 1, + end_row=startrow + cell.mergestart + 1, + ) + + # When cells are merged only the top-left cell is preserved + # The behaviour of the other cells in a merged range is + # undefined + if style_kwargs: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. 
It is already handled. + continue + xcell = wks.cell(column=col, row=row) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) class OpenpyxlReader(BaseExcelReader):
- [x] closes #41681 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41767
2021-06-01T15:18:58Z
2022-01-16T18:07:31Z
null
2022-01-16T18:07:31Z
Backport PR #41711: REGR: DataFrame reduction with min_count
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index e936519383520..500030e1304c6 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4c156d7470364..92892ac0f26e0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8786,7 +8786,6 @@ def _reduce( **kwds, ): - min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -8831,7 +8830,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if (numeric_only is not None or axis == 0) and min_count == 0: + if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. 
# For numeric_only=None only the case with axis==0 and no object diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 523e19f6043da..a38b7a19dc80a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -391,7 +391,7 @@ def reduce(self, func, ignore_failures: bool = False) -> List["Block"]: return [] raise - if np.ndim(result) == 0: + if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index edc1b1e96509e..20adcee924a15 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -231,8 +231,7 @@ def _maybe_get_mask( """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): - # Boolean data cannot contain nulls, so signal via mask being None - return None + return np.broadcast_to(False, values.shape) if skipna or needs_i8_conversion(values.dtype): mask = isna(values) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index cb481613eb97f..b6eccc6999dec 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -783,34 +784,35 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method) + result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") # min_count=1 - result = getattr(df, method)(min_count=1) + 
result = getattr(df, method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1491,3 +1493,16 @@ def test_minmax_extensionarray(method, numeric_only): [getattr(int64_info, method)], index=Index(["Int64"], dtype="object") ) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
Backport PR #41711
https://api.github.com/repos/pandas-dev/pandas/pulls/41766
2021-06-01T15:14:20Z
2021-06-01T16:58:47Z
2021-06-01T16:58:47Z
2021-06-01T16:58:52Z
Backport PR #41370 on branch 1.2.x (Pin fastparquet to leq 0.5.0)
diff --git a/ci/deps/actions-37-cov.yaml b/ci/deps/actions-37-cov.yaml index 5381caaa242cf..6bdbfa769f772 100644 --- a/ci/deps/actions-37-cov.yaml +++ b/ci/deps/actions-37-cov.yaml @@ -15,7 +15,7 @@ dependencies: - beautifulsoup4 - botocore>=1.11 - dask - - fastparquet>=0.4.0 + - fastparquet>=0.4.0, <=0.5.0 - fsspec>=0.7.4 - gcsfs>=0.6.0 - geopandas diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 661d8813d32d2..fdea34d573340 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -15,7 +15,7 @@ dependencies: # pandas dependencies - blosc - bottleneck - - fastparquet>=0.4.0 + - fastparquet>=0.4.0, <=0.5.0 - flask - fsspec>=0.8.0 - matplotlib=3.1.3 diff --git a/environment.yml b/environment.yml index 72826124bc35d..5c47d9c5fa484 100644 --- a/environment.yml +++ b/environment.yml @@ -97,7 +97,7 @@ dependencies: - xlwt - odfpy - - fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet + - fastparquet>=0.3.2, <=0.5.0 # pandas.read_parquet, DataFrame.to_parquet - pyarrow>=0.15.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow diff --git a/requirements-dev.txt b/requirements-dev.txt index 5a64156fe997f..33073cf953729 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -62,7 +62,7 @@ xlrd xlsxwriter xlwt odfpy -fastparquet>=0.3.2 +fastparquet>=0.3.2, <=0.5.0 pyarrow>=0.15.0 python-snappy pyqt5>=5.9.2
Backport PR #41370: Pin fastparquet to leq 0.5.0
https://api.github.com/repos/pandas-dev/pandas/pulls/41765
2021-06-01T14:58:38Z
2021-06-01T16:56:41Z
2021-06-01T16:56:41Z
2021-06-01T16:56:41Z
Backport PR #41443 on branch 1.2.x (Revert "Pin fastparquet to leq 0.5.0")
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 8b1184df92eaf..f09588c5e8ef9 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -300,9 +300,14 @@ def read( if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") - parquet_kwargs["open_with"] = lambda path, _: fsspec.open( - path, "rb", **(storage_options or {}) - ).open() + if Version(self.api.__version__) > Version("0.6.1"): + parquet_kwargs["fs"] = fsspec.open( + path, "rb", **(storage_options or {}) + ).fs + else: + parquet_kwargs["open_with"] = lambda path, _: fsspec.open( + path, "rb", **(storage_options or {}) + ).open() elif isinstance(path, str) and not os.path.isdir(path): # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories
Backport PR #41443: Revert "Pin fastparquet to leq 0.5.0"
https://api.github.com/repos/pandas-dev/pandas/pulls/41763
2021-06-01T14:23:35Z
2021-06-01T14:57:39Z
null
2021-06-01T14:57:40Z
Backport PR #41730: CI: suppress npdev warnings
diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index 4cb4eaf95f6f5..2bb348a11655c 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -62,7 +62,7 @@ jobs: ENV_FILE: ci/deps/azure-38-numpydev.yaml CONDA_PY: "38" PATTERN: "not slow and not network" - TEST_ARGS: "-W error" + TEST_ARGS: "-W error -W \"ignore:Promotion of numbers and bools:FutureWarning\"" PANDAS_TESTING_MODE: "deprecate" EXTRA_APT: "xsel" diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index e26bb513838a5..d42da39ec8ff0 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -83,6 +83,7 @@ def xbox2(x): "Invalid comparison between", "Cannot compare type", "not supported between", + "could not be promoted", "invalid type promotion", ( # GH#36706 npdev 1.20.0 2020-09-28 diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 6dc3b3b13dd0c..4473d86fa04a1 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -235,7 +235,7 @@ def test_compare_list_like_nan(self, op, array, nulls_fixture, request): Categorical(list("abab")), Categorical(date_range("2017-01-01", periods=4)), pd.array(list("abcd")), - pd.array(["foo", 3.14, None, object()]), + pd.array(["foo", 3.14, None, object()], dtype=object), ], ids=lambda x: str(x.dtype), ) diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index e83882be9c680..0b710d7ebf7d7 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat.numpy import is_numpy_dev + from pandas import ( CategoricalDtype, DataFrame, @@ -162,20 +164,28 @@ def test_to_records_with_categorical(self): ), ), # Pass in a type instance. 
- ( + pytest.param( {"column_dtypes": str}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dtype instance. - ( + pytest.param( {"column_dtypes": np.dtype("unicode")}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dictionary (name-only). ( diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index e5ec3c5641bd2..51420859dc1bd 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -79,6 +79,7 @@ def check(df, df2): msgs = [ r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", "invalid type promotion", + "could not be promoted", ( # npdev 1.20.0 r"The DTypes <class 'numpy.dtype\[.*\]'> and "
Backport PR #41730
https://api.github.com/repos/pandas-dev/pandas/pulls/41762
2021-06-01T13:50:51Z
2021-06-01T21:39:13Z
2021-06-01T21:39:13Z
2021-06-02T07:38:01Z
Backport PR #40555: BUG: Fix behavior of replace_list with mixed types.
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..e936519383520 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -16,7 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) -- +- Regression in :meth:`DataFrame.replace` and :meth:`Series.replace` when the values to replace is a NumPy float array (:issue:`40371`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index b6bca855a9f05..523e19f6043da 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -860,6 +860,15 @@ def _replace_list( """ See BlockManager._replace_list docstring. 
""" + + # https://github.com/pandas-dev/pandas/issues/40371 + # the following pairs check code caused a regression so we catch that case here + # until the issue is fixed properly in can_hold_element + + # error: "Iterable[Any]" has no attribute "tolist" + if hasattr(src_list, "tolist"): + src_list = src_list.tolist() # type: ignore[attr-defined] + # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index c4f2e09911b34..0f85af6b26aa3 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1665,3 +1665,22 @@ def test_replace_bytes(self, frame_or_series): expected = obj.copy() obj = obj.replace({None: np.nan}) tm.assert_equal(obj, expected) + + @pytest.mark.parametrize( + "data, to_replace, value, expected", + [ + ([1], [1.0], [0], [0]), + ([1], [1], [0], [0]), + ([1.0], [1.0], [0], [0.0]), + ([1.0], [1], [0], [0.0]), + ], + ) + @pytest.mark.parametrize("box", [list, tuple, np.array]) + def test_replace_list_with_mixed_type( + self, data, to_replace, value, expected, box, frame_or_series + ): + # GH#40371 + obj = frame_or_series(data) + expected = frame_or_series(expected) + result = obj.replace(box(to_replace), value) + tm.assert_equal(result, expected)
Backport PR #40555 (removed typing changes - changes related to handling Categorical on master)
https://api.github.com/repos/pandas-dev/pandas/pulls/41761
2021-06-01T13:03:14Z
2021-06-01T13:54:19Z
2021-06-01T13:54:19Z
2021-06-01T13:54:31Z
DO NOT MERGE: test-backportability-of-#41711
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..1d7b7a762e2ae 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4c156d7470364..92892ac0f26e0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8786,7 +8786,6 @@ def _reduce( **kwds, ): - min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -8831,7 +8830,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if (numeric_only is not None or axis == 0) and min_count == 0: + if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. 
# For numeric_only=None only the case with axis==0 and no object diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index b6bca855a9f05..b9925ec769ad0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -391,7 +391,7 @@ def reduce(self, func, ignore_failures: bool = False) -> List["Block"]: return [] raise - if np.ndim(result) == 0: + if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index edc1b1e96509e..20adcee924a15 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -231,8 +231,7 @@ def _maybe_get_mask( """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): - # Boolean data cannot contain nulls, so signal via mask being None - return None + return np.broadcast_to(False, values.shape) if skipna or needs_i8_conversion(values.dtype): mask = isna(values) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index cb481613eb97f..b6eccc6999dec 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -783,34 +784,35 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method) + result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") # min_count=1 - result = getattr(df, method)(min_count=1) + 
result = getattr(df, method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1491,3 +1493,16 @@ def test_minmax_extensionarray(method, numeric_only): [getattr(int64_info, method)], index=Index(["Int64"], dtype="object") ) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
xref #41711
https://api.github.com/repos/pandas-dev/pandas/pulls/41758
2021-06-01T10:57:27Z
2021-06-01T12:28:29Z
null
2021-06-02T07:40:46Z
CI: Activating CircleCI
diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000000..5ff2f783e6a96 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,18 @@ +version: 2.1 + +jobs: + test-arm: + machine: + image: ubuntu-2004:202101-01 + resource_class: arm.medium + environment: + ENV_FILE: ci/deps/circle-37-arm64.yaml + PYTEST_WORKERS: auto + PATTERN: "not slow and not network and not clipboard and not arm_slow" + steps: + - run: echo "CircleCI is working" + +workflows: + test: + jobs: + - test-arm
We'll start running ARM tests in #41739. In order to see the result of the execution in that PR, we need `.circleci/config.yml` to exist in master, so CircleCI starts showing in GitHub. Adding the config file, but just doing an echo (and not the checkout, set up or the environment or the running of the tests). So we can add CircleCI safely, and see results before merging #41739.
https://api.github.com/repos/pandas-dev/pandas/pulls/41752
2021-06-01T00:36:21Z
2021-06-01T01:55:44Z
2021-06-01T01:55:44Z
2021-06-01T01:55:44Z
TYP: to_numpy
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 3daf1b3ae3902..7f811cc987d34 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -191,7 +191,7 @@ def extract_bool_array(mask: ArrayLike) -> np.ndarray: # We could have BooleanArray, Sparse[bool], ... # Except for BooleanArray, this is equivalent to just # np.asarray(mask, dtype=bool) - mask = mask.to_numpy(dtype=bool, na_value=False) + mask = mask.to_numpy(dtype=np.dtype("bool"), na_value=False) mask = np.asarray(mask, dtype=bool) return mask diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 7dddb9f3d6f25..73a5259fe6216 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -26,6 +26,7 @@ ArrayLike, Dtype, FillnaOptions, + NpDtype, PositionalIndexer, Shape, ) @@ -429,7 +430,7 @@ def __ne__(self, other: Any) -> ArrayLike: # type: ignore[override] def to_numpy( self, - dtype: Dtype | None = None, + dtype: NpDtype | None = None, copy: bool = False, na_value=lib.no_default, ) -> np.ndarray: @@ -458,12 +459,7 @@ def to_numpy( ------- numpy.ndarray """ - # error: Argument "dtype" to "asarray" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], - # Type[complex], Type[bool], Type[object], None]"; expected "Union[dtype[Any], - # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - result = np.asarray(self, dtype=dtype) # type: ignore[arg-type] + result = np.asarray(self, dtype=dtype) if copy or na_value is not lib.no_default: result = result.copy() if na_value is not lib.no_default: diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 11f9f645920ec..d7c746ba29677 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -208,10 +208,7 @@ def __len__(self) -> int: def __invert__(self: BaseMaskedArrayT) -> 
BaseMaskedArrayT: return type(self)(~self._data, self._mask.copy()) - # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; - # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], - # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" - def to_numpy( # type: ignore[override] + def to_numpy( self, dtype: NpDtype | None = None, copy: bool = False, diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index e9d554200805e..2f34e2b0fb588 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -367,10 +367,7 @@ def skew( # ------------------------------------------------------------------------ # Additional Methods - # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; - # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], - # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" - def to_numpy( # type: ignore[override] + def to_numpy( self, dtype: NpDtype | None = None, copy: bool = False, diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 3cf471e381da9..26ca9f9bd6023 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -275,10 +275,7 @@ def __arrow_array__(self, type=None): """Convert myself to a pyarrow Array or ChunkedArray.""" return self._data - # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; - # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], - # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" - def to_numpy( # type: ignore[override] + def to_numpy( self, dtype: NpDtype | None = None, copy: bool = False, diff --git a/pandas/core/base.py b/pandas/core/base.py index 55e776d2e6b73..7ade73c5c5f0a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -19,10 +19,10 @@ 
import pandas._libs.lib as lib from pandas._typing import ( ArrayLike, - Dtype, DtypeObj, FrameOrSeries, IndexLabel, + NpDtype, Shape, final, ) @@ -413,7 +413,7 @@ def array(self) -> ExtensionArray: def to_numpy( self, - dtype: Dtype | None = None, + dtype: NpDtype | None = None, copy: bool = False, na_value=lib.no_default, **kwargs, @@ -523,12 +523,7 @@ def to_numpy( f"to_numpy() got an unexpected keyword argument '{bad_keys}'" ) - # error: Argument "dtype" to "asarray" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], - # Type[complex], Type[bool], Type[object], None]"; expected "Union[dtype[Any], - # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - result = np.asarray(self._values, dtype=dtype) # type: ignore[arg-type] + result = np.asarray(self._values, dtype=dtype) # TODO(GH-24345): Avoid potential double copy if copy or na_value is not lib.no_default: result = result.copy() diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b00a1160fb01b..a6ba850bcedfa 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2424,12 +2424,12 @@ def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, np.dtype | None]: inference: np.dtype | None = None if is_integer_dtype(vals.dtype): if isinstance(vals, ExtensionArray): - out = vals.to_numpy(dtype=float, na_value=np.nan) + out = vals.to_numpy(dtype="float64", na_value=np.nan) else: out = vals inference = np.dtype(np.int64) elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): - out = vals.to_numpy(dtype=float, na_value=np.nan) + out = vals.to_numpy(dtype="float64", na_value=np.nan) elif is_datetime64_dtype(vals.dtype): inference = np.dtype("datetime64[ns]") out = np.asarray(vals).astype(float) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 323aa45874d96..e77d39c53ea36 
100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -24,6 +24,7 @@ ArrayLike, Dtype, DtypeObj, + NpDtype, Shape, type_t, ) @@ -1385,7 +1386,7 @@ def to_dict(self, copy: bool = True): def as_array( self, transpose: bool = False, - dtype: Dtype | None = None, + dtype: NpDtype | None = None, copy: bool = False, na_value=lib.no_default, ) -> np.ndarray: @@ -1396,7 +1397,7 @@ def as_array( ---------- transpose : bool, default False If True, transpose the return array. - dtype : object, default None + dtype : str or numpy.dtype, optional Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of @@ -1430,12 +1431,7 @@ def as_array( else: arr = np.asarray(blk.get_values()) if dtype: - # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has - # incompatible type "Union[ExtensionDtype, str, dtype[Any], - # Type[object]]"; expected "Union[dtype[Any], None, type, - # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - arr = arr.astype(dtype, copy=False) # type: ignore[arg-type] + arr = arr.astype(dtype, copy=False) else: arr = self._interleave(dtype=dtype, na_value=na_value) # The underlying data was copied within _interleave @@ -1468,12 +1464,9 @@ def _interleave( elif is_dtype_equal(dtype, str): dtype = np.dtype("object") - # error: Argument "dtype" to "empty" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected - # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, int], - # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, - # Tuple[Any, Any]]]" - result = np.empty(self.shape, dtype=dtype) # type: ignore[arg-type] + dtype = cast(np.dtype, dtype) + + result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) @@ -1488,10 +1481,7 @@ def _interleave( dtype=dtype, na_value=na_value ) else: - # error: 
Argument 1 to "get_values" of "Block" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected - # "Union[dtype[Any], ExtensionDtype, None]" - arr = blk.get_values(dtype) # type: ignore[arg-type] + arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41751
2021-05-31T23:51:17Z
2021-06-09T21:17:19Z
null
2021-06-09T21:17:25Z
CLN: datetimelike setops
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5f24eb0cfaad6..484b581a898d8 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -632,10 +632,6 @@ def is_type_compatible(self, kind: str) -> bool: # -------------------------------------------------------------------- # Set Operation Methods - def _difference(self, other, sort=None): - new_idx = super()._difference(other, sort=sort)._with_freq(None) - return new_idx - def _intersection(self, other: Index, sort=False) -> Index: """ intersection specialized to the case with matching dtypes. @@ -781,13 +777,8 @@ def _union(self, other, sort): if self._can_fast_union(other): result = self._fast_union(other, sort=sort) - if sort is None: - # In the case where sort is None, _can_fast_union - # implies that result.freq should match self.freq - assert result.freq == self.freq, (result.freq, self.freq) - elif result.freq is None: - # TODO: no tests rely on this; needed? - result = result._with_freq("infer") + # in the case with sort=None, the _can_fast_union check ensures + # that result.freq == self.freq return result else: i8self = Int64Index._simple_new(self.asi8)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41750
2021-05-31T23:50:26Z
2021-06-04T13:21:05Z
2021-06-04T13:21:05Z
2021-06-04T16:09:01Z
BUG: lib.infer_dtype with incompatible intervals
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d7e15bb2ad197..ba82a7840c4f9 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2029,16 +2029,59 @@ cdef bint is_period_array(ndarray[object] values): return True -cdef class IntervalValidator(Validator): - cdef inline bint is_value_typed(self, object value) except -1: - return is_interval(value) - - cpdef bint is_interval_array(ndarray values): + """ + Is this an ndarray of Interval (or np.nan) with a single dtype? + """ + cdef: - IntervalValidator validator = IntervalValidator(len(values), - skipna=True) - return validator.validate(values) + Py_ssize_t i, n = len(values) + str closed = None + bint numeric = False + bint dt64 = False + bint td64 = False + object val + + if len(values) == 0: + return False + + for val in values: + if is_interval(val): + if closed is None: + closed = val.closed + numeric = ( + util.is_float_object(val.left) + or util.is_integer_object(val.left) + ) + td64 = is_timedelta(val.left) + dt64 = PyDateTime_Check(val.left) + elif val.closed != closed: + # mismatched closedness + return False + elif numeric: + if not ( + util.is_float_object(val.left) + or util.is_integer_object(val.left) + ): + # i.e. 
datetime64 or timedelta64 + return False + elif td64: + if not is_timedelta(val.left): + return False + elif dt64: + if not PyDateTime_Check(val.left): + return False + else: + raise ValueError(val) + elif util.is_nan(val) or val is None: + pass + else: + return False + + if closed is None: + # we saw all-NAs, no actual Intervals + return False + return True @cython.boundscheck(False) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0c299056075c1..d34ae6179fe76 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -317,12 +317,7 @@ def array( return PeriodArray._from_sequence(data, copy=copy) elif inferred_dtype == "interval": - try: - return IntervalArray(data, copy=copy) - except ValueError: - # We may have a mixture of `closed` here. - # We choose to return an ndarray, rather than raising. - pass + return IntervalArray(data, copy=copy) elif inferred_dtype.startswith("datetime"): # datetime, datetime64 diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 30215b40593d3..aa8be070df312 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6441,12 +6441,8 @@ def _maybe_cast_data_without_dtype(subarr: np.ndarray) -> ArrayLike: return data elif inferred == "interval": - try: - ia_data = IntervalArray._from_sequence(subarr, copy=False) - return ia_data - except (ValueError, TypeError): - # GH27172: mixed closed Intervals --> object dtype - pass + ia_data = IntervalArray._from_sequence(subarr, copy=False) + return ia_data elif inferred == "boolean": # don't support boolean explicitly ATM pass diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 09efa97871fae..073a1ff28815b 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1458,17 +1458,54 @@ def test_categorical(self): result = lib.infer_dtype(Series(arr), skipna=True) assert result == "categorical" - def test_interval(self): + 
@pytest.mark.parametrize("asobject", [True, False]) + def test_interval(self, asobject): idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + if asobject: + idx = idx.astype(object) + inferred = lib.infer_dtype(idx, skipna=False) assert inferred == "interval" inferred = lib.infer_dtype(idx._data, skipna=False) assert inferred == "interval" - inferred = lib.infer_dtype(Series(idx), skipna=False) + inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False) assert inferred == "interval" + @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0]) + def test_interval_mismatched_closed(self, value): + + first = Interval(value, value, closed="left") + second = Interval(value, value, closed="right") + + # if closed match, we should infer "interval" + arr = np.array([first, first], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + + # if closed dont match, we should _not_ get "interval" + arr2 = np.array([first, second], dtype=object) + assert lib.infer_dtype(arr2, skipna=False) == "mixed" + + def test_interval_mismatched_subtype(self): + first = Interval(0, 1, closed="left") + second = Interval(Timestamp(0), Timestamp(1), closed="left") + third = Interval(Timedelta(0), Timedelta(1), closed="left") + + arr = np.array([first, second]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([second, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + arr = np.array([first, third]) + assert lib.infer_dtype(arr, skipna=False) == "mixed" + + # float vs int subdtype are compatible + flt_interval = Interval(1.5, 2.5, closed="left") + arr = np.array([first, flt_interval], dtype=object) + assert lib.infer_dtype(arr, skipna=False) == "interval" + @pytest.mark.parametrize("klass", [pd.array, Series]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
Nothing user-facing.
https://api.github.com/repos/pandas-dev/pandas/pulls/41749
2021-05-31T20:36:32Z
2021-06-02T02:08:32Z
2021-06-02T02:08:32Z
2021-06-02T02:34:12Z
whatsnew 1.3.0
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 849b9d45da5ad..2945fc760e01a 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -28,7 +28,7 @@ Enhancements Custom HTTP(s) headers when reading csv or json files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When reading from a remote URL that is not handled by fsspec (ie. HTTP and +When reading from a remote URL that is not handled by fsspec (e.g. HTTP and HTTPS) the dictionary passed to ``storage_options`` will be used to create the headers included in the request. This can be used to control the User-Agent header or send other custom headers (:issue:`36688`). @@ -110,42 +110,32 @@ both XPath 1.0 and XSLT 1.0 are available. (:issue:`27554`) For more, see :ref:`io.xml` in the user guide on IO tools. -Styler Upgrades -^^^^^^^^^^^^^^^ - -We provided some focused development on :class:`.Styler`, including altering methods -to accept more universal CSS language for arguments, such as ``'color:red;'`` instead of -``[('color', 'red')]`` (:issue:`39564`). This is also added to the built-in methods -to allow custom CSS highlighting instead of default background coloring (:issue:`40242`). -Enhancements to other built-in methods include extending the :meth:`.Styler.background_gradient` -method to shade elements based on a given gradient map and not be restricted only to -values in the DataFrame (:issue:`39930` :issue:`22727` :issue:`28901`). Additional -built-in methods such as :meth:`.Styler.highlight_between`, :meth:`.Styler.highlight_quantile` -and :math:`.Styler.text_gradient` have been added (:issue:`39821`, :issue:`40926`, :issue:`41098`). - -The :meth:`.Styler.apply` now consistently allows functions with ``ndarray`` output to -allow more flexible development of UDFs when ``axis`` is ``None`` ``0`` or ``1`` (:issue:`39393`). 
- -:meth:`.Styler.set_tooltips` is a new method that allows adding on hover tooltips to -enhance interactive displays (:issue:`35643`). :meth:`.Styler.set_td_classes`, which was recently -introduced in v1.2.0 (:issue:`36159`) to allow adding specific CSS classes to data cells, has -been made as performant as :meth:`.Styler.apply` and :meth:`.Styler.applymap` (:issue:`40453`), -if not more performant in some cases. The overall performance of HTML -render times has been considerably improved to -match :meth:`DataFrame.to_html` (:issue:`39952` :issue:`37792` :issue:`40425`). - -The :meth:`.Styler.format` has had upgrades to easily format missing data, -precision, and perform HTML escaping (:issue:`40437` :issue:`40134`). There have been numerous other bug fixes to -properly format HTML and eliminate some inconsistencies (:issue:`39942` :issue:`40356` :issue:`39807` :issue:`39889` :issue:`39627`) - -:class:`.Styler` has also been compatible with non-unique index or columns, at least for as many features as are fully compatible, others made only partially compatible (:issue:`41269`). -One also has greater control of the display through separate sparsification of the index or columns, using the new 'styler' options context (:issue:`41142`). -Render trimming has also been added for large numbers of data elements to avoid browser overload (:issue:`40712`). - -We have added an extension to allow LaTeX styling as an alternative to CSS styling and a method :meth:`.Styler.to_latex` -which renders the necessary LaTeX format including built-up styles (:issue:`21673`, :issue:`41659`). An additional file io function :meth:`Styler.to_html` has been added for convenience (:issue:`40312`). - -Documentation has also seen major revisions in light of new features (:issue:`39720` :issue:`39317` :issue:`40493`) +.. _whatsnew_130.styler_enhancements: + +Styler enhancements +^^^^^^^^^^^^^^^^^^^ + +We provided some focused development on :class:`.Styler`. 
See also the `Styler documentation <../user_guide/style.ipynb>`_ +which has been revised and improved (:issue:`39720`, :issue:`39317`, :issue:`40493`). + + - The method :meth:`.Styler.set_table_styles` can now accept more natural CSS language for arguments, such as ``'color:red;'`` instead of ``[('color', 'red')]`` (:issue:`39563`) + - The methods :meth:`.Styler.highlight_null`, :meth:`.Styler.highlight_min`, and :meth:`.Styler.highlight_max` now allow custom CSS highlighting instead of the default background coloring (:issue:`40242`) + - :meth:`.Styler.apply` now accepts functions that return an ``ndarray`` when ``axis=None``, making it now consistent with the ``axis=0`` and ``axis=1`` behavior (:issue:`39359`) + - When incorrectly formatted CSS is given via :meth:`.Styler.apply` or :meth:`.Styler.applymap`, an error is now raised upon rendering (:issue:`39660`) + - :meth:`.Styler.format` now accepts the keyword argument ``escape`` for optional HTML and LaTex escaping (:issue:`40388`, :issue:`41619`) + - :meth:`.Styler.background_gradient` has gained the argument ``gmap`` to supply a specific gradient map for shading (:issue:`22727`) + - :meth:`.Styler.clear` now clears :attr:`Styler.hidden_index` and :attr:`Styler.hidden_columns` as well (:issue:`40484`) + - Added the method :meth:`.Styler.highlight_between` (:issue:`39821`) + - Added the method :meth:`.Styler.highlight_quantile` (:issue:`40926`) + - Added the method :meth:`.Styler.text_gradient` (:issue:`41098`) + - Added the method :meth:`.Styler.set_tooltips` to allow hover tooltips; this can be used enhance interactive displays (:issue:`21266`, :issue:`40284`) + - Added the parameter ``precision`` to the method :meth:`.Styler.format` to control the display of floating point numbers (:issue:`40134`) + - :class:`.Styler` rendered HTML output now follows the `w3 HTML Style Guide <https://www.w3schools.com/html/html5_syntax.asp>`_ (:issue:`39626`) + - Many features of the :class:`.Styler` class are now either 
partially or fully usable on a DataFrame with a non-unique indexes or columns (:issue:`41143`) + - One has greater control of the display through separate sparsification of the index or columns using the :ref:`new styler options <options.available>`, which are also usable via :func:`option_context` (:issue:`41142`) + - Added the option ``styler.render.max_elements`` to avoid browser overload when styling large DataFrames (:issue:`40712`) + - Added the method :meth:`.Styler.to_latex` (:issue:`21673`) + - Added the method :meth:`.Styler.to_html` (:issue:`13379`) .. _whatsnew_130.dataframe_honors_copy_with_dict: @@ -153,7 +143,7 @@ DataFrame constructor honors ``copy=False`` with dict ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When passing a dictionary to :class:`DataFrame` with ``copy=False``, -a copy will no longer be made (:issue:`32960`) +a copy will no longer be made (:issue:`32960`). .. ipython:: python @@ -223,10 +213,12 @@ String accessor methods returning integers will return a value with :class:`Int6 s.str.count("a") -Centered Datetime-Like Rolling Windows +.. _whatsnew_130.centered_datetimelike_rolling_window: + +Centered datetime-like rolling windows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When performing rolling calculations on :class:`DataFrame` and :class:`Series` +When performing rolling calculations on DataFrame and Series objects with a datetime-like index, a centered datetime-like window can now be used (:issue:`38780`). For example: @@ -245,37 +237,28 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ -- :meth:`DataFrame.rolling`, :meth:`Series.rolling`, :meth:`DataFrame.expanding`, and :meth:`Series.expanding` now support a ``method`` argument with a ``'table'`` option that performs the windowing operation over an entire :class:`DataFrame`. 
See :ref:`Window Overview <window.overview>` for performance and functional benefits (:issue:`15095`, :issue:`38995`) +- :meth:`DataFrame.rolling`, :meth:`Series.rolling`, :meth:`DataFrame.expanding`, and :meth:`Series.expanding` now support a ``method`` argument with a ``'table'`` option that performs the windowing operation over an entire DataFrame. See :ref:`Window Overview <window.overview>` for performance and functional benefits (:issue:`15095`, :issue:`38995`) - Added :meth:`MultiIndex.dtypes` (:issue:`37062`) - Added ``end`` and ``end_day`` options for the ``origin`` argument in :meth:`DataFrame.resample` (:issue:`37804`) -- Improve error message when ``usecols`` and ``names`` do not match for :func:`read_csv` and ``engine="c"`` (:issue:`29042`) -- Improved consistency of error messages when passing an invalid ``win_type`` argument in :class:`Window` (:issue:`15969`) +- Improved error message when ``usecols`` and ``names`` do not match for :func:`read_csv` and ``engine="c"`` (:issue:`29042`) +- Improved consistency of error messages when passing an invalid ``win_type`` argument in :ref:`Window methods <api.window>` (:issue:`15969`) - :func:`read_sql_query` now accepts a ``dtype`` argument to cast the columnar data from the SQL database based on user input (:issue:`10285`) - Improved integer type mapping from pandas to SQLAlchemy when using :meth:`DataFrame.to_sql` (:issue:`35076`) - :func:`to_numeric` now supports downcasting of nullable ``ExtensionDtype`` objects (:issue:`33013`) -- Add support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`) -- :func:`read_excel` can now auto detect .xlsb files and older .xls files (:issue:`35416`, :issue:`41225`) +- Added support for dict-like names in :class:`MultiIndex.set_names` and :class:`MultiIndex.rename` (:issue:`20421`) +- :func:`read_excel` can now auto-detect .xlsb files and older .xls files (:issue:`35416`, :issue:`41225`) - :class:`ExcelWriter` now accepts 
an ``if_sheet_exists`` parameter to control the behaviour of append mode when writing to existing sheets (:issue:`40230`) -- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.ExponentialMovingWindow.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support ``Numba`` execution with the ``engine`` keyword (:issue:`38895`, :issue:`41267`) +- :meth:`.Rolling.sum`, :meth:`.Expanding.sum`, :meth:`.Rolling.mean`, :meth:`.Expanding.mean`, :meth:`.ExponentialMovingWindow.mean`, :meth:`.Rolling.median`, :meth:`.Expanding.median`, :meth:`.Rolling.max`, :meth:`.Expanding.max`, :meth:`.Rolling.min`, and :meth:`.Expanding.min` now support `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`38895`, :issue:`41267`) - :meth:`DataFrame.apply` can now accept NumPy unary operators as strings, e.g. ``df.apply("sqrt")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) - :meth:`DataFrame.apply` can now accept non-callable DataFrame properties as strings, e.g. ``df.apply("size")``, which was already the case for :meth:`Series.apply` (:issue:`39116`) -- :meth:`DataFrame.applymap` can now accept kwargs to pass on to func (:issue:`39987`) +- :meth:`DataFrame.applymap` can now accept kwargs to pass on to the user-provided ``func`` (:issue:`39987`) - Passing a :class:`DataFrame` indexer to ``iloc`` is now disallowed for :meth:`Series.__getitem__` and :meth:`DataFrame.__getitem__` (:issue:`39004`) - :meth:`Series.apply` can now accept list-like or dictionary-like arguments that aren't lists or dictionaries, e.g. 
``ser.apply(np.array(["sum", "mean"]))``, which was already the case for :meth:`DataFrame.apply` (:issue:`39140`) - :meth:`DataFrame.plot.scatter` can now accept a categorical column for the argument ``c`` (:issue:`12380`, :issue:`31357`) -- :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes (:issue:`35643`, :issue:`21266`, :issue:`39317`, :issue:`39708`, :issue:`40284`) -- :meth:`.Styler.set_table_styles` amended to optionally allow certain css-string input arguments (:issue:`39564`) -- :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None`` (:issue:`39359`) -- :meth:`.Styler.apply` and :meth:`.Styler.applymap` now raise errors if incorrectly formatted CSS is passed on render(:issue:`39660`) -- :meth:`.Styler.format` now accepts the keyword argument ``escape`` for optional HTML and LaTeX escaping (:issue:`40437`) -- :meth:`.Styler.background_gradient` now allows the ability to supply a specific gradient map (:issue:`22727`) -- :meth:`.Styler.clear` now clears :attr:`Styler.hidden_index` and :attr:`Styler.hidden_columns` as well (:issue:`40484`) -- Builtin highlighting methods in :class:`.Styler` have a more consistent signature and css customisability (:issue:`40242`) -- :meth:`.Styler.highlight_between` added to list of builtin styling methods (:issue:`39821`) - :meth:`Series.loc` now raises a helpful error message when the Series has a :class:`MultiIndex` and the indexer has too many dimensions (:issue:`35349`) - :func:`read_stata` now supports reading data from compressed files (:issue:`26599`) -- Add support for parsing ``ISO 8601``-like timestamps with negative signs to :class:`Timedelta` (:issue:`37172`) -- Add support for unary operators in :class:`FloatingArray` (:issue:`38749`) +- Added support for parsing ``ISO 8601``-like timestamps with negative signs to :class:`Timedelta` (:issue:`37172`) +- Added support for unary operators in 
:class:`FloatingArray` (:issue:`38749`) - :class:`RangeIndex` can now be constructed by passing a ``range`` object directly e.g. ``pd.RangeIndex(range(3))`` (:issue:`12067`) - :meth:`Series.round` and :meth:`DataFrame.round` now work with nullable integer and floating dtypes (:issue:`38844`) - :meth:`read_csv` and :meth:`read_json` expose the argument ``encoding_errors`` to control how encoding errors are handled (:issue:`39450`) @@ -301,8 +284,8 @@ These are bug fixes that might have notable behavior changes. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Previously, when calling :meth:`Categorical.unique` with categorical data, unused categories in the new array -would be removed, meaning that the dtype of the new array would be different than the -original, if some categories are not present in the unique array (:issue:`18291`) +would be removed, making the dtype of the new array different than the +original (:issue:`18291`) As an example of this, given: @@ -458,7 +441,7 @@ In pandas 1.3.0, ``df`` continues to share data with ``values`` .. _whatsnew_130.notable_bug_fixes.setitem_never_inplace: -Never Operate Inplace When Setting ``frame[keys] = values`` +Never operate inplace when setting ``frame[keys] = values`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When setting multiple columns using ``frame[keys] = values`` new arrays will @@ -493,7 +476,7 @@ In the new behavior, we get a new array, and retain an integer-dtyped ``5``: .. _whatsnew_130.notable_bug_fixes.setitem_with_bool_casting: -Consistent Casting With Setting Into Boolean Series +Consistent casting with setting into Boolean Series ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Setting non-boolean values into a :class:`Series` with ``dtype=bool`` now consistently @@ -695,12 +678,12 @@ Other API changes ^^^^^^^^^^^^^^^^^ - Partially initialized :class:`CategoricalDtype` objects (i.e. 
those with ``categories=None``) will no longer compare as equal to fully initialized dtype objects (:issue:`38516`) - Accessing ``_constructor_expanddim`` on a :class:`DataFrame` and ``_constructor_sliced`` on a :class:`Series` now raise an ``AttributeError``. Previously a ``NotImplementedError`` was raised (:issue:`38782`) -- Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as ``turbodbc`` (:issue:`36893`) +- Added new ``engine`` and ``**engine_kwargs`` parameters to :meth:`DataFrame.to_sql` to support other future "SQL engines". Currently we still only use ``SQLAlchemy`` under the hood, but more engines are planned to be supported such as `turbodbc <https://turbodbc.readthedocs.io/en/latest/>`_ (:issue:`36893`) - Removed redundant ``freq`` from :class:`PeriodIndex` string representation (:issue:`41653`) - :meth:`ExtensionDtype.construct_array_type` is now a required method instead of an optional one for :class:`ExtensionDtype` subclasses (:issue:`24860`) Build -===== +^^^^^ - Documentation in ``.pptx`` and ``.pdf`` formats are no longer included in wheels or source distributions. (:issue:`30741`) @@ -770,7 +753,7 @@ Deprecations .. _whatsnew_130.deprecations.nuisance_columns: -Deprecated Dropping Nuisance Columns in DataFrame Reductions and DataFrameGroupBy Operations +Deprecated dropping nuisance columns in DataFrame reductions and DataFrameGroupBy operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Calling a reduction (e.g. 
``.min``, ``.max``, ``.sum``) on a :class:`DataFrame` with ``numeric_only=None`` (the default), columns where the reduction raises a ``TypeError`` @@ -857,7 +840,7 @@ Performance improvements - Performance improvement in :meth:`IntervalIndex.isin` (:issue:`38353`) - Performance improvement in :meth:`Series.mean` for nullable data types (:issue:`34814`) - Performance improvement in :meth:`Series.isin` for nullable data types (:issue:`38340`) -- Performance improvement in :meth:`DataFrame.fillna` with ``method="pad|backfill"`` for nullable floating and nullable integer dtypes (:issue:`39953`) +- Performance improvement in :meth:`DataFrame.fillna` with ``method="pad"`` or ``method="backfill"`` for nullable floating and nullable integer dtypes (:issue:`39953`) - Performance improvement in :meth:`DataFrame.corr` for ``method=kendall`` (:issue:`28329`) - Performance improvement in :meth:`DataFrame.corr` for ``method=spearman`` (:issue:`40956`) - Performance improvement in :meth:`.Rolling.corr` and :meth:`.Rolling.cov` (:issue:`39388`) @@ -865,7 +848,8 @@ Performance improvements - Performance improvement in :func:`unique` for object data type (:issue:`37615`) - Performance improvement in :func:`json_normalize` for basic cases (including separators) (:issue:`40035` :issue:`15621`) - Performance improvement in :class:`.ExpandingGroupby` aggregation methods (:issue:`39664`) -- Performance improvement in :class:`.Styler` where render times are more than 50% reduced (:issue:`39972` :issue:`39952`) +- Performance improvement in :class:`.Styler` where render times are more than 50% reduced and now matches :meth:`DataFrame.to_html` (:issue:`39972` :issue:`39952`, :issue:`40425`) +- The method :meth:`.Styler.set_td_classes` is now as performant as :meth:`.Styler.apply` and :meth:`.Styler.applymap`, and even more so in some cases (:issue:`40453`) - Performance improvement in :meth:`.ExponentialMovingWindow.mean` with ``times`` (:issue:`39784`) - Performance improvement in 
:meth:`.GroupBy.apply` when requiring the python fallback implementation (:issue:`40176`) - Performance improvement in the conversion of a PyArrow Boolean array to a pandas nullable Boolean array (:issue:`41051`) @@ -978,7 +962,7 @@ Indexing - Bug in :meth:`DataFrame.reindex` and :meth:`Series.reindex` with timezone aware indexes raising a ``TypeError`` for ``method="ffill"`` and ``method="bfill"`` and specified ``tolerance`` (:issue:`38566`) - Bug in :meth:`DataFrame.reindex` with ``datetime64[ns]`` or ``timedelta64[ns]`` incorrectly casting to integers when the ``fill_value`` requires casting to object dtype (:issue:`39755`) - Bug in :meth:`DataFrame.__setitem__` raising a ``ValueError`` when setting on an empty :class:`DataFrame` using specified columns and a nonempty :class:`DataFrame` value (:issue:`38831`) -- Bug in :meth:`DataFrame.loc.__setitem__` raising ValueError when expanding unique column for :class:`DataFrame` with duplicate columns (:issue:`38521`) +- Bug in :meth:`DataFrame.loc.__setitem__` raising a ``ValueError`` when operating on a unique column when the :class:`DataFrame` has duplicate columns (:issue:`38521`) - Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`) - Bug in :meth:`Series.loc.__setitem__` and :meth:`DataFrame.loc.__setitem__` raising ``KeyError`` when provided a Boolean generator (:issue:`39614`) - Bug in :meth:`Series.iloc` and :meth:`DataFrame.iloc` raising a ``KeyError`` when provided a generator (:issue:`39614`) @@ -1041,7 +1025,7 @@ I/O - Allow custom error values for the ``parse_dates`` argument of :func:`read_sql`, :func:`read_sql_query` and :func:`read_sql_table` (:issue:`35185`) - Bug in :meth:`DataFrame.to_hdf` and :meth:`Series.to_hdf` raising a ``KeyError`` when trying to apply for subclasses of ``DataFrame`` or ``Series`` (:issue:`33748`) - Bug in :meth:`.HDFStore.put` raising a wrong ``TypeError`` when saving a 
DataFrame with non-string dtype (:issue:`34274`) -- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned ``DataFrame`` (:issue:`35923`) +- Bug in :func:`json_normalize` resulting in the first element of a generator object not being included in the returned DataFrame (:issue:`35923`) - Bug in :func:`read_csv` applying the thousands separator to date columns when the column should be parsed for dates and ``usecols`` is specified for ``engine="python"`` (:issue:`39365`) - Bug in :func:`read_excel` forward filling :class:`MultiIndex` names when multiple header and index columns are specified (:issue:`34673`) - Bug in :func:`read_excel` not respecting :func:`set_option` (:issue:`34252`) @@ -1062,7 +1046,7 @@ I/O - Bug in :func:`read_csv` silently ignoring ``sep`` if ``delimiter`` and ``sep`` are defined, now raising a ``ValueError`` (:issue:`39823`) - Bug in :func:`read_csv` and :func:`read_table` misinterpreting arguments when ``sys.setprofile`` had been previously called (:issue:`41069`) - Bug in the conversion from PyArrow to pandas (e.g. 
for reading Parquet) with nullable dtypes and a PyArrow array whose data buffer size is not a multiple of the dtype size (:issue:`40896`) -- Bug in :func:`read_excel` would raise an error when pandas could not determine the file type, even when user specified the ``engine`` argument (:issue:`41225`) +- Bug in :func:`read_excel` would raise an error when pandas could not determine the file type even though the user specified the ``engine`` argument (:issue:`41225`) - Bug in :func:`read_clipboard` copying from an excel file shifts values into the wrong column if there are null values in first column (:issue:`41108`) Period @@ -1087,7 +1071,7 @@ Groupby/resample/rolling - Bug in :meth:`.SeriesGroupBy.value_counts` where unobserved categories in a grouped categorical Series were not tallied (:issue:`38672`) - Bug in :meth:`.SeriesGroupBy.value_counts` where an error was raised on an empty Series (:issue:`39172`) - Bug in :meth:`.GroupBy.indices` would contain non-existent indices when null values were present in the groupby keys (:issue:`9304`) -- Fixed bug in :meth:`.GroupBy.sum` causing loss of precision through using Kahan summation (:issue:`38778`) +- Fixed bug in :meth:`.GroupBy.sum` causing a loss of precision by now using Kahan summation (:issue:`38778`) - Fixed bug in :meth:`.GroupBy.cumsum` and :meth:`.GroupBy.mean` causing loss of precision through using Kahan summation (:issue:`38934`) - Bug in :meth:`.Resampler.aggregate` and :meth:`DataFrame.transform` raising a ``TypeError`` instead of ``SpecificationError`` when missing keys had mixed dtypes (:issue:`39025`) - Bug in :meth:`.DataFrameGroupBy.idxmin` and :meth:`.DataFrameGroupBy.idxmax` with ``ExtensionDtype`` columns (:issue:`38733`) @@ -1160,10 +1144,10 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ -- Bug in :meth:`DataFrame.where` when ``other`` is a :class:`Series` with :class:`ExtensionArray` dtype (:issue:`38729`) +- Bug in :meth:`DataFrame.where` when ``other`` is a Series with an :class:`ExtensionDtype` 
(:issue:`38729`) - Fixed bug where :meth:`Series.idxmax`, :meth:`Series.idxmin`, :meth:`Series.argmax`, and :meth:`Series.argmin` would fail when the underlying data is an :class:`ExtensionArray` (:issue:`32749`, :issue:`33719`, :issue:`36566`) - Fixed bug where some properties of subclasses of :class:`PandasExtensionDtype` where improperly cached (:issue:`40329`) -- Bug in :meth:`DataFrame.mask` where masking a :class:`Dataframe` with an :class:`ExtensionArray` dtype raises ``ValueError`` (:issue:`40941`) +- Bug in :meth:`DataFrame.mask` where masking a DataFrame with an :class:`ExtensionDtype` raises a ``ValueError`` (:issue:`40941`) Styler ^^^^^^ @@ -1172,10 +1156,10 @@ Styler - :class:`.Styler` rendered HTML output has seen minor alterations to support w3 good code standards (:issue:`39626`) - Bug in :class:`.Styler` where rendered HTML was missing a column class identifier for certain header cells (:issue:`39716`) - Bug in :meth:`.Styler.background_gradient` where text-color was not determined correctly (:issue:`39888`) -- Bug in :class:`.Styler` where multiple elements in CSS-selectors were not correctly added to ``table_styles`` (:issue:`39942`) +- Bug in :meth:`.Styler.set_table_styles` where multiple elements in CSS-selectors of the ``table_styles`` argument were not correctly added (:issue:`34061`) - Bug in :class:`.Styler` where copying from Jupyter dropped the top left cell and misaligned headers (:issue:`12147`) - Bug in :class:`Styler.where` where ``kwargs`` were not passed to the applicable callable (:issue:`40845`) -- Bug in :class:`.Styler` caused CSS to duplicate on multiple renders (:issue:`39395`, :issue:`40334`) +- Bug in :class:`.Styler` causing CSS to duplicate on multiple renders (:issue:`39395`, :issue:`40334`) Other
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Minor fixups. After I get feedback here, I plan to make a PR with detailed instructions about adding to the whatsnew. cc @attack68 - the Styler sections had a major revisions, wanted to get any feedback here. cc @jorisvandenbossche @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/41747
2021-05-31T18:21:33Z
2021-06-09T11:56:59Z
2021-06-09T11:56:59Z
2021-06-09T15:50:27Z
Fix 32bit test; follow-up to #41709
diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index 65ae904d1083a..b4012c6a842a6 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -412,8 +412,8 @@ def test_constructor_errors(self, constructor): with pytest.raises(TypeError, match=msg): constructor(5) - # not an interval - msg = "type <class 'numpy.int64'> with value 0 is not an interval" + # not an interval; dtype depends on 32bit/windows builds + msg = "type <class 'numpy.int(32|64)'> with value 0 is not an interval" with pytest.raises(TypeError, match=msg): constructor([0, 1])
xref #41709
https://api.github.com/repos/pandas-dev/pandas/pulls/41746
2021-05-31T16:21:26Z
2021-05-31T21:30:13Z
2021-05-31T21:30:13Z
2021-05-31T21:36:53Z
DOC: collect positional deprecations, remove duplicate DateOffset methods
diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index e6271a7806706..5d3aa1dc80cae 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -62,13 +62,6 @@ Properties .. autosummary:: :toctree: api/ - BusinessDay.freqstr - BusinessDay.kwds - BusinessDay.name - BusinessDay.nanos - BusinessDay.normalize - BusinessDay.rule_code - BusinessDay.n BusinessDay.weekmask BusinessDay.holidays BusinessDay.calendar @@ -78,14 +71,6 @@ Methods .. autosummary:: :toctree: api/ - BusinessDay.apply - BusinessDay.apply_index - BusinessDay.copy - BusinessDay.isAnchored - BusinessDay.onOffset - BusinessDay.is_anchored - BusinessDay.is_on_offset - BusinessDay.__call__ BusinessHour ------------ @@ -99,13 +84,6 @@ Properties .. autosummary:: :toctree: api/ - BusinessHour.freqstr - BusinessHour.kwds - BusinessHour.name - BusinessHour.nanos - BusinessHour.normalize - BusinessHour.rule_code - BusinessHour.n BusinessHour.start BusinessHour.end BusinessHour.weekmask @@ -117,14 +95,6 @@ Methods .. autosummary:: :toctree: api/ - BusinessHour.apply - BusinessHour.apply_index - BusinessHour.copy - BusinessHour.isAnchored - BusinessHour.onOffset - BusinessHour.is_anchored - BusinessHour.is_on_offset - BusinessHour.__call__ CustomBusinessDay ----------------- @@ -147,13 +117,6 @@ Properties .. autosummary:: :toctree: api/ - CustomBusinessDay.freqstr - CustomBusinessDay.kwds - CustomBusinessDay.name - CustomBusinessDay.nanos - CustomBusinessDay.normalize - CustomBusinessDay.rule_code - CustomBusinessDay.n CustomBusinessDay.weekmask CustomBusinessDay.calendar CustomBusinessDay.holidays @@ -163,14 +126,6 @@ Methods .. 
autosummary:: :toctree: api/ - CustomBusinessDay.apply_index - CustomBusinessDay.apply - CustomBusinessDay.copy - CustomBusinessDay.isAnchored - CustomBusinessDay.onOffset - CustomBusinessDay.is_anchored - CustomBusinessDay.is_on_offset - CustomBusinessDay.__call__ CustomBusinessHour ------------------ @@ -184,13 +139,6 @@ Properties .. autosummary:: :toctree: api/ - CustomBusinessHour.freqstr - CustomBusinessHour.kwds - CustomBusinessHour.name - CustomBusinessHour.nanos - CustomBusinessHour.normalize - CustomBusinessHour.rule_code - CustomBusinessHour.n CustomBusinessHour.weekmask CustomBusinessHour.calendar CustomBusinessHour.holidays @@ -202,14 +150,6 @@ Methods .. autosummary:: :toctree: api/ - CustomBusinessHour.apply - CustomBusinessHour.apply_index - CustomBusinessHour.copy - CustomBusinessHour.isAnchored - CustomBusinessHour.onOffset - CustomBusinessHour.is_anchored - CustomBusinessHour.is_on_offset - CustomBusinessHour.__call__ MonthEnd -------- @@ -223,27 +163,12 @@ Properties .. autosummary:: :toctree: api/ - MonthEnd.freqstr - MonthEnd.kwds - MonthEnd.name - MonthEnd.nanos - MonthEnd.normalize - MonthEnd.rule_code - MonthEnd.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - MonthEnd.apply - MonthEnd.apply_index - MonthEnd.copy - MonthEnd.isAnchored - MonthEnd.onOffset - MonthEnd.is_anchored - MonthEnd.is_on_offset - MonthEnd.__call__ MonthBegin ---------- @@ -257,27 +182,12 @@ Properties .. autosummary:: :toctree: api/ - MonthBegin.freqstr - MonthBegin.kwds - MonthBegin.name - MonthBegin.nanos - MonthBegin.normalize - MonthBegin.rule_code - MonthBegin.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - MonthBegin.apply - MonthBegin.apply_index - MonthBegin.copy - MonthBegin.isAnchored - MonthBegin.onOffset - MonthBegin.is_anchored - MonthBegin.is_on_offset - MonthBegin.__call__ BusinessMonthEnd ---------------- @@ -300,27 +210,12 @@ Properties .. 
autosummary:: :toctree: api/ - BusinessMonthEnd.freqstr - BusinessMonthEnd.kwds - BusinessMonthEnd.name - BusinessMonthEnd.nanos - BusinessMonthEnd.normalize - BusinessMonthEnd.rule_code - BusinessMonthEnd.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - BusinessMonthEnd.apply - BusinessMonthEnd.apply_index - BusinessMonthEnd.copy - BusinessMonthEnd.isAnchored - BusinessMonthEnd.onOffset - BusinessMonthEnd.is_anchored - BusinessMonthEnd.is_on_offset - BusinessMonthEnd.__call__ BusinessMonthBegin ------------------ @@ -343,27 +238,12 @@ Properties .. autosummary:: :toctree: api/ - BusinessMonthBegin.freqstr - BusinessMonthBegin.kwds - BusinessMonthBegin.name - BusinessMonthBegin.nanos - BusinessMonthBegin.normalize - BusinessMonthBegin.rule_code - BusinessMonthBegin.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - BusinessMonthBegin.apply - BusinessMonthBegin.apply_index - BusinessMonthBegin.copy - BusinessMonthBegin.isAnchored - BusinessMonthBegin.onOffset - BusinessMonthBegin.is_anchored - BusinessMonthBegin.is_on_offset - BusinessMonthBegin.__call__ CustomBusinessMonthEnd ---------------------- @@ -386,14 +266,7 @@ Properties .. autosummary:: :toctree: api/ - CustomBusinessMonthEnd.freqstr - CustomBusinessMonthEnd.kwds CustomBusinessMonthEnd.m_offset - CustomBusinessMonthEnd.name - CustomBusinessMonthEnd.nanos - CustomBusinessMonthEnd.normalize - CustomBusinessMonthEnd.rule_code - CustomBusinessMonthEnd.n CustomBusinessMonthEnd.weekmask CustomBusinessMonthEnd.calendar CustomBusinessMonthEnd.holidays @@ -403,14 +276,6 @@ Methods .. autosummary:: :toctree: api/ - CustomBusinessMonthEnd.apply - CustomBusinessMonthEnd.apply_index - CustomBusinessMonthEnd.copy - CustomBusinessMonthEnd.isAnchored - CustomBusinessMonthEnd.onOffset - CustomBusinessMonthEnd.is_anchored - CustomBusinessMonthEnd.is_on_offset - CustomBusinessMonthEnd.__call__ CustomBusinessMonthBegin ------------------------ @@ -433,14 +298,7 @@ Properties .. 
autosummary:: :toctree: api/ - CustomBusinessMonthBegin.freqstr - CustomBusinessMonthBegin.kwds CustomBusinessMonthBegin.m_offset - CustomBusinessMonthBegin.name - CustomBusinessMonthBegin.nanos - CustomBusinessMonthBegin.normalize - CustomBusinessMonthBegin.rule_code - CustomBusinessMonthBegin.n CustomBusinessMonthBegin.weekmask CustomBusinessMonthBegin.calendar CustomBusinessMonthBegin.holidays @@ -450,14 +308,6 @@ Methods .. autosummary:: :toctree: api/ - CustomBusinessMonthBegin.apply - CustomBusinessMonthBegin.apply_index - CustomBusinessMonthBegin.copy - CustomBusinessMonthBegin.isAnchored - CustomBusinessMonthBegin.onOffset - CustomBusinessMonthBegin.is_anchored - CustomBusinessMonthBegin.is_on_offset - CustomBusinessMonthBegin.__call__ SemiMonthEnd ------------ @@ -471,13 +321,6 @@ Properties .. autosummary:: :toctree: api/ - SemiMonthEnd.freqstr - SemiMonthEnd.kwds - SemiMonthEnd.name - SemiMonthEnd.nanos - SemiMonthEnd.normalize - SemiMonthEnd.rule_code - SemiMonthEnd.n SemiMonthEnd.day_of_month Methods @@ -485,14 +328,6 @@ Methods .. autosummary:: :toctree: api/ - SemiMonthEnd.apply - SemiMonthEnd.apply_index - SemiMonthEnd.copy - SemiMonthEnd.isAnchored - SemiMonthEnd.onOffset - SemiMonthEnd.is_anchored - SemiMonthEnd.is_on_offset - SemiMonthEnd.__call__ SemiMonthBegin -------------- @@ -506,13 +341,6 @@ Properties .. autosummary:: :toctree: api/ - SemiMonthBegin.freqstr - SemiMonthBegin.kwds - SemiMonthBegin.name - SemiMonthBegin.nanos - SemiMonthBegin.normalize - SemiMonthBegin.rule_code - SemiMonthBegin.n SemiMonthBegin.day_of_month Methods @@ -520,14 +348,6 @@ Methods .. autosummary:: :toctree: api/ - SemiMonthBegin.apply - SemiMonthBegin.apply_index - SemiMonthBegin.copy - SemiMonthBegin.isAnchored - SemiMonthBegin.onOffset - SemiMonthBegin.is_anchored - SemiMonthBegin.is_on_offset - SemiMonthBegin.__call__ Week ---- @@ -541,13 +361,6 @@ Properties .. 
autosummary:: :toctree: api/ - Week.freqstr - Week.kwds - Week.name - Week.nanos - Week.normalize - Week.rule_code - Week.n Week.weekday Methods @@ -555,14 +368,6 @@ Methods .. autosummary:: :toctree: api/ - Week.apply - Week.apply_index - Week.copy - Week.isAnchored - Week.onOffset - Week.is_anchored - Week.is_on_offset - Week.__call__ WeekOfMonth ----------- @@ -576,13 +381,6 @@ Properties .. autosummary:: :toctree: api/ - WeekOfMonth.freqstr - WeekOfMonth.kwds - WeekOfMonth.name - WeekOfMonth.nanos - WeekOfMonth.normalize - WeekOfMonth.rule_code - WeekOfMonth.n WeekOfMonth.week Methods @@ -590,14 +388,6 @@ Methods .. autosummary:: :toctree: api/ - WeekOfMonth.apply - WeekOfMonth.apply_index - WeekOfMonth.copy - WeekOfMonth.isAnchored - WeekOfMonth.onOffset - WeekOfMonth.is_anchored - WeekOfMonth.is_on_offset - WeekOfMonth.__call__ WeekOfMonth.weekday LastWeekOfMonth @@ -612,13 +402,6 @@ Properties .. autosummary:: :toctree: api/ - LastWeekOfMonth.freqstr - LastWeekOfMonth.kwds - LastWeekOfMonth.name - LastWeekOfMonth.nanos - LastWeekOfMonth.normalize - LastWeekOfMonth.rule_code - LastWeekOfMonth.n LastWeekOfMonth.weekday LastWeekOfMonth.week @@ -627,14 +410,6 @@ Methods .. autosummary:: :toctree: api/ - LastWeekOfMonth.apply - LastWeekOfMonth.apply_index - LastWeekOfMonth.copy - LastWeekOfMonth.isAnchored - LastWeekOfMonth.onOffset - LastWeekOfMonth.is_anchored - LastWeekOfMonth.is_on_offset - LastWeekOfMonth.__call__ BQuarterEnd ----------- @@ -648,13 +423,6 @@ Properties .. autosummary:: :toctree: api/ - BQuarterEnd.freqstr - BQuarterEnd.kwds - BQuarterEnd.name - BQuarterEnd.nanos - BQuarterEnd.normalize - BQuarterEnd.rule_code - BQuarterEnd.n BQuarterEnd.startingMonth Methods @@ -662,14 +430,6 @@ Methods .. 
autosummary:: :toctree: api/ - BQuarterEnd.apply - BQuarterEnd.apply_index - BQuarterEnd.copy - BQuarterEnd.isAnchored - BQuarterEnd.onOffset - BQuarterEnd.is_anchored - BQuarterEnd.is_on_offset - BQuarterEnd.__call__ BQuarterBegin ------------- @@ -683,13 +443,6 @@ Properties .. autosummary:: :toctree: api/ - BQuarterBegin.freqstr - BQuarterBegin.kwds - BQuarterBegin.name - BQuarterBegin.nanos - BQuarterBegin.normalize - BQuarterBegin.rule_code - BQuarterBegin.n BQuarterBegin.startingMonth Methods @@ -697,14 +450,6 @@ Methods .. autosummary:: :toctree: api/ - BQuarterBegin.apply - BQuarterBegin.apply_index - BQuarterBegin.copy - BQuarterBegin.isAnchored - BQuarterBegin.onOffset - BQuarterBegin.is_anchored - BQuarterBegin.is_on_offset - BQuarterBegin.__call__ QuarterEnd ---------- @@ -718,13 +463,6 @@ Properties .. autosummary:: :toctree: api/ - QuarterEnd.freqstr - QuarterEnd.kwds - QuarterEnd.name - QuarterEnd.nanos - QuarterEnd.normalize - QuarterEnd.rule_code - QuarterEnd.n QuarterEnd.startingMonth Methods @@ -732,14 +470,6 @@ Methods .. autosummary:: :toctree: api/ - QuarterEnd.apply - QuarterEnd.apply_index - QuarterEnd.copy - QuarterEnd.isAnchored - QuarterEnd.onOffset - QuarterEnd.is_anchored - QuarterEnd.is_on_offset - QuarterEnd.__call__ QuarterBegin ------------ @@ -753,13 +483,6 @@ Properties .. autosummary:: :toctree: api/ - QuarterBegin.freqstr - QuarterBegin.kwds - QuarterBegin.name - QuarterBegin.nanos - QuarterBegin.normalize - QuarterBegin.rule_code - QuarterBegin.n QuarterBegin.startingMonth Methods @@ -767,14 +490,6 @@ Methods .. autosummary:: :toctree: api/ - QuarterBegin.apply - QuarterBegin.apply_index - QuarterBegin.copy - QuarterBegin.isAnchored - QuarterBegin.onOffset - QuarterBegin.is_anchored - QuarterBegin.is_on_offset - QuarterBegin.__call__ BYearEnd -------- @@ -788,13 +503,6 @@ Properties .. 
autosummary:: :toctree: api/ - BYearEnd.freqstr - BYearEnd.kwds - BYearEnd.name - BYearEnd.nanos - BYearEnd.normalize - BYearEnd.rule_code - BYearEnd.n BYearEnd.month Methods @@ -802,14 +510,6 @@ Methods .. autosummary:: :toctree: api/ - BYearEnd.apply - BYearEnd.apply_index - BYearEnd.copy - BYearEnd.isAnchored - BYearEnd.onOffset - BYearEnd.is_anchored - BYearEnd.is_on_offset - BYearEnd.__call__ BYearBegin ---------- @@ -823,13 +523,6 @@ Properties .. autosummary:: :toctree: api/ - BYearBegin.freqstr - BYearBegin.kwds - BYearBegin.name - BYearBegin.nanos - BYearBegin.normalize - BYearBegin.rule_code - BYearBegin.n BYearBegin.month Methods @@ -837,14 +530,6 @@ Methods .. autosummary:: :toctree: api/ - BYearBegin.apply - BYearBegin.apply_index - BYearBegin.copy - BYearBegin.isAnchored - BYearBegin.onOffset - BYearBegin.is_anchored - BYearBegin.is_on_offset - BYearBegin.__call__ YearEnd ------- @@ -858,13 +543,6 @@ Properties .. autosummary:: :toctree: api/ - YearEnd.freqstr - YearEnd.kwds - YearEnd.name - YearEnd.nanos - YearEnd.normalize - YearEnd.rule_code - YearEnd.n YearEnd.month Methods @@ -872,14 +550,6 @@ Methods .. autosummary:: :toctree: api/ - YearEnd.apply - YearEnd.apply_index - YearEnd.copy - YearEnd.isAnchored - YearEnd.onOffset - YearEnd.is_anchored - YearEnd.is_on_offset - YearEnd.__call__ YearBegin --------- @@ -893,13 +563,6 @@ Properties .. autosummary:: :toctree: api/ - YearBegin.freqstr - YearBegin.kwds - YearBegin.name - YearBegin.nanos - YearBegin.normalize - YearBegin.rule_code - YearBegin.n YearBegin.month Methods @@ -907,14 +570,6 @@ Methods .. autosummary:: :toctree: api/ - YearBegin.apply - YearBegin.apply_index - YearBegin.copy - YearBegin.isAnchored - YearBegin.onOffset - YearBegin.is_anchored - YearBegin.is_on_offset - YearBegin.__call__ FY5253 ------ @@ -928,13 +583,6 @@ Properties .. 
autosummary:: :toctree: api/ - FY5253.freqstr - FY5253.kwds - FY5253.name - FY5253.nanos - FY5253.normalize - FY5253.rule_code - FY5253.n FY5253.startingMonth FY5253.variation FY5253.weekday @@ -944,16 +592,8 @@ Methods .. autosummary:: :toctree: api/ - FY5253.apply - FY5253.apply_index - FY5253.copy FY5253.get_rule_code_suffix FY5253.get_year_end - FY5253.isAnchored - FY5253.onOffset - FY5253.is_anchored - FY5253.is_on_offset - FY5253.__call__ FY5253Quarter ------------- @@ -967,13 +607,6 @@ Properties .. autosummary:: :toctree: api/ - FY5253Quarter.freqstr - FY5253Quarter.kwds - FY5253Quarter.name - FY5253Quarter.nanos - FY5253Quarter.normalize - FY5253Quarter.rule_code - FY5253Quarter.n FY5253Quarter.qtr_with_extra_week FY5253Quarter.startingMonth FY5253Quarter.variation @@ -984,17 +617,9 @@ Methods .. autosummary:: :toctree: api/ - FY5253Quarter.apply - FY5253Quarter.apply_index - FY5253Quarter.copy FY5253Quarter.get_rule_code_suffix FY5253Quarter.get_weeks - FY5253Quarter.isAnchored - FY5253Quarter.onOffset - FY5253Quarter.is_anchored - FY5253Quarter.is_on_offset FY5253Quarter.year_has_extra_week - FY5253Quarter.__call__ Easter ------ @@ -1008,27 +633,12 @@ Properties .. autosummary:: :toctree: api/ - Easter.freqstr - Easter.kwds - Easter.name - Easter.nanos - Easter.normalize - Easter.rule_code - Easter.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Easter.apply - Easter.apply_index - Easter.copy - Easter.isAnchored - Easter.onOffset - Easter.is_anchored - Easter.is_on_offset - Easter.__call__ Tick ---- @@ -1043,27 +653,12 @@ Properties :toctree: api/ Tick.delta - Tick.freqstr - Tick.kwds - Tick.name - Tick.nanos - Tick.normalize - Tick.rule_code - Tick.n Methods ~~~~~~~ .. 
autosummary:: :toctree: api/ - Tick.copy - Tick.isAnchored - Tick.onOffset - Tick.is_anchored - Tick.is_on_offset - Tick.__call__ - Tick.apply - Tick.apply_index Day --- @@ -1078,27 +673,12 @@ Properties :toctree: api/ Day.delta - Day.freqstr - Day.kwds - Day.name - Day.nanos - Day.normalize - Day.rule_code - Day.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Day.copy - Day.isAnchored - Day.onOffset - Day.is_anchored - Day.is_on_offset - Day.__call__ - Day.apply - Day.apply_index Hour ---- @@ -1113,27 +693,12 @@ Properties :toctree: api/ Hour.delta - Hour.freqstr - Hour.kwds - Hour.name - Hour.nanos - Hour.normalize - Hour.rule_code - Hour.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Hour.copy - Hour.isAnchored - Hour.onOffset - Hour.is_anchored - Hour.is_on_offset - Hour.__call__ - Hour.apply - Hour.apply_index Minute ------ @@ -1148,27 +713,12 @@ Properties :toctree: api/ Minute.delta - Minute.freqstr - Minute.kwds - Minute.name - Minute.nanos - Minute.normalize - Minute.rule_code - Minute.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Minute.copy - Minute.isAnchored - Minute.onOffset - Minute.is_anchored - Minute.is_on_offset - Minute.__call__ - Minute.apply - Minute.apply_index Second ------ @@ -1183,27 +733,12 @@ Properties :toctree: api/ Second.delta - Second.freqstr - Second.kwds - Second.name - Second.nanos - Second.normalize - Second.rule_code - Second.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Second.copy - Second.isAnchored - Second.onOffset - Second.is_anchored - Second.is_on_offset - Second.__call__ - Second.apply - Second.apply_index Milli ----- @@ -1218,27 +753,12 @@ Properties :toctree: api/ Milli.delta - Milli.freqstr - Milli.kwds - Milli.name - Milli.nanos - Milli.normalize - Milli.rule_code - Milli.n Methods ~~~~~~~ .. 
autosummary:: :toctree: api/ - Milli.copy - Milli.isAnchored - Milli.onOffset - Milli.is_anchored - Milli.is_on_offset - Milli.__call__ - Milli.apply - Milli.apply_index Micro ----- @@ -1253,27 +773,12 @@ Properties :toctree: api/ Micro.delta - Micro.freqstr - Micro.kwds - Micro.name - Micro.nanos - Micro.normalize - Micro.rule_code - Micro.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Micro.copy - Micro.isAnchored - Micro.onOffset - Micro.is_anchored - Micro.is_on_offset - Micro.__call__ - Micro.apply - Micro.apply_index Nano ---- @@ -1288,27 +793,12 @@ Properties :toctree: api/ Nano.delta - Nano.freqstr - Nano.kwds - Nano.name - Nano.nanos - Nano.normalize - Nano.rule_code - Nano.n Methods ~~~~~~~ .. autosummary:: :toctree: api/ - Nano.copy - Nano.isAnchored - Nano.onOffset - Nano.is_anchored - Nano.is_on_offset - Nano.__call__ - Nano.apply - Nano.apply_index .. _api.frequencies: diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..129cc071af2c7 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -681,28 +681,34 @@ Deprecations - Deprecated the ``convert_float`` optional argument in :func:`read_excel` and :meth:`ExcelFile.parse` (:issue:`41127`) - Deprecated behavior of :meth:`DatetimeIndex.union` with mixed timezones; in a future version both will be cast to UTC instead of object dtype (:issue:`39328`) - Deprecated using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) -- Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) -- Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) -- Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) -- Deprecated passing arguments as positional in 
:meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) -- Deprecated passing arguments as positional (except for ``"method"``) in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` (:issue:`41485`) -- Deprecated passing arguments as positional in :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, and :meth:`Series.bfill` (:issue:`41485`) -- Deprecated passing arguments as positional in :meth:`DataFrame.sort_values` (other than ``"by"``) and :meth:`Series.sort_values` (:issue:`41485`) +- Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) + +.. 
_whatsnew_130.deprecations.positional_arguments: + +Deprecated Allowing Passing Arguments As Positional +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.dropna` and :meth:`Series.dropna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_index` (other than ``"keys"``) (:issue:`41485`) -- Deprecated passing arguments as positional (except for ``"levels"``) in :meth:`MultiIndex.set_levels` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.sort_index` and :meth:`Series.sort_index` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop_duplicates` (except for ``subset``), :meth:`Series.drop_duplicates`, :meth:`Index.drop_duplicates` and :meth:`MultiIndex.drop_duplicates`(:issue:`41485`) - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) -- Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. 
Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) -- In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) -- Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) -- Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) -- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) -- Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) -- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) +- Deprecated passing arguments as positional (except for ``"method"``) in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, and :meth:`Series.bfill` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.sort_values` (other than ``"by"``) and :meth:`Series.sort_values` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) +- Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) +- Deprecated passing arguments as positional (except for ``"levels"``) in 
:meth:`MultiIndex.set_levels` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) +- Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) .. _whatsnew_130.deprecations.nuisance_columns:
The DateOffset duplication is discussed in #41586. docbuild is segfaulting locally (independent of this PR), so will have to see if this works on the CI.
https://api.github.com/repos/pandas-dev/pandas/pulls/41745
2021-05-31T16:17:45Z
2021-06-01T18:41:15Z
null
2021-06-03T17:16:58Z
REF: remove unnecessary try/excepts
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 95d9409b265ce..47779dd6dba25 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -439,12 +439,6 @@ def __init__( "explicitly specify the categories order " "by passing in a categories argument." ) from err - except ValueError as err: - - # TODO(EA2D) - raise NotImplementedError( - "> 1 ndim Categorical are not supported at this time" - ) from err # we're inferring from values dtype = CategoricalDtype(categories, dtype.ordered) diff --git a/pandas/core/common.py b/pandas/core/common.py index 04ff2d2c4618f..c0e44a437f59e 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -142,11 +142,8 @@ def is_bool_indexer(key: Any) -> bool: elif is_bool_dtype(key.dtype): return True elif isinstance(key, list): - try: - arr = np.asarray(key) - return arr.dtype == np.bool_ and len(arr) == len(key) - except TypeError: # pragma: no cover - return False + arr = np.asarray(key) + return arr.dtype == np.bool_ and len(arr) == len(key) return False diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0267116cdfb99..62b75dd90c79b 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -17,7 +17,6 @@ import numpy.ma as ma from pandas._libs import lib -from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( AnyArrayLike, ArrayLike, @@ -289,9 +288,9 @@ def array( IntegerArray, IntervalArray, PandasArray, + PeriodArray, StringArray, TimedeltaArray, - period_array, ) if lib.is_scalar(data): @@ -315,12 +314,8 @@ def array( if dtype is None: inferred_dtype = lib.infer_dtype(data, skipna=True) if inferred_dtype == "period": - try: - return period_array(data, copy=copy) - except IncompatibleFrequency: - # We may have a mixture of frequencies. - # We choose to return an ndarray, rather than raising. 
- pass + return PeriodArray._from_sequence(data, copy=copy) + elif inferred_dtype == "interval": try: return IntervalArray(data, copy=copy) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a50ebd959ace..11f4da02c0d2f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6468,11 +6468,8 @@ def _maybe_cast_data_without_dtype(subarr: np.ndarray) -> ArrayLike: tda = TimedeltaArray._from_sequence(subarr, copy=False) return tda elif inferred == "period": - try: - parr = PeriodArray._from_sequence(subarr) - return parr - except IncompatibleFrequency: - pass + parr = PeriodArray._from_sequence(subarr) + return parr return subarr
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41744
2021-05-31T15:11:35Z
2021-05-31T20:18:52Z
2021-05-31T20:18:52Z
2021-05-31T20:20:19Z
⬆️ UPGRADE: Autoupdate pre-commit config
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3078619ecac35..dee52d8ad41bf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,7 +9,7 @@ repos: - id: absolufy-imports files: ^pandas/ - repo: https://github.com/python/black - rev: 20.8b1 + rev: 21.5b1 hooks: - id: black - repo: https://github.com/codespell-project/codespell @@ -57,7 +57,7 @@ repos: hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v2.18.3 + rev: v2.19.0 hooks: - id: pyupgrade args: [--py37-plus] diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 37f5a5730439d..be3498dc0829b 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -189,7 +189,7 @@ def get_default_val(pat: str): class DictWrapper: - """ provide attribute-style access to a nested dict""" + """provide attribute-style access to a nested dict""" def __init__(self, d: dict[str, Any], prefix: str = ""): object.__setattr__(self, "d", d) @@ -571,7 +571,7 @@ def _get_root(key: str) -> tuple[dict[str, Any], str]: def _is_deprecated(key: str) -> bool: - """ Returns True if the given option has been deprecated """ + """Returns True if the given option has been deprecated""" key = key.lower() return key in _deprecated_options @@ -643,7 +643,7 @@ def _warn_if_deprecated(key: str) -> bool: def _build_option_description(k: str) -> str: - """ Builds a formatted description of a registered option and prints it """ + """Builds a formatted description of a registered option and prints it""" o = _get_registered_option(k) d = _get_deprecated_option(k) @@ -667,7 +667,7 @@ def _build_option_description(k: str) -> str: def pp_options_list(keys: Iterable[str], width=80, _print: bool = False): - """ Builds a concise listing of available options, grouped by prefix """ + """Builds a concise listing of available options, grouped by prefix""" from itertools import groupby from textwrap import wrap diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi index 
30a31d17fc947..a2d028250eb51 100644 --- a/pandas/_libs/algos.pyi +++ b/pandas/_libs/algos.pyi @@ -7,6 +7,7 @@ class Infinity: """ Provide a positive Infinity comparison method for ranking. """ + def __eq__(self, other) -> bool: ... def __ne__(self, other) -> bool: ... def __lt__(self, other) -> bool: ... @@ -14,11 +15,11 @@ class Infinity: def __gt__(self, other) -> bool: ... def __ge__(self, other) -> bool: ... - class NegInfinity: """ Provide a negative Infinity comparison method for ranking. """ + def __eq__(self, other) -> bool: ... def __ne__(self, other) -> bool: ... def __lt__(self, other) -> bool: ... @@ -26,56 +27,38 @@ class NegInfinity: def __gt__(self, other) -> bool: ... def __ge__(self, other) -> bool: ... - def unique_deltas( arr: np.ndarray, # const int64_t[:] ) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] - - def is_lexsorted(list_of_arrays: list[np.ndarray]) -> bool: ... - - def groupsort_indexer( index: np.ndarray, # const int64_t[:] ngroups: int, ) -> tuple[ np.ndarray, # ndarray[int64_t, ndim=1] np.ndarray, # ndarray[int64_t, ndim=1] -]: - ... - - +]: ... def kth_smallest( a: np.ndarray, # numeric[:] k: int, -) -> Any: ... # numeric - +) -> Any: ... # numeric # ---------------------------------------------------------------------- # Pairwise correlation/covariance - - def nancorr( mat: np.ndarray, # const float64_t[:, :] cov: bool = False, minp=None, -) -> np.ndarray: # np.ndarray[float64_t, ndim=2] - ... - - +) -> np.ndarray: ... def nancorr_spearman( mat: np.ndarray, # ndarray[float64_t, ndim=2] minp: int = 1, -) -> np.ndarray: # np.ndarray[np.float64, ndim=2] - ... - - +) -> np.ndarray: ... def nancorr_kendall( mat: np.ndarray, # ndarray[float64_t, ndim=2] minp: int = 1, -) -> np.ndarray: # np.ndarray[float64, ndim=2] - ... +) -> np.ndarray: ... 
# ---------------------------------------------------------------------- @@ -92,58 +75,40 @@ def nancorr_kendall( # uint16_t # uint8_t - def validate_limit(nobs: int | None, limit=None) -> int: ... - - def pad( - old: np.ndarray, # ndarray[algos_t] - new: np.ndarray, # ndarray[algos_t] + old: np.ndarray, # ndarray[algos_t] + new: np.ndarray, # ndarray[algos_t] limit=None, ) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] - - def pad_inplace( values: np.ndarray, # algos_t[:] - mask: np.ndarray, # uint8_t[:] + mask: np.ndarray, # uint8_t[:] limit=None, ) -> None: ... - - def pad_2d_inplace( values: np.ndarray, # algos_t[:, :] - mask: np.ndarray, # const uint8_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] limit=None, -) -> None: - ... - - +) -> None: ... def backfill( old: np.ndarray, # ndarray[algos_t] new: np.ndarray, # ndarray[algos_t] limit=None, -) -> np.ndarray: # np.ndarray[np.intp, ndim=1] - ... - +) -> np.ndarray: ... def backfill_inplace( values: np.ndarray, # algos_t[:] - mask: np.ndarray, # uint8_t[:] + mask: np.ndarray, # uint8_t[:] limit=None, ) -> None: ... - - def backfill_2d_inplace( values: np.ndarray, # algos_t[:, :] - mask: np.ndarray, # const uint8_t[:, :] + mask: np.ndarray, # const uint8_t[:, :] limit=None, ) -> None: ... - - def is_monotonic( - arr: np.ndarray, # ndarray[algos_t, ndim=1] - timelike: bool -) -> tuple[bool, bool, bool]: - ... + arr: np.ndarray, timelike: bool # ndarray[algos_t, ndim=1] +) -> tuple[bool, bool, bool]: ... # ---------------------------------------------------------------------- # rank_1d, rank_2d @@ -155,7 +120,6 @@ def is_monotonic( # uint64_t # int64_t - def rank_1d( values: np.ndarray, # ndarray[rank_t, ndim=1] labels: np.ndarray, # const int64_t[:] @@ -165,8 +129,6 @@ def rank_1d( pct: bool = ..., na_option=..., ) -> np.ndarray: ... 
# np.ndarray[float64_t, ndim=1] - - def rank_2d( in_arr: np.ndarray, # ndarray[rank_t, ndim=2] axis: int = ..., @@ -176,8 +138,6 @@ def rank_2d( na_option=..., pct: bool = ..., ) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1] - - def diff_2d( arr: np.ndarray, # ndarray[diff_t, ndim=2] out: np.ndarray, # ndarray[out_t, ndim=2] @@ -185,109 +145,243 @@ def diff_2d( axis: int, datetimelike: bool = ..., ) -> None: ... - - def ensure_platform_int(arr: object) -> np.ndarray: ... - def ensure_object(arr: object) -> np.ndarray: ... - def ensure_float64(arr: object, copy=True) -> np.ndarray: ... - def ensure_float32(arr: object, copy=True) -> np.ndarray: ... - def ensure_int8(arr: object, copy=True) -> np.ndarray: ... - def ensure_int16(arr: object, copy=True) -> np.ndarray: ... - def ensure_int32(arr: object, copy=True) -> np.ndarray: ... - def ensure_int64(arr: object, copy=True) -> np.ndarray: ... - def ensure_uint8(arr: object, copy=True) -> np.ndarray: ... - def ensure_uint16(arr: object, copy=True) -> np.ndarray: ... - def ensure_uint32(arr: object, copy=True) -> np.ndarray: ... - def ensure_uint64(arr: object, copy=True) -> np.ndarray: ... - - -def take_1d_int8_int8(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int8_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int8_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int8_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int16_int16(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int16_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int16_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... 
-def take_1d_int16_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int32_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int32_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int64_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_int64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_float32_float32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_float32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_float64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_object_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_bool_bool(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_1d_bool_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... - -def take_2d_axis0_int8_int8(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int8_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int8_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int8_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int16_int16(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... 
-def take_2d_axis0_int16_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int16_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int16_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int32_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int32_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int64_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_int64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_float32_float32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_float32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_float64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_object_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_bool_bool(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis0_bool_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... - -def take_2d_axis1_int8_int8(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int8_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... 
-def take_2d_axis1_int8_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int8_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int16_int16(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int16_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int16_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int16_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int32_int32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int32_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int64_int64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_int64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_float32_float32(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_float32_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_float64_float64(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_object_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_axis1_bool_bool(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... 
-def take_2d_axis1_bool_object(values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=...) -> None: ... - -def take_2d_multi_int8_int8(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int8_int32(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int8_int64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int8_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int16_int16(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int16_int32(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int16_int64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int16_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int32_int32(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int32_int64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int32_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int64_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_float32_float32(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_float32_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_float64_float64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_object_object(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_bool_bool(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... 
-def take_2d_multi_bool_object(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... -def take_2d_multi_int64_int64(values: np.ndarray, indexer, out: np.ndarray, fill_value=...) -> None: ... +def take_1d_int8_int8( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int8_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int16( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int16_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_int64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_float32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... 
+def take_1d_float64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_object_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_bool( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_1d_bool_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int8( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int8_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int16( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int16_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_int64_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis0_int64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_float64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_object_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_bool( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis0_bool_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int8( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int8_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int16( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int16_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_int32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_axis1_int32_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_int64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_int64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float32( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float32_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_float64_float64( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_object_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_bool( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_axis1_bool_object( + values: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int8( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int32( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_int64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int8_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int16_int16( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int16_int32( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... 
+def take_2d_multi_int16_int64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int16_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int32_int32( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int32_int64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int32_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int64_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_float32_float32( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_float32_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_float64_float64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_object_object( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_bool_bool( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_bool_object( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... +def take_2d_multi_int64_int64( + values: np.ndarray, indexer, out: np.ndarray, fill_value=... +) -> None: ... diff --git a/pandas/_libs/arrays.pyi b/pandas/_libs/arrays.pyi index 0ca501c5b712c..67af9653fc75a 100644 --- a/pandas/_libs/arrays.pyi +++ b/pandas/_libs/arrays.pyi @@ -10,36 +10,25 @@ from pandas._typing import ( class NDArrayBacked: _dtype: DtypeObj _ndarray: np.ndarray - def __init__(self, values: np.ndarray, dtype: DtypeObj): ... - @classmethod def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ... - def _from_backing_data(self, values: np.ndarray): ... - def __setstate__(self, state): ... - def __len__(self) -> int: ... 
- @property def shape(self) -> Shape: ... - @property def ndim(self) -> int: ... - @property def size(self) -> int: ... - @property def nbytes(self) -> int: ... - def copy(self): ... def delete(self, loc, axis=0): ... def swapaxes(self, axis1, axis2): ... def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ... def reshape(self, *args, **kwargs): ... def ravel(self, order="C"): ... - @property def T(self): ... diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 8721624e9881c..7b1dcbe562123 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -3,128 +3,111 @@ from typing import Literal import numpy as np def group_median_float64( - out: np.ndarray, # ndarray[float64_t, ndim=2] - counts: np.ndarray, # ndarray[int64_t] - values: np.ndarray, # ndarray[float64_t, ndim=2] - labels: np.ndarray, # ndarray[int64_t] + out: np.ndarray, # ndarray[float64_t, ndim=2] + counts: np.ndarray, # ndarray[int64_t] + values: np.ndarray, # ndarray[float64_t, ndim=2] + labels: np.ndarray, # ndarray[int64_t] min_count: int = ..., # Py_ssize_t ) -> None: ... - def group_cumprod_float64( - out: np.ndarray, # float64_t[:, ::1] + out: np.ndarray, # float64_t[:, ::1] values: np.ndarray, # const float64_t[:, :] labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, skipna: bool = ..., ) -> None: ... - def group_cumsum( - out: np.ndarray, # numeric[:, ::1] + out: np.ndarray, # numeric[:, ::1] values: np.ndarray, # ndarray[numeric, ndim=2] labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, skipna: bool = ..., ) -> None: ... - - def group_shift_indexer( - out: np.ndarray, # int64_t[::1] + out: np.ndarray, # int64_t[::1] labels: np.ndarray, # const int64_t[:] ngroups: int, periods: int, ) -> None: ... 
- - def group_fillna_indexer( - out: np.ndarray, # ndarray[int64_t] + out: np.ndarray, # ndarray[int64_t] labels: np.ndarray, # ndarray[int64_t] - mask: np.ndarray, # ndarray[uint8_t] + mask: np.ndarray, # ndarray[uint8_t] direction: Literal["ffill", "bfill"], - limit: int, # int64_t + limit: int, # int64_t dropna: bool, ) -> None: ... - - def group_any_all( - out: np.ndarray, # uint8_t[::1] + out: np.ndarray, # uint8_t[::1] values: np.ndarray, # const uint8_t[::1] labels: np.ndarray, # const int64_t[:] - mask: np.ndarray, # const uint8_t[::1] + mask: np.ndarray, # const uint8_t[::1] val_test: Literal["any", "all"], skipna: bool, ) -> None: ... - def group_add( - out: np.ndarray, # complexfloating_t[:, ::1] + out: np.ndarray, # complexfloating_t[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[complexfloating_t, ndim=2] labels: np.ndarray, # const intp_t[:] - min_count: int = ... + min_count: int = ..., ) -> None: ... - def group_prod( - out: np.ndarray, # floating[:, ::1] + out: np.ndarray, # floating[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[floating, ndim=2] labels: np.ndarray, # const intp_t[:] - min_count: int = ... + min_count: int = ..., ) -> None: ... - def group_var( - out: np.ndarray, # floating[:, ::1] - counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[floating, ndim=2] - labels: np.ndarray, # const intp_t[:] + out: np.ndarray, # floating[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[floating, ndim=2] + labels: np.ndarray, # const intp_t[:] min_count: int = ..., # Py_ssize_t - ddof: int = ..., # int64_t + ddof: int = ..., # int64_t ) -> None: ... - def group_mean( - out: np.ndarray, # floating[:, ::1] + out: np.ndarray, # floating[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[floating, ndim=2] labels: np.ndarray, # const intp_t[:] - min_count: int = ... + min_count: int = ..., ) -> None: ... 
- def group_ohlc( - out: np.ndarray, # floating[:, ::1] + out: np.ndarray, # floating[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[floating, ndim=2] labels: np.ndarray, # const intp_t[:] - min_count: int = ... + min_count: int = ..., ) -> None: ... - def group_quantile( - out: np.ndarray, # ndarray[float64_t] + out: np.ndarray, # ndarray[float64_t] values: np.ndarray, # ndarray[numeric, ndim=1] labels: np.ndarray, # ndarray[int64_t] - mask: np.ndarray, # ndarray[uint8_t] - q: float, # float64_t + mask: np.ndarray, # ndarray[uint8_t] + q: float, # float64_t interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], ) -> None: ... - def group_last( - out: np.ndarray, # rank_t[:, ::1] - counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[rank_t, ndim=2] - labels: np.ndarray, # const int64_t[:] + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] min_count: int = ..., # Py_ssize_t ) -> None: ... - def group_nth( - out: np.ndarray, # rank_t[:, ::1] - counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[rank_t, ndim=2] - labels: np.ndarray, # const int64_t[:] - min_count: int = ..., # int64_t - rank: int = ..., # int64_t + out: np.ndarray, # rank_t[:, ::1] + counts: np.ndarray, # int64_t[::1] + values: np.ndarray, # ndarray[rank_t, ndim=2] + labels: np.ndarray, # const int64_t[:] + min_count: int = ..., # int64_t + rank: int = ..., # int64_t ) -> None: ... - def group_rank( - out: np.ndarray, # float64_t[:, ::1] + out: np.ndarray, # float64_t[:, ::1] values: np.ndarray, # ndarray[rank_t, ndim=2] labels: np.ndarray, # const int64_t[:] ngroups: int, @@ -134,35 +117,31 @@ def group_rank( pct: bool = ..., na_option: Literal["keep", "top", "bottom"] = ..., ) -> None: ... 
- def group_max( - out: np.ndarray, # groupby_t[:, ::1] + out: np.ndarray, # groupby_t[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[groupby_t, ndim=2] labels: np.ndarray, # const int64_t[:] min_count: int = ..., ) -> None: ... - def group_min( - out: np.ndarray, # groupby_t[:, ::1] + out: np.ndarray, # groupby_t[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[groupby_t, ndim=2] labels: np.ndarray, # const int64_t[:] min_count: int = ..., ) -> None: ... - def group_cummin( - out: np.ndarray, # groupby_t[:, ::1] - values: np.ndarray, # ndarray[groupby_t, ndim=2] - labels: np.ndarray, # const int64_t[:] + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, ) -> None: ... - def group_cummax( - out: np.ndarray, # groupby_t[:, ::1] - values: np.ndarray, # ndarray[groupby_t, ndim=2] - labels: np.ndarray, # const int64_t[:] + out: np.ndarray, # groupby_t[:, ::1] + values: np.ndarray, # ndarray[groupby_t, ndim=2] + labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, ) -> None: ... diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index 0612acd25a5d5..39e5fbfea9dd1 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -10,18 +10,14 @@ def unique_label_indices( labels: np.ndarray, # const int64_t[:] ) -> np.ndarray: ... - class Factorizer: count: int - def __init__(self, size_hint: int): ... def get_count(self) -> int: ... - class ObjectFactorizer(Factorizer): table: PyObjectHashTable uniques: ObjectVector - def factorize( self, values: np.ndarray, # ndarray[object] @@ -30,11 +26,9 @@ class ObjectFactorizer(Factorizer): na_value=..., ) -> np.ndarray: ... 
# np.ndarray[intp] - class Int64Factorizer(Factorizer): table: Int64HashTable uniques: Int64Vector - def factorize( self, values: np.ndarray, # const int64_t[:] @@ -43,7 +37,6 @@ class Int64Factorizer(Factorizer): na_value=..., ) -> np.ndarray: ... # np.ndarray[intp] - class Int64Vector: def __init__(self): ... def __len__(self) -> int: ... @@ -114,7 +107,6 @@ class ObjectVector: def __len__(self) -> int: ... def to_array(self) -> np.ndarray: ... # np.ndarray[object] - class HashTable: # NB: The base HashTable class does _not_ actually have these methods; # we are putting the here for the sake of mypy to avoid @@ -124,37 +116,31 @@ class HashTable: def __contains__(self, key: Hashable) -> bool: ... def sizeof(self, deep: bool = ...) -> int: ... def get_state(self) -> dict[str, int]: ... - # TODO: `item` type is subclass-specific def get_item(self, item): ... # TODO: return type? def set_item(self, item) -> None: ... - # FIXME: we don't actually have this for StringHashTable or ObjectHashTable? def map( self, - keys: np.ndarray, # np.ndarray[subclass-specific] - values: np.ndarray, # const int64_t[:] values + keys: np.ndarray, # np.ndarray[subclass-specific] + values: np.ndarray, # const int64_t[:] values ) -> None: ... - def map_locations( self, values: np.ndarray, # np.ndarray[subclass-specific] ) -> None: ... - def lookup( self, values: np.ndarray, # np.ndarray[subclass-specific] - ) -> np.ndarray: ... # np.ndarray[np.intp] - + ) -> np.ndarray: ... # np.ndarray[np.intp] def get_labels( self, values: np.ndarray, # np.ndarray[subclass-specific] - uniques, # SubclassTypeVector + uniques, # SubclassTypeVector count_prior: int = ..., na_sentinel: int = ..., na_value: object = ..., - ) -> np.ndarray: ... # np.ndarray[intp_t] - + ) -> np.ndarray: ... 
# np.ndarray[intp_t] def unique( self, values: np.ndarray, # np.ndarray[subclass-specific] @@ -163,11 +149,10 @@ class HashTable: np.ndarray, # np.ndarray[subclass-specific] np.ndarray, # np.ndarray[np.intp], ] | np.ndarray: ... # np.ndarray[subclass-specific] - def _unique( self, values: np.ndarray, # np.ndarray[subclass-specific] - uniques, # FooVector + uniques, # FooVector count_prior: int = ..., na_sentinel: int = ..., na_value: object = ..., @@ -177,7 +162,6 @@ class HashTable: np.ndarray, # np.ndarray[subclass-specific] np.ndarray, # np.ndarray[np.intp], ] | np.ndarray: ... # np.ndarray[subclass-specific] - def factorize( self, values: np.ndarray, # np.ndarray[subclass-specific] @@ -185,9 +169,9 @@ class HashTable: na_value: object = ..., mask=..., ) -> tuple[ - np.ndarray, # np.ndarray[subclass-specific] - np.ndarray, # np.ndarray[np.intp], - ]: ... + np.ndarray, # np.ndarray[subclass-specific] + np.ndarray, # np.ndarray[np.intp], + ]: ... class Complex128HashTable(HashTable): ... class Complex64HashTable(HashTable): ... @@ -211,46 +195,33 @@ class UInt64HashTable(HashTable): ... class UInt32HashTable(HashTable): ... class UInt16HashTable(HashTable): ... class UInt8HashTable(HashTable): ... - class StringHashTable(HashTable): ... class PyObjectHashTable(HashTable): ... - def duplicated_int64( values: np.ndarray, # const int64_t[:] values keep: Literal["last", "first", False] = ..., ) -> np.ndarray: ... # np.ndarray[bool] + # TODO: Is it actually bool or is it uint8? def mode_int64( values: np.ndarray, # const int64_t[:] values dropna: bool, ) -> np.ndarray: ... # np.ndarray[np.int64] - def value_count_int64( values: np.ndarray, # const int64_t[:] dropna: bool, -) -> tuple[ - np.ndarray, # np.ndarray[np.int64] - np.ndarray, # np.ndarray[np.int64] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.int64] # np.ndarray[np.int64] def duplicated( values: np.ndarray, keep: Literal["last", "first", False] = ..., ) -> np.ndarray: ... 
# np.ndarray[bool] - def mode(values: np.ndarray, dropna: bool) -> np.ndarray: ... - def value_count( values: np.ndarray, dropna: bool, -) -> tuple[ - np.ndarray, - np.ndarray, # np.ndarray[np.int64] -]: ... - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.int64] # arr and values should have same dtype def ismember( diff --git a/pandas/_libs/index.pyi b/pandas/_libs/index.pyi index 979619c3d14c4..6bb332435be63 100644 --- a/pandas/_libs/index.pyi +++ b/pandas/_libs/index.pyi @@ -2,32 +2,26 @@ import numpy as np class IndexEngine: over_size_threshold: bool - def __init__(self, vgetter, n: int): ... - def __contains__(self, val: object) -> bool: ... - # -> int | slice | np.ndarray[bool] def get_loc(self, val: object) -> int | slice | np.ndarray: ... - def sizeof(self, deep: bool = False) -> int: ... def __sizeof__(self) -> int: ... - @property def is_unique(self) -> bool: ... - @property def is_monotonic_increasing(self) -> bool: ... - @property def is_monotonic_decreasing(self) -> bool: ... - - def get_backfill_indexer(self, other: np.ndarray, limit: int | None =...) -> np.ndarray: ... - def get_pad_indexer(self, other: np.ndarray, limit: int | None =...) -> np.ndarray: ... - + def get_backfill_indexer( + self, other: np.ndarray, limit: int | None = ... + ) -> np.ndarray: ... + def get_pad_indexer( + self, other: np.ndarray, limit: int | None = ... + ) -> np.ndarray: ... @property def is_mapping_populated(self) -> bool: ... - def clear_mapping(self): ... def get_indexer(self, values: np.ndarray) -> np.ndarray: ... # np.ndarray[np.intp] def get_indexer_non_unique( @@ -38,45 +32,35 @@ class IndexEngine: np.ndarray, # np.ndarray[np.intp] ]: ... - class Float64Engine(IndexEngine): ... class Float32Engine(IndexEngine): ... - class Int64Engine(IndexEngine): ... class Int32Engine(IndexEngine): ... class Int16Engine(IndexEngine): ... class Int8Engine(IndexEngine): ... - class UInt64Engine(IndexEngine): ... class UInt32Engine(IndexEngine): ... 
class UInt16Engine(IndexEngine): ... class UInt8Engine(IndexEngine): ... - class ObjectEngine(IndexEngine): ... - class DatetimeEngine(Int64Engine): ... class TimedeltaEngine(DatetimeEngine): ... class PeriodEngine(Int64Engine): ... - class BaseMultiIndexCodesEngine: levels: list[np.ndarray] offsets: np.ndarray # ndarray[uint64_t, ndim=1] - def __init__( self, levels: list[np.ndarray], # all entries hashable labels: list[np.ndarray], # all entries integer-dtyped offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1] ): ... - def get_indexer( self, target: np.ndarray, # np.ndarray[object] - ) -> np.ndarray: ... # np.ndarray[np.intp] - + ) -> np.ndarray: ... # np.ndarray[np.intp] def _extract_level_codes(self, target: object): ... - def get_indexer_with_fill( self, target: np.ndarray, # np.ndarray[object] of tuples diff --git a/pandas/_libs/internals.pyi b/pandas/_libs/internals.pyi index 74ca311b35ed7..d6fac14d3ee6e 100644 --- a/pandas/_libs/internals.pyi +++ b/pandas/_libs/internals.pyi @@ -16,52 +16,36 @@ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.internals.blocks import Block as B def slice_len(slc: slice, objlen: int = ...) -> int: ... - - def get_blkno_indexers( blknos: np.ndarray, # int64_t[:] group: bool = ..., ) -> list[tuple[int, slice | np.ndarray]]: ... - - def get_blkno_placements( blknos: np.ndarray, group: bool = ..., ) -> Iterator[tuple[int, BlockPlacement]]: ... - class BlockPlacement: def __init__(self, val: int | slice | np.ndarray): ... - @property def indexer(self) -> np.ndarray | slice: ... - @property def as_array(self) -> np.ndarray: ... - @property def is_slice_like(self) -> bool: ... - @overload def __getitem__(self, loc: slice | Sequence[int]) -> BlockPlacement: ... - @overload def __getitem__(self, loc: int) -> int: ... - def __iter__(self) -> Iterator[int]: ... - def __len__(self) -> int: ... - def delete(self, loc) -> BlockPlacement: ... 
- def append(self, others: list[BlockPlacement]) -> BlockPlacement: ... - class SharedBlock: _mgr_locs: BlockPlacement ndim: int values: ArrayLike - def __init__(self, values: ArrayLike, placement: BlockPlacement, ndim: int): ... class NumpyBlock(SharedBlock): @@ -72,8 +56,7 @@ class NDArrayBackedBlock(SharedBlock): values: NDArrayBackedExtensionArray def getitem_block_index(self: T, slicer: slice) -> T: ... -class Block(SharedBlock): - ... +class Block(SharedBlock): ... class BlockManager: blocks: tuple[B, ...] @@ -82,7 +65,7 @@ class BlockManager: _is_consolidated: bool _blknos: np.ndarray _blklocs: np.ndarray - - def __init__(self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=True): ... - - def get_slice(self: T, slobj: slice, axis: int=...) -> T: ... + def __init__( + self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=True + ): ... + def get_slice(self: T, slobj: slice, axis: int = ...) -> T: ... diff --git a/pandas/_libs/join.pyi b/pandas/_libs/join.pyi index 4ae3ef0781dde..f73f495cf4d4f 100644 --- a/pandas/_libs/join.pyi +++ b/pandas/_libs/join.pyi @@ -1,144 +1,91 @@ import numpy as np def inner_join( - left: np.ndarray, # const intp_t[:] + left: np.ndarray, # const intp_t[:] right: np.ndarray, # const intp_t[:] max_groups: int, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] def left_outer_join( - left: np.ndarray, # const intp_t[:] + left: np.ndarray, # const intp_t[:] right: np.ndarray, # const intp_t[:] max_groups: int, sort: bool = True, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... 
# np.ndarray[np.intp] # np.ndarray[np.intp] def full_outer_join( - left: np.ndarray, # const intp_t[:] + left: np.ndarray, # const intp_t[:] right: np.ndarray, # const intp_t[:] max_groups: int, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] def ffill_indexer( - indexer: np.ndarray # const intp_t[:] + indexer: np.ndarray, # const intp_t[:] ) -> np.ndarray: ... # np.ndarray[np.intp] - - def left_join_indexer_unique( - left: np.ndarray, # ndarray[join_t] + left: np.ndarray, # ndarray[join_t] right: np.ndarray, # ndarray[join_t] ) -> np.ndarray: ... # np.ndarray[np.intp] - - def left_join_indexer( - left: np.ndarray, # ndarray[join_t] + left: np.ndarray, # ndarray[join_t] right: np.ndarray, # ndarray[join_t] ) -> tuple[ np.ndarray, # np.ndarray[join_t] np.ndarray, # np.ndarray[np.intp] np.ndarray, # np.ndarray[np.intp] ]: ... - - def inner_join_indexer( - left: np.ndarray, # ndarray[join_t] + left: np.ndarray, # ndarray[join_t] right: np.ndarray, # ndarray[join_t] ) -> tuple[ np.ndarray, # np.ndarray[join_t] np.ndarray, # np.ndarray[np.intp] np.ndarray, # np.ndarray[np.intp] ]: ... - - def outer_join_indexer( - left: np.ndarray, # ndarray[join_t] + left: np.ndarray, # ndarray[join_t] right: np.ndarray, # ndarray[join_t] ) -> tuple[ np.ndarray, # np.ndarray[join_t] np.ndarray, # np.ndarray[np.intp] np.ndarray, # np.ndarray[np.intp] ]: ... - - def asof_join_backward_on_X_by_Y( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - left_by_values: np.ndarray, # by_t[:] + left_values: np.ndarray, # asof_t[:] + right_values: np.ndarray, # asof_t[:] + left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... 
# np.ndarray[np.intp] # np.ndarray[np.intp] def asof_join_forward_on_X_by_Y( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - left_by_values: np.ndarray, # by_t[:] + left_values: np.ndarray, # asof_t[:] + right_values: np.ndarray, # asof_t[:] + left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] def asof_join_nearest_on_X_by_Y( - left_values: np.ndarray, # asof_t[:] - right_values: np.ndarray, # asof_t[:] - left_by_values: np.ndarray, # by_t[:] + left_values: np.ndarray, # asof_t[:] + right_values: np.ndarray, # asof_t[:] + left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] def asof_join_backward( - left_values: np.ndarray, # asof_t[:] + left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] def asof_join_forward( - left_values: np.ndarray, # asof_t[:] + left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... - - +) -> tuple[np.ndarray, np.ndarray,]: ... 
# np.ndarray[np.intp] # np.ndarray[np.intp] def asof_join_nearest( - left_values: np.ndarray, # asof_t[:] + left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = True, tolerance=None, -) -> tuple[ - np.ndarray, # np.ndarray[np.intp] - np.ndarray, # np.ndarray[np.intp] -]: ... +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.intp] # np.ndarray[np.intp] diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 5e1cc612bed57..2b9f68da553eb 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -18,19 +18,15 @@ ndarray_obj_2d = np.ndarray from enum import Enum -class NoDefault(Enum): - ... +class NoDefault(Enum): ... no_default: NoDefault - def item_from_zerodim(val: object) -> object: ... def infer_dtype(value: object, skipna: bool = True) -> str: ... - def is_iterator(obj: object) -> bool: ... def is_scalar(val: object) -> bool: ... def is_list_like(obj: object, allow_sets: bool = True) -> bool: ... - def is_period(val: object) -> bool: ... def is_interval(val: object) -> bool: ... def is_decimal(val: object) -> bool: ... @@ -38,12 +34,10 @@ def is_complex(val: object) -> bool: ... def is_bool(val: object) -> bool: ... def is_integer(val: object) -> bool: ... def is_float(val: object) -> bool: ... - def is_interval_array(values: np.ndarray) -> bool: ... def is_datetime64_array(values: np.ndarray) -> bool: ... def is_timedelta_or_timedelta64_array(values: np.ndarray) -> bool: ... def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ... - def is_time_array(values: np.ndarray, skipna: bool = False): ... def is_date_array(values: np.ndarray, skipna: bool = False): ... def is_datetime_array(values: np.ndarray, skipna: bool = False): ... @@ -51,18 +45,16 @@ def is_string_array(values: np.ndarray, skipna: bool = False): ... def is_float_array(values: np.ndarray, skipna: bool = False): ... def is_integer_array(values: np.ndarray, skipna: bool = False): ... 
def is_bool_array(values: np.ndarray, skipna: bool = False): ... - def fast_multiget(mapping: dict, keys: np.ndarray, default=np.nan) -> np.ndarray: ... - def fast_unique_multiple_list_gen(gen: Generator, sort: bool = True) -> list: ... def fast_unique_multiple_list(lists: list, sort: bool = True) -> list: ... def fast_unique_multiple(arrays: list, sort: bool = True) -> list: ... - def map_infer( - arr: np.ndarray, f: Callable[[Any], Any], convert: bool = True, ignore_na: bool = False + arr: np.ndarray, + f: Callable[[Any], Any], + convert: bool = True, + ignore_na: bool = False, ) -> np.ndarray: ... - - @overload # both convert_datetime and convert_to_nullable_integer False -> np.ndarray def maybe_convert_objects( objects: np.ndarray, # np.ndarray[object] @@ -74,7 +66,6 @@ def maybe_convert_objects( convert_period: Literal[False] = ..., convert_to_nullable_integer: Literal[False] = ..., ) -> np.ndarray: ... - @overload def maybe_convert_objects( objects: np.ndarray, # np.ndarray[object] @@ -86,7 +77,6 @@ def maybe_convert_objects( convert_period: bool = ..., convert_to_nullable_integer: Literal[True] = ..., ) -> ArrayLike: ... - @overload def maybe_convert_objects( objects: np.ndarray, # np.ndarray[object] @@ -98,7 +88,6 @@ def maybe_convert_objects( convert_period: bool = ..., convert_to_nullable_integer: bool = ..., ) -> ArrayLike: ... - @overload def maybe_convert_objects( objects: np.ndarray, # np.ndarray[object] @@ -110,7 +99,6 @@ def maybe_convert_objects( convert_period: Literal[True] = ..., convert_to_nullable_integer: bool = ..., ) -> ArrayLike: ... - @overload def maybe_convert_objects( objects: np.ndarray, # np.ndarray[object] @@ -122,7 +110,6 @@ def maybe_convert_objects( convert_period: bool = ..., convert_to_nullable_integer: bool = ..., ) -> ArrayLike: ... 
- @overload def maybe_convert_numeric( values: np.ndarray, # np.ndarray[object] @@ -131,7 +118,6 @@ def maybe_convert_numeric( coerce_numeric: bool = False, convert_to_masked_nullable: Literal[False] = ..., ) -> tuple[np.ndarray, None]: ... - @overload def maybe_convert_numeric( values: np.ndarray, # np.ndarray[object] @@ -150,54 +136,37 @@ def ensure_string_array( copy: bool = True, skipna: bool = True, ) -> np.ndarray: ... # np.ndarray[object] - -def infer_datetimelike_array( - arr: np.ndarray # np.ndarray[object] -) -> str: ... - +def infer_datetimelike_array(arr: np.ndarray) -> str: ... # np.ndarray[object] def astype_intsafe( arr: np.ndarray, # np.ndarray[object] new_dtype: np.dtype, ) -> np.ndarray: ... - def fast_zip(ndarrays: list) -> np.ndarray: ... # np.ndarray[object] # TODO: can we be more specific about rows? def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ... - def tuples_to_object_array( - tuples: np.ndarray # np.ndarray[object] + tuples: np.ndarray, # np.ndarray[object] ) -> ndarray_obj_2d: ... # TODO: can we be more specific about rows? def to_object_array(rows: object, min_width: int = 0) -> ndarray_obj_2d: ... - def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ... - - def maybe_booleans_to_slice( - mask: np.ndarray # ndarray[uint8_t] + mask: np.ndarray, # ndarray[uint8_t] ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] - def maybe_indices_to_slice( indices: np.ndarray, # np.ndarray[np.intp] max_len: int, ) -> slice | np.ndarray: ... # np.ndarray[np.uint8] - -def clean_index_list(obj: list) -> tuple[ - list | np.ndarray, # np.ndarray[object] | np.ndarray[np.int64] - bool, -]: ... - +def clean_index_list( + obj: list, +) -> tuple[list | np.ndarray, bool,]: ... # np.ndarray[object] | np.ndarray[np.int64] # ----------------------------------------------------------------- # Functions which in reality take memoryviews -def memory_usage_of_objects( - arr: np.ndarray # object[:] -) -> int: ... 
# np.int64 - - +def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64 def map_infer_mask( arr: np.ndarray, f: Callable[[Any], Any], @@ -206,57 +175,38 @@ def map_infer_mask( na_value: Any = ..., dtype: np.dtype = ..., ) -> np.ndarray: ... - def indices_fast( - index: np.ndarray, # ndarray[intp_t] + index: np.ndarray, # ndarray[intp_t] labels: np.ndarray, # const int64_t[:] keys: list, sorted_labels: list[np.ndarray], # list[ndarray[np.int64]] ) -> dict: ... - def generate_slices( - labels: np.ndarray, # const intp_t[:] - ngroups: int -) -> tuple[ - np.ndarray, # np.ndarray[np.int64] - np.ndarray, # np.ndarray[np.int64] -]: ... - + labels: np.ndarray, ngroups: int # const intp_t[:] +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.int64] # np.ndarray[np.int64] def count_level_2d( - mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True], + mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True], labels: np.ndarray, # const intp_t[:] max_bin: int, - axis: int -) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2] - + axis: int, +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2] def get_level_sorter( - label: np.ndarray, # const int64_t[:] + label: np.ndarray, # const int64_t[:] starts: np.ndarray, # const intp_t[:] -) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] - - +) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1] def generate_bins_dt64( values: np.ndarray, # np.ndarray[np.int64] binner: np.ndarray, # const int64_t[:] closed: object = "left", hasnans: bool = False, -) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] - - +) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] def array_equivalent_object( - left: np.ndarray, # object[:] + left: np.ndarray, # object[:] right: np.ndarray, # object[:] ) -> bool: ... - -def has_infs_f8( - arr: np.ndarray # const float64_t[:] -) -> bool: ... - -def has_infs_f4( - arr: np.ndarray # const float32_t[:] -) -> bool: ... - +def has_infs_f8(arr: np.ndarray) -> bool: ... 
# const float64_t[:] +def has_infs_f4(arr: np.ndarray) -> bool: ... # const float32_t[:] def get_reverse_indexer( indexer: np.ndarray, # const intp_t[:] length: int, -) -> np.ndarray: ... # np.ndarray[np.intp] +) -> np.ndarray: ... # np.ndarray[np.intp] diff --git a/pandas/_libs/ops.pyi b/pandas/_libs/ops.pyi index 11d67dfb93d5f..d84b0dee20e7d 100644 --- a/pandas/_libs/ops.pyi +++ b/pandas/_libs/ops.pyi @@ -10,33 +10,26 @@ import numpy as np _BinOp = Callable[[Any, Any], Any] _BoolOp = Callable[[Any, Any], bool] - def scalar_compare( values: np.ndarray, # object[:] val: object, - op: _BoolOp, # {operator.eq, operator.ne, ...} -) -> np.ndarray: ... # np.ndarray[bool] - + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> np.ndarray: ... # np.ndarray[bool] def vec_compare( - left: np.ndarray, # np.ndarray[object] + left: np.ndarray, # np.ndarray[object] right: np.ndarray, # np.ndarray[object] - op: _BoolOp, # {operator.eq, operator.ne, ...} -) -> np.ndarray: ... # np.ndarray[bool] - - + op: _BoolOp, # {operator.eq, operator.ne, ...} +) -> np.ndarray: ... # np.ndarray[bool] def scalar_binop( - values: np.ndarray, # object[:] + values: np.ndarray, # object[:] val: object, - op: _BinOp, # binary operator + op: _BinOp, # binary operator ) -> np.ndarray: ... - - def vec_binop( - left: np.ndarray, # object[:] + left: np.ndarray, # object[:] right: np.ndarray, # object[:] - op: _BinOp, # binary operator + op: _BinOp, # binary operator ) -> np.ndarray: ... - @overload def maybe_convert_bool( arr: np.ndarray, # np.ndarray[object] @@ -44,7 +37,6 @@ def maybe_convert_bool( false_values=..., convert_to_masked_nullable: Literal[False] = ..., ) -> tuple[np.ndarray, None]: ... 
- @overload def maybe_convert_bool( arr: np.ndarray, # np.ndarray[object] diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi index 92b970d47467e..9ff05adceb2b4 100644 --- a/pandas/_libs/parsers.pyi +++ b/pandas/_libs/parsers.pyi @@ -12,20 +12,17 @@ from pandas._typing import ( STR_NA_VALUES: set[str] - def sanitize_objects( values: np.ndarray, # ndarray[object] na_values: set, convert_empty: bool = ..., ) -> int: ... - class TextReader: unnamed_cols: set[str] - table_width: int # int64_t + table_width: int # int64_t leading_cols: int # int64_t header: list[list[int]] # non-negative integers - def __init__( self, source, @@ -64,14 +61,11 @@ class TextReader: mangle_dupe_cols: bool = ..., float_precision: Literal["round_trip", "legacy", "high"] | None = ..., skip_blank_lines: bool = ..., - encoding_errors: bytes | str = ... + encoding_errors: bytes | str = ..., ): ... - def set_error_bad_lines(self, status: int) -> None: ... def set_noconvert(self, i: int) -> None: ... def remove_noconvert(self, i: int) -> None: ... - def close(self) -> None: ... - def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ... def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ... diff --git a/pandas/_libs/reshape.pyi b/pandas/_libs/reshape.pyi index 7aaa18a7feff2..0457ceb1e03e6 100644 --- a/pandas/_libs/reshape.pyi +++ b/pandas/_libs/reshape.pyi @@ -1,19 +1,14 @@ import numpy as np def unstack( - values: np.ndarray, # reshape_t[:, :] - mask: np.ndarray, # const uint8_t[:] + values: np.ndarray, # reshape_t[:, :] + mask: np.ndarray, # const uint8_t[:] stride: int, length: int, width: int, new_values: np.ndarray, # reshape_t[:, :] - new_mask: np.ndarray, # uint8_t[:, :] + new_mask: np.ndarray, # uint8_t[:, :] ) -> None: ... - - def explode( values: np.ndarray, # np.ndarray[object] -) -> tuple[ - np.ndarray, # np.ndarray[object] - np.ndarray, # np.ndarray[np.int64] -]: ... +) -> tuple[np.ndarray, np.ndarray,]: ... 
# np.ndarray[object] # np.ndarray[np.int64] diff --git a/pandas/_libs/testing.pyi b/pandas/_libs/testing.pyi index ac0c772780c5c..01da496975f51 100644 --- a/pandas/_libs/testing.pyi +++ b/pandas/_libs/testing.pyi @@ -1,8 +1,12 @@ - - def assert_dict_equal(a, b, compare_keys: bool = ...): ... - -def assert_almost_equal(a, b, - rtol: float = ..., atol: float = ..., - check_dtype: bool = ..., - obj=..., lobj=..., robj=..., index_values=...): ... +def assert_almost_equal( + a, + b, + rtol: float = ..., + atol: float = ..., + check_dtype: bool = ..., + obj=..., + lobj=..., + robj=..., + index_values=..., +): ... diff --git a/pandas/_libs/tslib.pyi b/pandas/_libs/tslib.pyi index 641e62e7c8973..f43a81f20700a 100644 --- a/pandas/_libs/tslib.pyi +++ b/pandas/_libs/tslib.pyi @@ -3,20 +3,16 @@ from datetime import tzinfo import numpy as np def format_array_from_datetime( - values: np.ndarray, # np.ndarray[np.int64] + values: np.ndarray, # np.ndarray[np.int64] tz: tzinfo | None = ..., format: str | None = ..., - na_rep: object = ... + na_rep: object = ..., ) -> np.ndarray: ... # np.ndarray[object] - - def array_with_unit_to_datetime( values: np.ndarray, unit: str, errors: str = ..., ) -> tuple[np.ndarray, tzinfo | None]: ... - - def array_to_datetime( values: np.ndarray, # np.ndarray[object] errors: str = ..., @@ -26,4 +22,5 @@ def array_to_datetime( require_iso8601: bool = ..., allow_mixed: bool = ..., ) -> tuple[np.ndarray, tzinfo | None]: ... 
+ # returned ndarray may be object dtype or datetime64[ns] diff --git a/pandas/_libs/tslibs/ccalendar.pyi b/pandas/_libs/tslibs/ccalendar.pyi index 500a0423bc9cf..993f18a61d74a 100644 --- a/pandas/_libs/tslibs/ccalendar.pyi +++ b/pandas/_libs/tslibs/ccalendar.pyi @@ -1,4 +1,3 @@ - DAYS: list[str] MONTH_ALIASES: dict[int, str] MONTH_NUMBERS: dict[str, int] diff --git a/pandas/_libs/tslibs/conversion.pyi b/pandas/_libs/tslibs/conversion.pyi index 6470361542597..10c8a819b9b78 100644 --- a/pandas/_libs/tslibs/conversion.pyi +++ b/pandas/_libs/tslibs/conversion.pyi @@ -12,30 +12,16 @@ class OutOfBoundsTimedelta(ValueError): ... def precision_from_unit( unit: str, -) -> tuple[ - int, # int64_t - int, -]: ... - - +) -> tuple[int, int,]: ... # int64_t def ensure_datetime64ns( arr: np.ndarray, # np.ndarray[datetime64[ANY]] copy: bool = ..., ) -> np.ndarray: ... # np.ndarray[datetime64ns] - - def ensure_timedelta64ns( arr: np.ndarray, # np.ndarray[timedelta64[ANY]] copy: bool = ..., ) -> np.ndarray: ... # np.ndarray[timedelta64ns] - - def datetime_to_datetime64( values: np.ndarray, # np.ndarray[object] -) -> tuple[ - np.ndarray, # np.ndarray[dt64ns] - tzinfo | None, -]: ... - - +) -> tuple[np.ndarray, tzinfo | None,]: ... # np.ndarray[dt64ns] def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ... diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi index d3aea5b0be796..f6a8d7887ced1 100644 --- a/pandas/_libs/tslibs/dtypes.pyi +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -5,20 +5,16 @@ from pandas._libs.tslibs.offsets import BaseOffset _attrname_to_abbrevs: dict[str, str] _period_code_map: dict[str, int] - class PeriodDtypeBase: _dtype_code: int # PeriodDtypeCode # actually __cinit__ def __new__(self, code: int): ... - def freq_group_code(self) -> int: ... def date_offset(self) -> BaseOffset: ... - @classmethod def from_date_offset(cls, offset: BaseOffset) -> PeriodDtypeBase: ... - class FreqGroup(Enum): FR_ANN: int = ... 
FR_QTR: int = ... @@ -33,11 +29,9 @@ class FreqGroup(Enum): FR_US: int = ... FR_NS: int = ... FR_UND: int = ... - @staticmethod def get_freq_group(code: int) -> FreqGroup: ... - class Resolution(Enum): RESO_NS: int = ... RESO_US: int = ... @@ -49,19 +43,13 @@ class Resolution(Enum): RESO_MTH: int = ... RESO_QTR: int = ... RESO_YR: int = ... - def __lt__(self, other: Resolution) -> bool: ... - def __ge__(self, other: Resolution) -> bool: ... - @property def freq_group(self) -> FreqGroup: ... - @property def attrname(self) -> str: ... - @classmethod def from_attrname(cls, attrname: str) -> Resolution: ... - @classmethod def get_reso_from_freq(cls, freq: str) -> Resolution: ... diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi index 22ae156d78b7d..244af38e25da0 100644 --- a/pandas/_libs/tslibs/fields.pyi +++ b/pandas/_libs/tslibs/fields.pyi @@ -3,67 +3,48 @@ import numpy as np def build_field_sarray( dtindex: np.ndarray, # const int64_t[:] ) -> np.ndarray: ... - def month_position_check(fields, weekdays) -> str | None: ... - def get_date_name_field( dtindex: np.ndarray, # const int64_t[:] field: str, locale=..., ) -> np.ndarray: ... # np.ndarray[object] - def get_start_end_field( dtindex: np.ndarray, # const int64_t[:] field: str, freqstr: str | None = ..., - month_kw: int = ... + month_kw: int = ..., ) -> np.ndarray: ... # np.ndarray[bool] - - def get_date_field( dtindex: np.ndarray, # const int64_t[:] - field: str, ) -> np.ndarray: ... # np.ndarray[in32] - - def get_timedelta_field( tdindex: np.ndarray, # const int64_t[:] field: str, ) -> np.ndarray: ... # np.ndarray[int32] - - def isleapyear_arr( years: np.ndarray, ) -> np.ndarray: ... # np.ndarray[bool] - def build_isocalendar_sarray( dtindex: np.ndarray, # const int64_t[:] ) -> np.ndarray: ... - - def get_locale_names(name_type: str, locale: object = None): ... - class RoundTo: @property def MINUS_INFTY(self) -> int: ... - @property def PLUS_INFTY(self) -> int: ... 
- @property def NEAREST_HALF_EVEN(self) -> int: ... - @property def NEAREST_HALF_PLUS_INFTY(self) -> int: ... - @property def NEAREST_HALF_MINUS_INFTY(self) -> int: ... - def round_nsint64( - values: np.ndarray, # np.ndarray[np.int64] + values: np.ndarray, # np.ndarray[np.int64] mode: RoundTo, nanos: int, ) -> np.ndarray: ... # np.ndarray[np.int64] diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi index 5a2985d0e815b..22e6395a1fe99 100644 --- a/pandas/_libs/tslibs/nattype.pyi +++ b/pandas/_libs/tslibs/nattype.pyi @@ -1,4 +1,3 @@ - from datetime import ( datetime, timedelta, @@ -17,11 +16,9 @@ def is_null_datetimelike(val: object, inat_is_null: bool = ...) -> bool: ... class NaTType(datetime): value: np.int64 - def asm8(self) -> np.datetime64: ... def to_datetime64(self) -> np.datetime64: ... def to_numpy(self, dtype=..., copy: bool = ...) -> np.datetime64: ... - @property def is_leap_year(self) -> bool: ... @property @@ -36,7 +33,6 @@ class NaTType(datetime): def is_quarter_end(self) -> bool: ... @property def is_year_end(self) -> bool: ... - @property def day_of_year(self) -> float: ... @property @@ -53,81 +49,61 @@ class NaTType(datetime): def week(self) -> float: ... @property def weekofyear(self) -> float: ... - def day_name(self) -> float: ... def month_name(self) -> float: ... - # error: Return type "float" of "weekday" incompatible with return # type "int" in supertype "date" def weekday(self) -> float: ... # type: ignore[override] - # error: Return type "float" of "isoweekday" incompatible with return # type "int" in supertype "date" def isoweekday(self) -> float: ... # type: ignore[override] - def total_seconds(self) -> float: ... - # error: Signature of "today" incompatible with supertype "datetime" def today(self, *args, **kwargs) -> NaTType: ... # type: ignore[override] # error: Signature of "today" incompatible with supertype "datetime" def now(self, *args, **kwargs) -> NaTType: ... 
# type: ignore[override] - def to_pydatetime(self) -> NaTType: ... def date(self) -> NaTType: ... - def round(self) -> NaTType: ... def floor(self) -> NaTType: ... def ceil(self) -> NaTType: ... - def tz_convert(self) -> NaTType: ... def tz_localize(self) -> NaTType: ... - def replace(self, *args, **kwargs) -> NaTType: ... - # error: Return type "float" of "year" incompatible with return # type "int" in supertype "date" @property def year(self) -> float: ... # type: ignore[override] - @property def quarter(self) -> float: ... - # error: Return type "float" of "month" incompatible with return # type "int" in supertype "date" @property def month(self) -> float: ... # type: ignore[override] - # error: Return type "float" of "day" incompatible with return # type "int" in supertype "date" @property def day(self) -> float: ... # type: ignore[override] - # error: Return type "float" of "hour" incompatible with return # type "int" in supertype "date" @property def hour(self) -> float: ... # type: ignore[override] - # error: Return type "float" of "minute" incompatible with return # type "int" in supertype "date" @property def minute(self) -> float: ... # type: ignore[override] - # error: Return type "float" of "second" incompatible with return # type "int" in supertype "date" @property def second(self) -> float: ... # type: ignore[override] - @property def millisecond(self) -> float: ... - # error: Return type "float" of "microsecond" incompatible with return # type "int" in supertype "date" @property def microsecond(self) -> float: ... # type: ignore[override] - @property def nanosecond(self) -> float: ... - # inject Timedelta properties @property def days(self) -> float: ... @@ -135,35 +111,29 @@ class NaTType(datetime): def microseconds(self) -> float: ... @property def nanoseconds(self) -> float: ... - # inject Period properties @property def qyear(self) -> float: ... - def __eq__(self, other: Any) -> bool: ... def __ne__(self, other: Any) -> bool: ... 
# https://github.com/python/mypy/issues/9015 # error: Argument 1 of "__lt__" is incompatible with supertype "date"; # supertype defines the argument type as "date" def __lt__( # type: ignore[override] - self, - other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 + self, other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 ) -> bool: ... # error: Argument 1 of "__le__" is incompatible with supertype "date"; # supertype defines the argument type as "date" def __le__( # type: ignore[override] - self, - other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 + self, other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 ) -> bool: ... # error: Argument 1 of "__gt__" is incompatible with supertype "date"; # supertype defines the argument type as "date" def __gt__( # type: ignore[override] - self, - other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 + self, other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 ) -> bool: ... # error: Argument 1 of "__ge__" is incompatible with supertype "date"; # supertype defines the argument type as "date" def __ge__( # type: ignore[override] - self, - other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 + self, other: datetime | timedelta | Period | np.datetime64 | np.timedelta64 ) -> bool: ... diff --git a/pandas/_libs/tslibs/parsing.pyi b/pandas/_libs/tslibs/parsing.pyi index f346204d69d25..fc08a48cee343 100644 --- a/pandas/_libs/tslibs/parsing.pyi +++ b/pandas/_libs/tslibs/parsing.pyi @@ -6,35 +6,26 @@ from pandas._libs.tslibs.offsets import BaseOffset class DateParseError(ValueError): ... - def parse_datetime_string( date_string: str, dayfirst: bool = ..., yearfirst: bool = ..., **kwargs, ) -> datetime: ... - - def parse_time_string( arg: str, freq: BaseOffset | str | None = ..., dayfirst: bool | None = ..., yearfirst: bool | None = ..., ) -> tuple[datetime, str]: ... 
- - def _does_string_look_like_datetime(py_string: str) -> bool: ... - def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ... - - def try_parse_dates( values: np.ndarray, # object[:] parser=..., dayfirst: bool = ..., default: datetime | None = ..., ) -> np.ndarray: ... # np.ndarray[object] - def try_parse_date_and_time( dates: np.ndarray, # object[:] times: np.ndarray, # object[:] @@ -42,40 +33,29 @@ def try_parse_date_and_time( time_parser=..., dayfirst: bool = ..., default: datetime | None = ..., -) -> np.ndarray: ... # np.ndarray[object] - +) -> np.ndarray: ... # np.ndarray[object] def try_parse_year_month_day( years: np.ndarray, # object[:] - months: np.ndarray, # object[:] - days: np.ndarray, # object[:] -) -> np.ndarray: ... # np.ndarray[object] - - + months: np.ndarray, # object[:] + days: np.ndarray, # object[:] +) -> np.ndarray: ... # np.ndarray[object] def try_parse_datetime_components( - years: np.ndarray, # object[:] + years: np.ndarray, # object[:] months: np.ndarray, # object[:] - days: np.ndarray, # object[:] - hours: np.ndarray, # object[:] - minutes: np.ndarray, # object[:] - seconds: np.ndarray, # object[:] -) -> np.ndarray: ... # np.ndarray[object] - - + days: np.ndarray, # object[:] + hours: np.ndarray, # object[:] + minutes: np.ndarray, # object[:] + seconds: np.ndarray, # object[:] +) -> np.ndarray: ... # np.ndarray[object] def format_is_iso(f: str) -> bool: ... - - def guess_datetime_format( dt_str, dayfirst: bool = ..., dt_str_parse=..., dt_str_split=..., ) -> str | None: ... - - def concat_date_cols( date_cols: tuple, keep_trivial_numbers: bool = ..., ) -> np.ndarray: ... # np.ndarray[object] - - def get_rule_month(source: str) -> str: ... 
diff --git a/pandas/_libs/tslibs/period.pyi b/pandas/_libs/tslibs/period.pyi index 49e630d605310..97738d51b5a0e 100644 --- a/pandas/_libs/tslibs/period.pyi +++ b/pandas/_libs/tslibs/period.pyi @@ -19,41 +19,34 @@ def periodarr_to_dt64arr( periodarr: np.ndarray, # const int64_t[:] freq: int, ) -> np.ndarray: ... # np.ndarray[np.int64] - def period_asfreq_arr( arr: np.ndarray, # ndarray[int64_t] arr, freq1: int, freq2: int, end: bool, -) -> np.ndarray: ... # np.ndarray[np.int64] - +) -> np.ndarray: ... # np.ndarray[np.int64] def get_period_field_arr( field: str, arr: np.ndarray, # const int64_t[:] freq: int, -) -> np.ndarray: ... # np.ndarray[np.int64] - +) -> np.ndarray: ... # np.ndarray[np.int64] def from_ordinals( values: np.ndarray, # const int64_t[:] freq: Frequency, ) -> np.ndarray: ... # np.ndarray[np.int64] - def extract_ordinals( values: np.ndarray, # np.ndarray[object] freq: Frequency | int, ) -> np.ndarray: ... # np.ndarray[np.int64] - def extract_freq( values: np.ndarray, # np.ndarray[object] ) -> BaseOffset: ... # exposed for tests def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ... - def period_ordinal( y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int ) -> int: ... - def freq_to_dtype_code(freq: BaseOffset) -> int: ... def validate_end_alias(how: str) -> Literal["E", "S"]: ... @@ -75,84 +68,57 @@ class Period: minute=None, second=None, ) -> Period | NaTType: ... - @classmethod def _maybe_convert_freq(cls, freq) -> BaseOffset: ... - @classmethod def _from_ordinal(cls, ordinal: int, freq) -> Period: ... - @classmethod def now(cls, freq=...) -> Period: ... - def strftime(self, fmt: str) -> str: ... - def to_timestamp( self, - freq: str | BaseOffset | None =..., + freq: str | BaseOffset | None = ..., how: str = ..., tz: Timezone | None = ..., ) -> Timestamp: ... - def asfreq(self, freq, how=...) -> Period: ... - @property def freqstr(self) -> str: ... - @property def is_leap_year(self) -> bool: ... 
- @property def daysinmonth(self) -> int: ... - @property def days_in_month(self) -> int: ... - @property def qyear(self) -> int: ... - @property def quarter(self) -> int: ... - @property def day_of_year(self) -> int: ... - @property def weekday(self) -> int: ... - @property def day_of_week(self) -> int: ... - @property def week(self) -> int: ... - @property def weekofyear(self) -> int: ... - @property def second(self) -> int: ... - @property def minute(self) -> int: ... - @property def hour(self) -> int: ... - @property def day(self) -> int: ... - @property def month(self) -> int: ... - @property def year(self) -> int: ... - @property def end_time(self) -> Timestamp: ... - @property def start_time(self) -> Timestamp: ... - def __sub__(self, other) -> Period | BaseOffset: ... - def __add__(self, other) -> Period: ... diff --git a/pandas/_libs/tslibs/strptime.pyi b/pandas/_libs/tslibs/strptime.pyi index 3748c169bb1c6..1b4376ceadebe 100644 --- a/pandas/_libs/tslibs/strptime.pyi +++ b/pandas/_libs/tslibs/strptime.pyi @@ -6,6 +6,7 @@ def array_strptime( values: np.ndarray, # np.ndarray[object] fmt: Optional[str], exact: bool = True, - errors: str = "raise" + errors: str = "raise", ) -> tuple[np.ndarray, np.ndarray]: ... + # first ndarray is M8[ns], second is object ndarray of Optional[tzinfo] diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi index 9ccc3a8ed5fa4..31a836b2c2079 100644 --- a/pandas/_libs/tslibs/timedeltas.pyi +++ b/pandas/_libs/tslibs/timedeltas.pyi @@ -15,26 +15,18 @@ from pandas._libs.tslibs import ( _S = TypeVar("_S") - def ints_to_pytimedelta( arr: np.ndarray, # const int64_t[:] box: bool = ..., ) -> np.ndarray: ... # np.ndarray[object] - - def array_to_timedelta64( values: np.ndarray, # ndarray[object] unit: str | None = ..., errors: str = ..., ) -> np.ndarray: ... # np.ndarray[m8ns] - - def parse_timedelta_unit(unit: str | None) -> str: ... 
- - def delta_to_nanoseconds(delta: Tick | np.timedelta64 | timedelta | int) -> int: ... - class Timedelta(timedelta): min: ClassVar[Timedelta] max: ClassVar[Timedelta] @@ -43,12 +35,8 @@ class Timedelta(timedelta): # error: "__new__" must return a class instance (got "Union[Timedelta, NaTType]") def __new__( # type: ignore[misc] - cls: Type[_S], - value=..., - unit=..., - **kwargs + cls: Type[_S], value=..., unit=..., **kwargs ) -> _S | NaTType: ... - @property def days(self) -> int: ... @property @@ -56,21 +44,16 @@ class Timedelta(timedelta): @property def microseconds(self) -> int: ... def total_seconds(self) -> float: ... - def to_pytimedelta(self) -> timedelta: ... def to_timedelta64(self) -> np.timedelta64: ... - @property def asm8(self) -> np.timedelta64: ... - # TODO: round/floor/ceil could return NaT? def round(self: _S, freq) -> _S: ... def floor(self: _S, freq) -> _S: ... def ceil(self: _S, freq) -> _S: ... - @property def resolution_string(self) -> str: ... - def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... @@ -80,19 +63,16 @@ class Timedelta(timedelta): def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... - @overload def __floordiv__(self, other: timedelta) -> int: ... @overload def __floordiv__(self, other: int) -> timedelta: ... - @overload def __truediv__(self, other: timedelta) -> float: ... @overload def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> tuple[int, timedelta]: ... - def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... 
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index 8728b700a1f6d..4ed37abe3f5b5 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -26,10 +26,8 @@ from pandas._libs.tslibs import ( _S = TypeVar("_S") - def integer_op_not_supported(obj) -> None: ... - class Timestamp(datetime): min: ClassVar[Timestamp] max: ClassVar[Timestamp] @@ -40,9 +38,15 @@ class Timestamp(datetime): # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") def __new__( # type: ignore[misc] cls: Type[_S], - ts_input: int | np.integer | float | str | _date | datetime | np.datetime64 = ..., + ts_input: int + | np.integer + | float + | str + | _date + | datetime + | np.datetime64 = ..., freq=..., - tz: str | _tzinfo | None | int= ..., + tz: str | _tzinfo | None | int = ..., unit=..., year: int | None = ..., month: int | None = ..., @@ -54,9 +58,8 @@ class Timestamp(datetime): nanosecond: int | None = ..., tzinfo: _tzinfo | None = ..., *, - fold: int | None= ..., + fold: int | None = ..., ) -> _S | NaTType: ... - @property def year(self) -> int: ... @property @@ -75,10 +78,8 @@ class Timestamp(datetime): def tzinfo(self) -> Optional[_tzinfo]: ... @property def tz(self) -> Optional[_tzinfo]: ... - @property def fold(self) -> int: ... - @classmethod def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ... @classmethod @@ -87,7 +88,6 @@ class Timestamp(datetime): def today(cls: Type[_S]) -> _S: ... @classmethod def fromordinal(cls: Type[_S], n: int) -> _S: ... - if sys.version_info >= (3, 8): @classmethod def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ... @@ -98,28 +98,23 @@ class Timestamp(datetime): @overload @classmethod def now(cls, tz: _tzinfo) -> datetime: ... - @classmethod def utcnow(cls: Type[_S]) -> _S: ... @classmethod - def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ... 
- + def combine( + cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ... + ) -> datetime: ... @classmethod def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... - def strftime(self, fmt: str) -> str: ... def __format__(self, fmt: str) -> str: ... - def toordinal(self) -> int: ... def timetuple(self) -> struct_time: ... - def timestamp(self) -> float: ... - def utctimetuple(self) -> struct_time: ... def date(self) -> _date: ... def time(self) -> _time: ... def timetz(self) -> _time: ... - def replace( self, year: int = ..., @@ -133,22 +128,17 @@ class Timestamp(datetime): *, fold: int = ..., ) -> datetime: ... - if sys.version_info >= (3, 8): def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ... else: def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ... - def ctime(self) -> str: ... def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... - @classmethod def strptime(cls, date_string: str, format: str) -> datetime: ... - def utcoffset(self) -> Optional[timedelta]: ... def tzname(self) -> Optional[str]: ... def dst(self) -> Optional[timedelta]: ... - def __le__(self, other: datetime) -> bool: ... # type: ignore def __lt__(self, other: datetime) -> bool: ... # type: ignore def __ge__(self, other: datetime) -> bool: ... # type: ignore @@ -163,12 +153,10 @@ class Timestamp(datetime): def __sub__(self, other: datetime) -> timedelta: ... @overload def __sub__(self, other: timedelta) -> datetime: ... - def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... def isocalendar(self) -> tuple[int, int, int]: ... - @property def is_leap_year(self) -> bool: ... @property @@ -183,23 +171,25 @@ class Timestamp(datetime): def is_quarter_end(self) -> bool: ... @property def is_year_end(self) -> bool: ... - def to_pydatetime(self, warn: bool = ...) -> datetime: ... def to_datetime64(self) -> np.datetime64: ... def to_period(self, freq) -> Period: ... def to_julian_date(self) -> np.float64: ... 
- @property def asm8(self) -> np.datetime64: ... - def tz_convert(self: _S, tz) -> _S: ... - # TODO: could return NaT? - def tz_localize(self: _S, tz, ambiguous: str = ..., nonexistent: str = ...) -> _S: ... - + def tz_localize( + self: _S, tz, ambiguous: str = ..., nonexistent: str = ... + ) -> _S: ... def normalize(self: _S) -> _S: ... - # TODO: round/floor/ceil could return NaT? - def round(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... - def floor(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... - def ceil(self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ...) -> _S: ... + def round( + self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + ) -> _S: ... + def floor( + self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + ) -> _S: ... + def ceil( + self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + ) -> _S: ... diff --git a/pandas/_libs/tslibs/timezones.pyi b/pandas/_libs/tslibs/timezones.pyi index 04a1b391dc30a..fc03e4e4c68da 100644 --- a/pandas/_libs/tslibs/timezones.pyi +++ b/pandas/_libs/tslibs/timezones.pyi @@ -13,20 +13,17 @@ import numpy as np # imported from dateutil.tz dateutil_gettz: Callable[[str], tzinfo] - def tz_standardize(tz: tzinfo) -> tzinfo: ... - def tz_compare(start: Optional[tzinfo], end: Optional[tzinfo]) -> bool: ... - def infer_tzinfo( - start: Optional[datetime], end: Optional[datetime], + start: Optional[datetime], + end: Optional[datetime], ) -> Optional[tzinfo]: ... # ndarrays returned are both int64_t def get_dst_info(tz: tzinfo) -> tuple[np.ndarray, np.ndarray, str]: ... - -def maybe_get_tz(tz: Optional[Union[str, int, np.int64, tzinfo]]) -> Optional[tzinfo]: ... - +def maybe_get_tz( + tz: Optional[Union[str, int, np.int64, tzinfo]] +) -> Optional[tzinfo]: ... def get_timezone(tz: tzinfo) -> Union[tzinfo, str]: ... - def is_utc(tz: Optional[tzinfo]) -> bool: ... 
diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi index f47885a2e3306..219548474f9e6 100644 --- a/pandas/_libs/tslibs/tzconversion.pyi +++ b/pandas/_libs/tslibs/tzconversion.pyi @@ -14,9 +14,7 @@ def tz_convert_from_utc( vals: np.ndarray, # const int64_t[:] tz: tzinfo, ) -> np.ndarray: ... # np.ndarray[np.int64] - def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... - def tz_localize_to_utc( vals: np.ndarray, # np.ndarray[np.int64] tz: Optional[tzinfo], diff --git a/pandas/_libs/tslibs/vectorized.pyi b/pandas/_libs/tslibs/vectorized.pyi index 6ed1e10ef2353..ec8b33cd33d2c 100644 --- a/pandas/_libs/tslibs/vectorized.pyi +++ b/pandas/_libs/tslibs/vectorized.pyi @@ -18,26 +18,18 @@ def dt64arr_to_periodarr( freq: int, tz: Optional[tzinfo], ) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1] - - def is_date_array_normalized( stamps: np.ndarray, # const int64_t[:] tz: Optional[tzinfo] = None, ) -> bool: ... - - def normalize_i8_timestamps( stamps: np.ndarray, # const int64_t[:] tz: Optional[tzinfo], ) -> np.ndarray: ... # np.ndarray[np.int64] - - def get_resolution( stamps: np.ndarray, # const int64_t[:] tz: Optional[tzinfo] = None, ) -> Resolution: ... - - def ints_to_pydatetime( arr: np.ndarray, # const int64_t[:}] tz: Optional[tzinfo] = None, diff --git a/pandas/_libs/window/aggregations.pyi b/pandas/_libs/window/aggregations.pyi index 3391edac84224..fe083fe415e4b 100644 --- a/pandas/_libs/window/aggregations.pyi +++ b/pandas/_libs/window/aggregations.pyi @@ -11,58 +11,50 @@ def roll_sum( start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t -) -> np.ndarray: ... # np.ndarray[float] - +) -> np.ndarray: ... # np.ndarray[float] def roll_mean( values: np.ndarray, # const float64_t[:] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t -) -> np.ndarray: ... # np.ndarray[float] - +) -> np.ndarray: ... 
# np.ndarray[float] def roll_var( values: np.ndarray, # const float64_t[:] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t ddof: int = ..., -) -> np.ndarray: ... # np.ndarray[float] - +) -> np.ndarray: ... # np.ndarray[float] def roll_skew( values: np.ndarray, # np.ndarray[np.float64] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t -) -> np.ndarray: ... # np.ndarray[float] - +) -> np.ndarray: ... # np.ndarray[float] def roll_kurt( values: np.ndarray, # np.ndarray[np.float64] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t -) -> np.ndarray: ... # np.ndarray[float] - +) -> np.ndarray: ... # np.ndarray[float] def roll_median_c( values: np.ndarray, # np.ndarray[np.float64] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t ) -> np.ndarray: ... # np.ndarray[float] - def roll_max( values: np.ndarray, # np.ndarray[np.float64] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t ) -> np.ndarray: ... # np.ndarray[float] - def roll_min( values: np.ndarray, # np.ndarray[np.float64] start: np.ndarray, # np.ndarray[np.int64] end: np.ndarray, # np.ndarray[np.int64] minp: int, # int64_t ) -> np.ndarray: ... # np.ndarray[float] - def roll_quantile( values: np.ndarray, # const float64_t[:] start: np.ndarray, # np.ndarray[np.int64] @@ -71,7 +63,6 @@ def roll_quantile( quantile: float, # float64_t interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"], ) -> np.ndarray: ... # np.ndarray[float] - def roll_apply( obj: object, start: np.ndarray, # np.ndarray[np.int64] @@ -82,26 +73,22 @@ def roll_apply( args: tuple[Any, ...], kwargs: dict[str, Any], ) -> np.ndarray: ... 
# np.ndarray[float] # FIXME: could also be type(obj) if n==0 - def roll_weighted_sum( values: np.ndarray, # const float64_t[:] weights: np.ndarray, # const float64_t[:] minp: int, ) -> np.ndarray: ... # np.ndarray[np.float64] - def roll_weighted_mean( values: np.ndarray, # const float64_t[:] weights: np.ndarray, # const float64_t[:] minp: int, ) -> np.ndarray: ... # np.ndarray[np.float64] - def roll_weighted_var( values: np.ndarray, # const float64_t[:] weights: np.ndarray, # const float64_t[:] minp: int, # int64_t ddof: int, # unsigned int ) -> np.ndarray: ... # np.ndarray[np.float64] - def ewma( vals: np.ndarray, # const float64_t[:] start: np.ndarray, # const int64_t[:] @@ -112,7 +99,6 @@ def ewma( ignore_na: bool, deltas: np.ndarray, # const float64_t[:] ) -> np.ndarray: ... # np.ndarray[np.float64] - def ewmcov( input_x: np.ndarray, # const float64_t[:] start: np.ndarray, # const int64_t[:] diff --git a/pandas/_libs/window/indexers.pyi b/pandas/_libs/window/indexers.pyi index a32fe2f0f8b03..2dea9362228e5 100644 --- a/pandas/_libs/window/indexers.pyi +++ b/pandas/_libs/window/indexers.pyi @@ -1,13 +1,10 @@ import numpy as np def calculate_variable_window_bounds( - num_values: int, # int64_t - window_size: int, # int64_t + num_values: int, # int64_t + window_size: int, # int64_t min_periods, center: bool, closed: str | None, index: np.ndarray, # const int64_t[:] -) -> tuple[ - np.ndarray, # np.ndarray[np.int64] - np.ndarray, # np.ndarray[np.int64] -]: ... +) -> tuple[np.ndarray, np.ndarray,]: ... # np.ndarray[np.int64] # np.ndarray[np.int64] diff --git a/pandas/_libs/writers.pyi b/pandas/_libs/writers.pyi index 67f6059c2a825..c188dc2bd9048 100644 --- a/pandas/_libs/writers.pyi +++ b/pandas/_libs/writers.pyi @@ -8,15 +8,11 @@ def write_csv_rows( cols: np.ndarray, writer: object, # _csv.writer ) -> None: ... - def convert_json_to_lines(arr: str) -> str: ... - def max_len_string_array( arr: np.ndarray, # pandas_string[:] ) -> int: ... 
- def word_len(val: object) -> int: ... - def string_array_replace_from_nan_rep( arr: np.ndarray, # np.ndarray[object, ndim=1] nan_rep: object, diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index aaf58f1fcb150..5f7981bc71e15 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -271,7 +271,7 @@ def makeUnicodeIndex(k=10, name=None): def makeCategoricalIndex(k=10, n=3, name=None, **kwargs): - """ make a length k index or n categories """ + """make a length k index or n categories""" x = rands_array(nchars=4, size=n) return CategoricalIndex( Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs @@ -279,7 +279,7 @@ def makeCategoricalIndex(k=10, n=3, name=None, **kwargs): def makeIntervalIndex(k=10, name=None, **kwargs): - """ make a length k IntervalIndex """ + """make a length k IntervalIndex""" x = np.linspace(0, 100, num=(k + 1)) return IntervalIndex.from_breaks(x, name=name, **kwargs) diff --git a/pandas/conftest.py b/pandas/conftest.py index f948dc11bc014..7aeb263d4d3e6 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -573,7 +573,7 @@ def datetime_series(): def _create_series(index): - """ Helper for the _series dict """ + """Helper for the _series dict""" size = len(index) data = np.random.randn(size) return Series(data, index=index, name="a") diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f8f5e5e05bc35..954fc12b7cf1c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1840,7 +1840,7 @@ def safe_sort( def _sort_mixed(values) -> np.ndarray: - """ order ints before strings in 1d arrays, safe in py3 """ + """order ints before strings in 1d arrays, safe in py3""" str_pos = np.array([isinstance(x, str) for x in values], dtype=bool) nums = np.sort(values[~str_pos]) strs = np.sort(values[str_pos]) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 00b49c2f4f951..388c1881afed7 100644 --- a/pandas/core/apply.py +++ 
b/pandas/core/apply.py @@ -77,7 +77,7 @@ def frame_apply( args=None, kwargs=None, ) -> FrameApply: - """ construct and return a row or column based frame apply object """ + """construct and return a row or column based frame apply object""" axis = obj._get_axis_number(axis) klass: type[FrameApply] if axis == 0: @@ -639,7 +639,7 @@ def dtypes(self) -> Series: return self.obj.dtypes def apply(self) -> FrameOrSeriesUnion: - """ compute the results """ + """compute the results""" # dispatch to agg if is_list_like(self.f): return self.apply_multiple() @@ -733,7 +733,7 @@ def apply_empty_result(self): return self.obj.copy() def apply_raw(self): - """ apply to the values as a numpy array """ + """apply to the values as a numpy array""" def wrap_function(func): """ @@ -867,7 +867,7 @@ def result_columns(self) -> Index: def wrap_results_for_axis( self, results: ResType, res_index: Index ) -> FrameOrSeriesUnion: - """ return the results for the rows """ + """return the results for the rows""" if self.result_type == "reduce": # e.g. 
test_apply_dict GH#8735 @@ -950,7 +950,7 @@ def result_columns(self) -> Index: def wrap_results_for_axis( self, results: ResType, res_index: Index ) -> FrameOrSeriesUnion: - """ return the results for the columns """ + """return the results for the columns""" result: FrameOrSeriesUnion # we have requested to expand @@ -969,7 +969,7 @@ def wrap_results_for_axis( return result def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: - """ infer the results to the same shape as the input object """ + """infer the results to the same shape as the input object""" result = self.obj._constructor(data=results) result = result.T diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 95d9409b265ce..ccdfca0af985b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1650,7 +1650,7 @@ def _internal_get_values(self): return np.array(self) def check_for_ordered(self, op): - """ assert that we are ordered """ + """assert that we are ordered""" if not self.ordered: raise TypeError( f"Categorical is not ordered for operation {op}\n" diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 11f9f645920ec..d274501143916 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -78,7 +78,7 @@ class BaseMaskedDtype(ExtensionDtype): @cache_readonly def numpy_dtype(self) -> np.dtype: - """ Return an instance of our numpy dtype """ + """Return an instance of our numpy dtype""" return np.dtype(self.type) @cache_readonly @@ -87,7 +87,7 @@ def kind(self) -> str: @cache_readonly def itemsize(self) -> int: - """ Return the number of bytes in this dtype """ + """Return the number of bytes in this dtype""" return self.numpy_dtype.itemsize @classmethod diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4847372f18239..1219052e4018e 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ 
-1452,7 +1452,7 @@ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value) ) - result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs) + result = getattr(ufunc, method)(*(np.asarray(x) for x in inputs), **kwargs) if out: if len(out) == 1: out = out[0] diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index ad2c5f75fc32c..f399d3230d897 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -34,7 +34,7 @@ def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False): nonnull_labels = ss.dropna() def get_indexers(levels): - """ Return sparse coords and dense labels for subset levels """ + """Return sparse coords and dense labels for subset levels""" # TODO: how to do this better? cleanly slice nonnull_labels given the # coord values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index] @@ -58,7 +58,7 @@ def _get_label_to_i_dict(labels, sort_labels=False): return {k: i for i, k in enumerate(labels)} def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): - ilabels = list(zip(*[index._get_level_values(i) for i in subset])) + ilabels = list(zip(*(index._get_level_values(i) for i in subset))) labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels) labels_to_i = Series(labels_to_i) if len(subset) > 1: diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 02660539f4981..4f90a83c6b212 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -567,7 +567,7 @@ def visit_List(self, node, **kwargs): visit_Tuple = visit_List def visit_Index(self, node, **kwargs): - """ df.index[4] """ + """df.index[4]""" return self.visit(node.value) def visit_Subscript(self, node, **kwargs): @@ -591,7 +591,7 @@ def visit_Subscript(self, node, **kwargs): return self.term_type(name, 
env=self.env) def visit_Slice(self, node, **kwargs): - """ df.index[slice(4,6)] """ + """df.index[slice(4,6)]""" lower = node.lower if lower is not None: lower = self.visit(lower).value diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 2f87e0bcce70a..863832679efb0 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -73,7 +73,7 @@ def _evaluate_standard(op, op_str, a, b): def _can_use_numexpr(op, op_str, a, b, dtype_check): - """ return a boolean if we WILL be using numexpr """ + """return a boolean if we WILL be using numexpr""" if op_str is not None: # required min elements (otherwise we are adding overhead) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 0e6a7551ab399..f733a5c43dfb3 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -113,7 +113,7 @@ def _disallow_scalar_only_bool_ops(self): def prune(self, klass): def pr(left, right): - """ create and return a new specialized BinOp from myself """ + """create and return a new specialized BinOp from myself""" if left is None: return right elif right is None: @@ -154,7 +154,7 @@ def pr(left, right): return res def conform(self, rhs): - """ inplace conform rhs """ + """inplace conform rhs""" if not is_list_like(rhs): rhs = [rhs] if isinstance(rhs, np.ndarray): @@ -163,7 +163,7 @@ def conform(self, rhs): @property def is_valid(self) -> bool: - """ return True if this is a valid field """ + """return True if this is a valid field""" return self.lhs in self.queryables @property @@ -176,21 +176,21 @@ def is_in_table(self) -> bool: @property def kind(self): - """ the kind of my field """ + """the kind of my field""" return getattr(self.queryables.get(self.lhs), "kind", None) @property def meta(self): - """ the meta of my field """ + """the meta of my field""" return getattr(self.queryables.get(self.lhs), "meta", None) @property def 
metadata(self): - """ the metadata of my field """ + """the metadata of my field""" return getattr(self.queryables.get(self.lhs), "metadata", None) def generate(self, v) -> str: - """ create and return the op string for this TermValue """ + """create and return the op string for this TermValue""" val = v.tostring(self.encoding) return f"({self.lhs} {self.op} {val})" @@ -273,7 +273,7 @@ def __repr__(self) -> str: return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]") def invert(self): - """ invert the filter """ + """invert the filter""" if self.filter is not None: self.filter = ( self.filter[0], @@ -283,7 +283,7 @@ def invert(self): return self def format(self): - """ return the actual filter format """ + """return the actual filter format""" return [self.filter] def evaluate(self): @@ -338,7 +338,7 @@ def __repr__(self) -> str: return pprint_thing(f"[Condition : [{self.condition}]]") def invert(self): - """ invert the condition """ + """invert the condition""" # if self.condition is not None: # self.condition = "~(%s)" % self.condition # return self @@ -347,7 +347,7 @@ def invert(self): ) def format(self): - """ return the actual ne format """ + """return the actual ne format""" return self.condition def evaluate(self): @@ -604,7 +604,7 @@ def __repr__(self) -> str: return pprint_thing(self.expr) def evaluate(self): - """ create and return the numexpr condition and filter """ + """create and return the numexpr condition and filter""" try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError as err: @@ -624,7 +624,7 @@ def evaluate(self): class TermValue: - """ hold a term value the we use to construct a condition/filter """ + """hold a term value the we use to construct a condition/filter""" def __init__(self, value, converted, kind: str): assert isinstance(kind, str), kind @@ -633,7 +633,7 @@ def __init__(self, value, converted, kind: str): self.kind = kind def tostring(self, encoding) -> str: - """ quote the string if not 
encoded else encode and return """ + """quote the string if not encoded else encode and return""" if self.kind == "string": if encoding is not None: return str(self.converted) @@ -646,7 +646,7 @@ def tostring(self, encoding) -> str: def maybe_expression(s) -> bool: - """ loose checking if s is a pytables-acceptable expression """ + """loose checking if s is a pytables-acceptable expression""" if not isinstance(s, str): return False ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..67e297e6c7eba 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -112,7 +112,7 @@ def maybe_convert_platform( values: list | tuple | range | np.ndarray | ExtensionArray, ) -> ArrayLike: - """ try to do platform conversion, allow ndarray or list here """ + """try to do platform conversion, allow ndarray or list here""" arr: ArrayLike if isinstance(values, (list, tuple, range)): @@ -935,7 +935,7 @@ def invalidate_string_dtypes(dtype_set: set[DtypeObj]): def coerce_indexer_dtype(indexer, categories): - """ coerce the indexer input array to the smallest dtype possible """ + """coerce the indexer input array to the smallest dtype possible""" length = len(categories) if length < _int8_max: return ensure_int8(indexer) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 3f43681687945..a1e19a7f5e0e7 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -142,7 +142,7 @@ def ensure_python_int(value: Union[int, np.integer]) -> int: def classes(*klasses) -> Callable: - """ evaluate if the tipo is a subclass of the klasses """ + """evaluate if the tipo is a subclass of the klasses""" return lambda tipo: issubclass(tipo, klasses) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index c5efd8f77495c..b86fab6a226bf 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py 
@@ -110,7 +110,7 @@ def __getstate__(self) -> dict[str_type, Any]: @classmethod def reset_cache(cls) -> None: - """ clear the cache """ + """clear the cache""" cls._cache_dtypes = {} diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 49dc71954fd8f..0c6c98d29ef0c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -252,7 +252,7 @@ def _init_mgr( dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: - """ passed a manager and a axes dict """ + """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) @@ -433,7 +433,7 @@ def set_flags( @final @classmethod def _validate_dtype(cls, dtype) -> DtypeObj | None: - """ validate the passed dtype """ + """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) @@ -4017,7 +4017,7 @@ def get(self, key, default=None): @final @property def _is_view(self) -> bool_t: - """Return boolean indicating if self is view of another array """ + """Return boolean indicating if self is view of another array""" return self._mgr.is_view @final @@ -4873,7 +4873,7 @@ def _reindex_with_indexers( copy: bool_t = False, allow_dups: bool_t = False, ) -> FrameOrSeries: - """allow_dups indicates an internal call here """ + """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): @@ -5592,7 +5592,7 @@ def _is_mixed_type(self) -> bool_t: @final def _check_inplace_setting(self, value) -> bool_t: - """ check whether we allow in-place setting with this type of value """ + """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan thru diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 6903c8e99e489..b65f26c7174fc 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -824,7 +824,7 @@ 
def apply(self, f: F, data: FrameOrSeries, axis: int = 0): @cache_readonly def indices(self): - """ dict {group name -> group indices} """ + """dict {group name -> group indices}""" if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex): # This shows unused categories in indices GH#38642 return self.groupings[0].indices @@ -858,7 +858,7 @@ def size(self) -> Series: @cache_readonly def groups(self) -> dict[Hashable, np.ndarray]: - """ dict {group name -> group labels} """ + """dict {group name -> group labels}""" if len(self.groupings) == 1: return self.groupings[0].groups else: @@ -1132,7 +1132,7 @@ def __init__( @cache_readonly def groups(self): - """ dict {group name -> group labels} """ + """dict {group name -> group labels}""" # this is mainly for compat # GH 3881 result = { diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 1541885887dab..e899ced799f81 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -636,7 +636,7 @@ def _concat(self, to_concat: list[Index], name: Hashable) -> Index: return type(self)._simple_new(cat, name=name) def _delegate_method(self, name: str, *args, **kwargs): - """ method delegation to the ._values """ + """method delegation to the ._values""" method = getattr(self._values, name) if "inplace" in kwargs: raise ValueError("cannot use inplace with CategoricalIndex") diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 59882422f5439..60d0ffa6d2f54 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1211,11 +1211,11 @@ def copy( return new_index def __array__(self, dtype=None) -> np.ndarray: - """ the array interface, return my values """ + """the array interface, return my values""" return self.values def view(self, cls=None): - """ this is defined as a copy with the same identity """ + """this is defined as a copy with the same identity""" result = self.copy() result._id = self._id return 
result @@ -1234,7 +1234,7 @@ def dtype(self) -> np.dtype: return np.dtype("O") def _is_memory_usage_qualified(self) -> bool: - """ return a boolean if we need a qualified .info display """ + """return a boolean if we need a qualified .info display""" def f(level): return "mixed" in level or "string" in level or "unicode" in level @@ -1250,7 +1250,7 @@ def memory_usage(self, deep: bool = False) -> int: @cache_readonly def nbytes(self) -> int: - """ return the number of bytes in the underlying data """ + """return the number of bytes in the underlying data""" return self._nbytes(False) def _nbytes(self, deep: bool = False) -> int: @@ -1591,7 +1591,7 @@ def is_monotonic_decreasing(self) -> bool: @cache_readonly def _inferred_type_levels(self) -> list[str]: - """ return a list of the inferred types, one for each level """ + """return a list of the inferred types, one for each level""" return [i.inferred_type for i in self.levels] @doc(Index.duplicated) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index ead1a2a4a544b..8588f55f64389 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -173,7 +173,7 @@ def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex: @cache_readonly def _constructor(self) -> type[Int64Index]: - """ return the class to use for construction """ + """return the class to use for construction""" return Int64Index @cache_readonly @@ -197,7 +197,7 @@ def _int64index(self) -> Int64Index: return res def _get_data_as_items(self): - """ return a list of tuples of start, stop, step """ + """return a list of tuples of start, stop, step""" rng = self._range return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)] @@ -350,7 +350,7 @@ def dtype(self) -> np.dtype: @property def is_unique(self) -> bool: - """ return if the index has unique values """ + """return if the index has unique values""" return True @cache_readonly diff --git a/pandas/core/internals/array_manager.py 
b/pandas/core/internals/array_manager.py index 31e32b053367b..76967cdc9b52e 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -433,7 +433,7 @@ def replace_list( inplace: bool = False, regex: bool = False, ) -> T: - """ do a list replace """ + """do a list replace""" inplace = validate_bool_kwarg(inplace, "inplace") return self.apply_with_block( @@ -462,7 +462,7 @@ def any_extension_types(self) -> bool: @property def is_view(self) -> bool: - """ return a boolean if we are a single block and are a view """ + """return a boolean if we are a single block and are a view""" # TODO what is this used for? return False diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4f1b16e747394..4a947677937e8 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -167,7 +167,7 @@ def _consolidate_key(self): @property def is_view(self) -> bool: - """ return a boolean if I am possibly a view """ + """return a boolean if I am possibly a view""" values = self.values values = cast(np.ndarray, values) return values.base is not None @@ -260,7 +260,7 @@ def make_block(self, values, placement=None) -> Block: def make_block_same_class( self, values, placement: BlockPlacement | None = None ) -> Block: - """ Wrap given values in a block of same type as self. 
""" + """Wrap given values in a block of same type as self.""" if placement is None: placement = self._mgr_locs @@ -289,7 +289,7 @@ def __len__(self) -> int: return len(self.values) def _slice(self, slicer): - """ return a slice of my values """ + """return a slice of my values""" return self.values[slicer] @@ -528,7 +528,7 @@ def _maybe_downcast(self, blocks: list[Block], downcast=None) -> list[Block]: @final def downcast(self, dtypes=None) -> list[Block]: - """ try to downcast each item to the dict of dtypes if present """ + """try to downcast each item to the dict of dtypes if present""" # turn it off completely if dtypes is False: return [self] @@ -615,7 +615,7 @@ def convert( @final def _can_hold_element(self, element: Any) -> bool: - """ require the same dtype as ourselves """ + """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) @@ -636,14 +636,14 @@ def should_store(self, value: ArrayLike) -> bool: @final def to_native_types(self, na_rep="nan", quoting=None, **kwargs): - """ convert to our native types format """ + """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) # block actions # @final def copy(self, deep: bool = True): - """ copy constructor """ + """copy constructor""" values = self.values if deep: values = values.copy() @@ -1156,12 +1156,12 @@ def take_nd( return self.make_block_same_class(new_values, new_mgr_locs) def diff(self, n: int, axis: int = 1) -> list[Block]: - """ return block for the diff of the values """ + """return block for the diff of the values""" new_values = algos.diff(self.values, n, axis=axis, stacklevel=7) return [self.make_block(values=new_values)] def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> list[Block]: - """ shift the block by periods, possibly upcast """ + """shift the block by periods, possibly upcast""" # 
convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also @@ -1674,7 +1674,7 @@ class NDArrayBackedExtensionBlock(libinternals.NDArrayBackedBlock, EABackedBlock @property def is_view(self) -> bool: - """ return a boolean if I am possibly a view """ + """return a boolean if I am possibly a view""" # check the ndarray values of the DatetimeIndex values return self.values._ndarray.base is not None @@ -1770,7 +1770,7 @@ class DatetimeLikeBlock(NDArrayBackedExtensionBlock): class DatetimeTZBlock(DatetimeLikeBlock): - """ implement a datetime64 block with a tz attribute """ + """implement a datetime64 block with a tz attribute""" values: DatetimeArray @@ -1990,7 +1990,7 @@ def extract_pandas_array( def extend_blocks(result, blocks=None) -> list[Block]: - """ return a new extended blocks, given the result """ + """return a new extended blocks, given the result""" if blocks is None: blocks = [] if isinstance(result, list): @@ -2030,7 +2030,7 @@ def to_native_types( decimal=".", **kwargs, ) -> np.ndarray: - """ convert to our native types format """ + """convert to our native types format""" values = ensure_wrapped_if_datetimelike(values) if isinstance(values, (DatetimeArray, TimedeltaArray)): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 323aa45874d96..48f0b7f7f964b 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -183,7 +183,7 @@ def blklocs(self): return self._blklocs def make_empty(self: T, axes=None) -> T: - """ return an empty BlockManager with the items axis of len 0 """ + """return an empty BlockManager with the items axis of len 0""" if axes is None: axes = [Index([])] + self.axes[1:] @@ -422,7 +422,7 @@ def replace_list( inplace: bool = False, regex: bool = False, ) -> T: - """ do a list replace """ + """do a list replace""" inplace = validate_bool_kwarg(inplace, "inplace") bm = self.apply( @@ -466,7 +466,7 @@ def any_extension_types(self) -> 
bool: @property def is_view(self) -> bool: - """ return a boolean if we are a single block and are a view """ + """return a boolean if we are a single block and are a view""" if len(self.blocks) == 1: return self.blocks[0].is_view @@ -516,7 +516,7 @@ def get_numeric_data(self: T, copy: bool = False) -> T: def _combine( self: T, blocks: list[Block], copy: bool = True, index: Index | None = None ) -> T: - """ return a new manager with the blocks """ + """return a new manager with the blocks""" if len(blocks) == 0: if self.ndim == 2: # retain our own Index dtype @@ -1502,7 +1502,7 @@ def _interleave( class SingleBlockManager(BaseBlockManager, SingleDataManager): - """ manage a single block with """ + """manage a single block with""" ndim = 1 _is_consolidated = True @@ -1596,12 +1596,12 @@ def _block(self) -> Block: @property def _blknos(self): - """ compat with BlockManager """ + """compat with BlockManager""" return None @property def _blklocs(self): - """ compat with BlockManager """ + """compat with BlockManager""" return None def getitem_mgr(self, indexer) -> SingleBlockManager: @@ -1759,7 +1759,7 @@ def construction_error( axes: list[Index], e: ValueError | None = None, ): - """ raise a helpful message about our construction """ + """raise a helpful message about our construction""" passed = tuple(map(int, [tot_items] + list(block_shape))) # Correcting the user facing error message during dataframe construction if len(passed) <= 2: @@ -1885,7 +1885,7 @@ def _simple_blockify(tuples, dtype, consolidate: bool) -> list[Block]: def _multi_blockify(tuples, dtype: DtypeObj | None = None, consolidate: bool = True): - """ return an array of blocks that potentially have different dtypes """ + """return an array of blocks that potentially have different dtypes""" if not consolidate: return _tuples_to_blocks_no_consolidate(tuples, dtype=dtype) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b8909f16ee876..8785545574fb9 100644 --- a/pandas/core/nanops.py +++ 
b/pandas/core/nanops.py @@ -191,7 +191,7 @@ def _has_infs(result) -> bool: def _get_fill_value( dtype: DtypeObj, fill_value: Scalar | None = None, fill_value_typ=None ): - """ return the correct fill value for the dtype of the values """ + """return the correct fill value for the dtype of the values""" if fill_value is not None: return fill_value if _na_ok_dtype(dtype): @@ -350,7 +350,7 @@ def _na_ok_dtype(dtype: DtypeObj) -> bool: def _wrap_results(result, dtype: np.dtype, fill_value=None): - """ wrap our results if needed """ + """wrap our results if needed""" if result is NaT: pass diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 9cccf1cff60a1..297769149e5f0 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -149,7 +149,7 @@ def fill_binop(left, right, fill_value): def align_method_SERIES(left: Series, right, align_asobject: bool = False): - """ align lhs and rhs Series """ + """align lhs and rhs Series""" # ToDo: Different from align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 6a0fad9ee729b..0905b69c64b0e 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -226,7 +226,7 @@ def lreshape(data: DataFrame, groups, dropna: bool = True, label=None) -> DataFr else: keys, values = zip(*groups) - all_cols = list(set.union(*[set(x) for x in values])) + all_cols = list(set.union(*(set(x) for x in values))) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index c05130278f75b..143999a4677b3 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -931,7 +931,7 @@ def _maybe_add_join_keys( result.insert(i, name or f"key_{i}", key_col) def _get_join_indexers(self) -> tuple[np.ndarray, np.ndarray]: - """ return the join indexers """ + 
"""return the join indexers""" # Both returned ndarrays are np.intp return get_join_indexers( self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how @@ -1692,7 +1692,7 @@ def _asof_by_function(direction: str): def _get_cython_type_upcast(dtype: DtypeObj) -> str: - """ Upcast a dtype to 'int64_t', 'double', or 'object' """ + """Upcast a dtype to 'int64_t', 'double', or 'object'""" if is_integer_dtype(dtype): return "int64_t" elif is_float_dtype(dtype): @@ -1883,10 +1883,10 @@ def _get_merge_keys(self): def _get_join_indexers(self) -> tuple[np.ndarray, np.ndarray]: # Both returned ndarrays are np.intp - """ return the join indexers """ + """return the join indexers""" def flip(xs) -> np.ndarray: - """ unlike np.transpose, this returns an array of tuples """ + """unlike np.transpose, this returns an array of tuples""" # error: Item "ndarray" of "Union[Any, Union[ExtensionArray, ndarray]]" has # no attribute "_values_for_argsort" xs = [ diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 64daf2542e15a..7db30dc1ba9b9 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -552,7 +552,7 @@ def _convert_bin_to_datelike_type(bins, dtype): def _format_labels( bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None ): - """ based on the dtype, return our labels """ + """based on the dtype, return our labels""" closed = "right" if right else "left" formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 485610af747f6..029492e78099f 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -860,7 +860,7 @@ def space_format(x, y): return y str_columns = list( - zip(*[[space_format(x, y) for y in x] for x in fmt_columns]) + zip(*([space_format(x, y) for y in x] for x in fmt_columns)) ) if self.sparsify and len(str_columns): str_columns = sparsify_labels(str_columns) @@ -1541,7 
+1541,7 @@ def __init__( self.date_format = date_format def _format_strings(self) -> list[str]: - """ we by definition have DO NOT have a TZ """ + """we by definition have DO NOT have a TZ""" values = self.values if not isinstance(values, DatetimeIndex): @@ -1729,7 +1729,7 @@ def get_format_datetime64( def get_format_datetime64_from_values( values: np.ndarray | DatetimeArray | DatetimeIndex, date_format: str | None ) -> str | None: - """ given values and a date_format, return a string format """ + """given values and a date_format, return a string format""" if isinstance(values, np.ndarray) and values.ndim > 1: # We don't actually care about the order of values, and DatetimeIndex # only accepts 1D values @@ -1743,7 +1743,7 @@ def get_format_datetime64_from_values( class Datetime64TZFormatter(Datetime64Formatter): def _format_strings(self) -> list[str]: - """ we by definition have a TZ """ + """we by definition have a TZ""" values = self.values.astype(object) ido = is_dates_only(values) formatter = self.formatter or get_format_datetime64( diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index bccf3c3f1011b..7617cea327a56 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -35,7 +35,7 @@ def get_engine(engine: str) -> BaseImpl: - """ return our implementation """ + """return our implementation""" if engine == "auto": engine = get_option("io.parquet.engine") diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 8bf1ab1260b8e..3e649fb2b7fb2 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1196,7 +1196,7 @@ def _floatify_na_values(na_values): def _stringify_na_values(na_values): - """ return a stringified and numeric for these values """ + """return a stringified and numeric for these values""" result: List[Union[int, str, float]] = [] for x in na_values: result.append(str(x)) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b32eb9e308780..9b691175e1ef0 100644 --- 
a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -112,7 +112,7 @@ def _ensure_decoded(s): - """ if we have bytes, decode them to unicode """ + """if we have bytes, decode them to unicode""" if isinstance(s, np.bytes_): s = s.decode("UTF-8") return s @@ -274,7 +274,7 @@ def to_hdf( errors: str = "strict", encoding: str = "UTF-8", ): - """ store this object, close it if we opened it """ + """store this object, close it if we opened it""" if append: f = lambda store: store.append( key, @@ -592,7 +592,7 @@ def __fspath__(self): @property def root(self): - """ return the root node """ + """return the root node""" self._check_if_open() assert self._handle is not None # for mypy return self._handle.root @@ -611,7 +611,7 @@ def __delitem__(self, key: str): return self.remove(key) def __getattr__(self, name: str): - """ allow attribute access to get stores """ + """allow attribute access to get stores""" try: return self.get(name) except (KeyError, ClosedFileError): @@ -1491,7 +1491,7 @@ def walk(self, where="/"): yield (g._v_pathname.rstrip("/"), groups, leaves) def get_node(self, key: str) -> Node | None: - """ return the node with the key or None if it does not exist """ + """return the node with the key or None if it does not exist""" self._check_if_open() if not key.startswith("/"): key = "/" + key @@ -1507,7 +1507,7 @@ def get_node(self, key: str) -> Node | None: return node def get_storer(self, key: str) -> GenericFixed | Table: - """ return the storer object for a key, raise if not in the file """ + """return the storer object for a key, raise if not in the file""" group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") @@ -1624,7 +1624,7 @@ def _check_if_open(self): raise ClosedFileError(f"{self._path} file is not open!") def _validate_format(self, format: str) -> str: - """ validate / deprecate formats """ + """validate / deprecate formats""" # validate try: format = _FORMAT_MAP[format.lower()] @@ -1641,7 +1641,7 @@ 
def _create_storer( encoding: str = "UTF-8", errors: str = "strict", ) -> GenericFixed | Table: - """ return a suitable class to operate """ + """return a suitable class to operate""" cls: type[GenericFixed] | type[Table] if value is not None and not isinstance(value, (Series, DataFrame)): @@ -2019,7 +2019,7 @@ def kind_attr(self) -> str: return f"{self.name}_kind" def set_pos(self, pos: int): - """ set the position of this column in the Table """ + """set the position of this column in the Table""" self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos @@ -2036,7 +2036,7 @@ def __repr__(self) -> str: ) def __eq__(self, other: Any) -> bool: - """ compare 2 col items """ + """compare 2 col items""" return all( getattr(self, a, None) == getattr(other, a, None) for a in ["name", "cname", "axis", "pos"] @@ -2047,7 +2047,7 @@ def __ne__(self, other) -> bool: @property def is_indexed(self) -> bool: - """ return whether I am an indexed column """ + """return whether I am an indexed column""" if not hasattr(self.table, "cols"): # e.g. if infer hasn't been called yet, self.table will be None. 
return False @@ -2092,7 +2092,7 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): return new_pd_index, new_pd_index def take_data(self): - """ return the values""" + """return the values""" return self.values @property @@ -2105,12 +2105,12 @@ def description(self): @property def col(self): - """ return my current col description """ + """return my current col description""" return getattr(self.description, self.cname, None) @property def cvalues(self): - """ return my cython values """ + """return my cython values""" return self.values def __iter__(self): @@ -2141,7 +2141,7 @@ def validate_and_set(self, handler: AppendableTable, append: bool): self.set_attr() def validate_col(self, itemsize=None): - """ validate this column: return the compared against itemsize """ + """validate this column: return the compared against itemsize""" # validate this column for string truncation (or reset to the max size) if _ensure_decoded(self.kind) == "string": c = self.col @@ -2200,17 +2200,17 @@ def update_info(self, info): idx[key] = value def set_info(self, info): - """ set my state from the passed info """ + """set my state from the passed info""" idx = info.get(self.name) if idx is not None: self.__dict__.update(idx) def set_attr(self): - """ set the kind for this column """ + """set the kind for this column""" setattr(self.attrs, self.kind_attr, self.kind) def validate_metadata(self, handler: AppendableTable): - """ validate that kind=category does not change the categories """ + """validate that kind=category does not change the categories""" if self.meta == "category": new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) @@ -2225,13 +2225,13 @@ def validate_metadata(self, handler: AppendableTable): ) def write_metadata(self, handler: AppendableTable): - """ set the meta data """ + """set the meta data""" if self.metadata is not None: handler.write_metadata(self.cname, self.metadata) class GenericIndexCol(IndexCol): - """ 
an index which is not represented in the data of the table """ + """an index which is not represented in the data of the table""" @property def is_indexed(self) -> bool: @@ -2330,7 +2330,7 @@ def __repr__(self) -> str: ) def __eq__(self, other: Any) -> bool: - """ compare 2 col items """ + """compare 2 col items""" return all( getattr(self, a, None) == getattr(other, a, None) for a in ["name", "cname", "dtype", "pos"] @@ -2347,7 +2347,7 @@ def set_data(self, data: ArrayLike): self.kind = _dtype_to_kind(dtype_name) def take_data(self): - """ return the data """ + """return the data""" return self.data @classmethod @@ -2388,7 +2388,7 @@ def get_atom_string(cls, shape, itemsize): @classmethod def get_atom_coltype(cls, kind: str) -> type[Col]: - """ return the PyTables column class for this column """ + """return the PyTables column class for this column""" if kind.startswith("uint"): k4 = kind[4:] col_name = f"UInt{k4}Col" @@ -2419,7 +2419,7 @@ def shape(self): @property def cvalues(self): - """ return my cython values """ + """return my cython values""" return self.data def validate_attr(self, append): @@ -2537,7 +2537,7 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): return self.values, converted def set_attr(self): - """ set the data for this column """ + """set the data for this column""" setattr(self.attrs, self.kind_attr, self.values) setattr(self.attrs, self.meta_attr, self.meta) assert self.dtype is not None @@ -2545,7 +2545,7 @@ def set_attr(self): class DataIndexableCol(DataCol): - """ represent a data column that can be indexed """ + """represent a data column that can be indexed""" is_data_indexable = True @@ -2572,7 +2572,7 @@ def get_atom_timedelta64(cls, shape): class GenericDataIndexableCol(DataIndexableCol): - """ represent a generic pytables data column """ + """represent a generic pytables data column""" pass @@ -2621,7 +2621,7 @@ def is_old_version(self) -> bool: @property def version(self) -> tuple[int, int, int]: - 
""" compute and set our version """ + """compute and set our version""" version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None)) try: version = tuple(int(x) for x in version.split(".")) @@ -2636,7 +2636,7 @@ def pandas_type(self): return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None)) def __repr__(self) -> str: - """ return a pretty representation of myself """ + """return a pretty representation of myself""" self.infer_axes() s = self.shape if s is not None: @@ -2647,7 +2647,7 @@ def __repr__(self) -> str: return self.pandas_type def set_object_info(self): - """ set my pandas type & version """ + """set my pandas type & version""" self.attrs.pandas_type = str(self.pandas_kind) self.attrs.pandas_version = str(_version) @@ -2684,16 +2684,16 @@ def attrs(self): return self.group._v_attrs def set_attrs(self): - """ set our object attributes """ + """set our object attributes""" pass def get_attrs(self): - """ get our object attributes """ + """get our object attributes""" pass @property def storable(self): - """ return my storable """ + """return my storable""" return self.group @property @@ -2705,13 +2705,13 @@ def nrows(self): return getattr(self.storable, "nrows", None) def validate(self, other): - """ validate against an existing storable """ + """validate against an existing storable""" if other is None: return return True def validate_version(self, where=None): - """ are we trying to operate on an old version? 
""" + """are we trying to operate on an old version?""" return True def infer_axes(self): @@ -2754,7 +2754,7 @@ def delete(self, where=None, start: int | None = None, stop: int | None = None): class GenericFixed(Fixed): - """ a generified fixed version """ + """a generified fixed version""" _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"} _reverse_index_map = {v: k for k, v in _index_type_map.items()} @@ -2836,12 +2836,12 @@ def is_exists(self) -> bool: return True def set_attrs(self): - """ set our object attributes """ + """set our object attributes""" self.attrs.encoding = self.encoding self.attrs.errors = self.errors def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes""" self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None)) self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict")) for n in self.attributes: @@ -2851,7 +2851,7 @@ def write(self, obj, **kwargs): self.set_attrs() def read_array(self, key: str, start: int | None = None, stop: int | None = None): - """ read an array for the specified node (off of group """ + """read an array for the specified node (off of group""" import tables node = getattr(self.group, key) @@ -3008,7 +3008,7 @@ def read_index_node( return index def write_array_empty(self, key: str, value: ArrayLike): - """ write a 0-len array """ + """write a 0-len array""" # ugly hack for length 0 axes arr = np.empty((1,) * value.ndim) self._handle.create_array(self.group, key, arr) @@ -3296,7 +3296,7 @@ def table_type_short(self) -> str: return self.table_type.split("_")[0] def __repr__(self) -> str: - """ return a pretty representation of myself """ + """return a pretty representation of myself""" self.infer_axes() jdc = ",".join(self.data_columns) if len(self.data_columns) else "" dc = f",dc->[{jdc}]" @@ -3314,14 +3314,14 @@ def __repr__(self) -> str: ) def __getitem__(self, c: str): - """ return the axis for c """ + """return the axis for c""" for a in 
self.axes: if c == a.name: return a return None def validate(self, other): - """ validate against an existing table """ + """validate against an existing table""" if other is None: return @@ -3377,12 +3377,12 @@ def validate_multiindex( @property def nrows_expected(self) -> int: - """ based on our axes, compute the expected nrows """ + """based on our axes, compute the expected nrows""" return np.prod([i.cvalues.shape[0] for i in self.index_axes]) @property def is_exists(self) -> bool: - """ has this table been created """ + """has this table been created""" return "table" in self.group @property @@ -3391,7 +3391,7 @@ def storable(self): @property def table(self): - """ return the table group (this is my storable) """ + """return the table group (this is my storable)""" return self.storable @property @@ -3408,7 +3408,7 @@ def axes(self): @property def ncols(self) -> int: - """ the number of total columns in the values axes """ + """the number of total columns in the values axes""" return sum(len(a.values) for a in self.values_axes) @property @@ -3426,7 +3426,7 @@ def data_orientation(self): ) def queryables(self) -> dict[str, Any]: - """ return a dict of the kinds allowable columns for this object """ + """return a dict of the kinds allowable columns for this object""" # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here axis_names = {0: "index", 1: "columns"} @@ -3442,16 +3442,16 @@ def queryables(self) -> dict[str, Any]: return dict(d1 + d2 + d3) # type: ignore[operator] def index_cols(self): - """ return a list of my index cols """ + """return a list of my index cols""" # Note: each `i.cname` below is assured to be a str. 
return [(i.axis, i.cname) for i in self.index_axes] def values_cols(self) -> list[str]: - """ return a list of my values cols """ + """return a list of my values cols""" return [i.cname for i in self.values_axes] def _get_metadata_path(self, key: str) -> str: - """ return the metadata pathname for this key """ + """return the metadata pathname for this key""" group = self.group._v_pathname return f"{group}/meta/{key}/meta" @@ -3479,13 +3479,13 @@ def write_metadata(self, key: str, values: np.ndarray): ) def read_metadata(self, key: str): - """ return the meta data array for this key """ + """return the meta data array for this key""" if getattr(getattr(self.group, "meta", None), key, None) is not None: return self.parent.select(self._get_metadata_path(key)) return None def set_attrs(self): - """ set our table type & indexables """ + """set our table type & indexables""" self.attrs.table_type = str(self.table_type) self.attrs.index_cols = self.index_cols() self.attrs.values_cols = self.values_cols() @@ -3498,7 +3498,7 @@ def set_attrs(self): self.attrs.info = self.info def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes""" self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or [] self.data_columns = getattr(self.attrs, "data_columns", None) or [] self.info = getattr(self.attrs, "info", None) or {} @@ -3510,7 +3510,7 @@ def get_attrs(self): self.values_axes = [a for a in self.indexables if not a.is_an_indexable] def validate_version(self, where=None): - """ are we trying to operate on an old version? 
""" + """are we trying to operate on an old version?""" if where is not None: if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1: ws = incompatibility_doc % ".".join(str(x) for x in self.version) @@ -3540,7 +3540,7 @@ def validate_min_itemsize(self, min_itemsize): @cache_readonly def indexables(self): - """ create/cache the indexables if they don't exist """ + """create/cache the indexables if they don't exist""" _indexables = [] desc = self.description @@ -3732,7 +3732,7 @@ def _read_axes( @classmethod def get_object(cls, obj, transposed: bool): - """ return the data for this obj """ + """return the data for this obj""" return obj def validate_data_columns(self, data_columns, min_itemsize, non_index_axes): @@ -4067,7 +4067,7 @@ def get_blk_items(mgr): return blocks, blk_items def process_axes(self, obj, selection: Selection, columns=None): - """ process axes filters """ + """process axes filters""" # make a copy to avoid side effects if columns is not None: columns = list(columns) @@ -4131,7 +4131,7 @@ def create_description( fletcher32: bool, expectedrows: int | None, ) -> dict[str, Any]: - """ create the description of the table from the axes & values """ + """create the description of the table from the axes & values""" # provided expected rows if its passed if expectedrows is None: expectedrows = max(self.nrows_expected, 10000) @@ -4256,7 +4256,7 @@ def write(self, **kwargs): class AppendableTable(Table): - """ support the new appendable table formats """ + """support the new appendable table formats""" table_type = "appendable" @@ -4485,7 +4485,7 @@ def delete(self, where=None, start: int | None = None, stop: int | None = None): class AppendableFrameTable(AppendableTable): - """ support the new appendable table formats """ + """support the new appendable table formats""" pandas_kind = "frame_table" table_type = "appendable_frame" @@ -4498,7 +4498,7 @@ def is_transposed(self) -> bool: @classmethod def get_object(cls, obj, transposed: 
bool): - """ these are written transposed """ + """these are written transposed""" if transposed: obj = obj.T return obj @@ -4585,7 +4585,7 @@ def read( class AppendableSeriesTable(AppendableFrameTable): - """ support the new appendable table formats """ + """support the new appendable table formats""" pandas_kind = "series_table" table_type = "appendable_series" @@ -4601,7 +4601,7 @@ def get_object(cls, obj, transposed: bool): return obj def write(self, obj, data_columns=None, **kwargs): - """ we are going to write this as a frame table """ + """we are going to write this as a frame table""" if not isinstance(obj, DataFrame): name = obj.name or "values" obj = obj.to_frame(name) @@ -4634,13 +4634,13 @@ def read( class AppendableMultiSeriesTable(AppendableSeriesTable): - """ support the new appendable table formats """ + """support the new appendable table formats""" pandas_kind = "series_table" table_type = "appendable_multiseries" def write(self, obj, **kwargs): - """ we are going to write this as a frame table """ + """we are going to write this as a frame table""" name = obj.name or "values" newobj, self.levels = self.validate_multiindex(obj) assert isinstance(self.levels, list) # for mypy @@ -4651,7 +4651,7 @@ def write(self, obj, **kwargs): class GenericTable(AppendableFrameTable): - """ a table that read/writes the generic pytables table format """ + """a table that read/writes the generic pytables table format""" pandas_kind = "frame_table" table_type = "generic_table" @@ -4668,7 +4668,7 @@ def storable(self): return getattr(self.group, "table", None) or self.group def get_attrs(self): - """ retrieve our attributes """ + """retrieve our attributes""" self.non_index_axes = [] self.nan_rep = None self.levels = [] @@ -4679,7 +4679,7 @@ def get_attrs(self): @cache_readonly def indexables(self): - """ create the indexables from the table description """ + """create the indexables from the table description""" d = self.description # TODO: can we get a typ for 
this? AFAICT it is the only place @@ -4717,7 +4717,7 @@ def write(self, **kwargs): class AppendableMultiFrameTable(AppendableFrameTable): - """ a frame with a multi-index """ + """a frame with a multi-index""" table_type = "appendable_multiframe" obj_type = DataFrame @@ -4784,7 +4784,7 @@ def _reindex_axis(obj: DataFrame, axis: int, labels: Index, other=None) -> DataF def _get_tz(tz: tzinfo) -> str | tzinfo: - """ for a tz-aware type, return an encoded zone """ + """for a tz-aware type, return an encoded zone""" zone = timezones.get_timezone(tz) return zone @@ -5232,7 +5232,7 @@ def __init__( self.condition, self.filter = self.terms.evaluate() def generate(self, where): - """ where can be a : dict,list,tuple,string """ + """where can be a : dict,list,tuple,string""" if where is None: return None diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index f7e1c56cbb196..6ced3febd78f4 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -138,7 +138,7 @@ def _parse_date(datestr: str) -> datetime: - """ Given a date in xport format, return Python date. """ + """Given a date in xport format, return Python date.""" try: # e.g. 
"16FEB11:10:07:55" return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a347e7a99be8b..954f6f5eae676 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -966,7 +966,7 @@ def insert(self, chunksize: int | None = None, method: str | None = None): if start_i >= end_i: break - chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list]) + chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list)) exec_insert(conn, keys, chunk_iter) def _query_iterator( @@ -1354,7 +1354,7 @@ def insert_records( def get_engine(engine: str) -> BaseEngine: - """ return our implementation """ + """return our implementation""" if engine == "auto": engine = get_option("io.sql.engine") diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e4f3bcb89cf7e..4ba7a446d0668 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1095,15 +1095,15 @@ def __init__( self._setup_dtype() def __enter__(self) -> StataReader: - """ enter context manager """ + """enter context manager""" return self def __exit__(self, exc_type, exc_value, traceback) -> None: - """ exit context manager """ + """exit context manager""" self.close() def close(self) -> None: - """ close the handle if its open """ + """close the handle if its open""" self.path_or_buf.close() def _set_encoding(self) -> None: diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index bb78e29924ba2..5f93442cae4f6 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -444,7 +444,7 @@ def test_cmp_series_period_series_mixed_freq(self): class TestPeriodIndexSeriesComparisonConsistency: - """ Test PeriodIndex and Period Series Ops consistency """ + """Test PeriodIndex and Period Series Ops consistency""" # TODO: needs parametrization+de-duplication @@ -1306,7 +1306,7 @@ def test_ops_series_period(self): class TestPeriodIndexSeriesMethods: - """ Test PeriodIndex and Period Series Ops consistency """ + 
"""Test PeriodIndex and Period Series Ops consistency""" def _check(self, values, func, expected): idx = PeriodIndex(values) diff --git a/pandas/tests/base/test_constructors.py b/pandas/tests/base/test_constructors.py index ceb882ff9c963..16ce709a5b021 100644 --- a/pandas/tests/base/test_constructors.py +++ b/pandas/tests/base/test_constructors.py @@ -47,7 +47,7 @@ def _get_foo(self): foo = property(_get_foo, _set_foo, doc="foo property") def bar(self, *args, **kwargs): - """ a test bar method """ + """a test bar method""" pass class Delegate(PandasDelegate, PandasObject): diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 616f46624bfd7..cf9acadc7a6dd 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -24,12 +24,12 @@ # EA & Actual Dtypes def to_ea_dtypes(dtypes): - """ convert list of string dtypes to EA dtype """ + """convert list of string dtypes to EA dtype""" return [getattr(pd, dt + "Dtype") for dt in dtypes] def to_numpy_dtypes(dtypes): - """ convert list of string dtypes to numpy dtype """ + """convert list of string dtypes to numpy dtype""" return [getattr(np, dt) for dt in dtypes if isinstance(dt, str)] diff --git a/pandas/tests/extension/base/reduce.py b/pandas/tests/extension/base/reduce.py index 0f7bd59411eb5..c6a35d8fa5b38 100644 --- a/pandas/tests/extension/base/reduce.py +++ b/pandas/tests/extension/base/reduce.py @@ -20,7 +20,7 @@ def check_reduce(self, s, op_name, skipna): class BaseNoReduceTests(BaseReduceTests): - """ we don't define any reductions """ + """we don't define any reductions""" @pytest.mark.parametrize("skipna", [True, False]) def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index 09b33831ed5ec..7258f5eceb54a 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -139,7 +139,7 @@ def 
test_clip_against_unordered_columns(self): tm.assert_frame_equal(result_lower_upper, expected_lower_upper) def test_clip_with_na_args(self, float_frame): - """Should process np.nan argument as None """ + """Should process np.nan argument as None""" # GH#17276 tm.assert_frame_equal(float_frame.clip(np.nan), float_frame) tm.assert_frame_equal(float_frame.clip(upper=np.nan, lower=np.nan), float_frame) diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index fa91eb928e35c..3a1228ee5c4a5 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -346,7 +346,7 @@ def test_describe_percentiles_integer_idx(self): result = df.describe(percentiles=pct) expected = DataFrame( - {"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]}, + {"x": [1.0, 1.0, np.NaN, 1.0, *(1.0 for _ in pct), 1.0]}, index=[ "count", "mean", diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index da930ab4d7423..afa9593807acc 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -763,7 +763,7 @@ def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators): if opname in ["__rmod__", "__rfloordiv__"]: # Series ops may return mixed int/float dtypes in cases where # DataFrame op will return all-float. 
So we upcast `expected` - dtype = np.common_type(*[x.values for x in exvals.values()]) + dtype = np.common_type(*(x.values for x in exvals.values())) expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 254a0b8dfd34e..3a307ebd702ca 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -24,7 +24,7 @@ def _ndim(self): return self._typ._AXIS_LEN def _axes(self): - """ return the axes for my object typ """ + """return the axes for my object typ""" return self._typ._AXIS_ORDERS def _construct(self, shape, value=None, dtype=None, **kwargs): diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py index d3566f16ab49f..87bb6a58600f4 100644 --- a/pandas/tests/generic/test_label_or_level_utils.py +++ b/pandas/tests/generic/test_label_or_level_utils.py @@ -9,13 +9,13 @@ # ======== @pytest.fixture def df(): - """DataFrame with columns 'L1', 'L2', and 'L3' """ + """DataFrame with columns 'L1', 'L2', and 'L3'""" return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]}) @pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]]) def df_levels(request, df): - """DataFrame with columns or index levels 'L1', 'L2', and 'L3' """ + """DataFrame with columns or index levels 'L1', 'L2', and 'L3'""" levels = request.param if levels: @@ -26,7 +26,7 @@ def df_levels(request, df): @pytest.fixture def df_ambig(df): - """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3' """ + """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'""" df = df.set_index(["L1", "L2"]) df["L1"] = df["L3"] @@ -36,7 +36,7 @@ def df_ambig(df): @pytest.fixture def df_duplabels(df): - """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2' """ + """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'""" df = df.set_index(["L1"]) df = 
pd.concat([df, df["L2"]], axis=1) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index f7c2266e39fcc..8cde03af1ff92 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -26,7 +26,7 @@ def _axify(obj, key, axis): class Base: - """ indexing comprehensive base class """ + """indexing comprehensive base class""" _kinds = {"series", "frame"} _typs = { @@ -120,7 +120,7 @@ def generate_indices(self, f, values=False): return itertools.product(*axes) def get_value(self, name, f, i, values=False): - """ return the value for the location i """ + """return the value for the location i""" # check against values if values: return f.values[i] @@ -153,7 +153,7 @@ def check_values(self, f, func, values=False): def check_result(self, method, key, typs=None, axes=None, fails=None): def _eq(axis, obj, key): - """ compare equal for these 2 keys """ + """compare equal for these 2 keys""" axified = _axify(obj, key, axis) try: getattr(obj, method).__getitem__(axified) diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index cd49620f45fae..0e9eb163078d9 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -485,9 +485,9 @@ def test_loc_and_at_with_categorical_index(self): [1.5, 2.5, 3.5], [-1.5, -2.5, -3.5], # numpy int/uint - *[np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_DTYPES], + *(np.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_DTYPES), # numpy floats - *[np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_DTYPES], + *(np.array([1.5, 2.5, 3.5], dtype=dtyp) for dtyp in tm.FLOAT_DTYPES), # numpy object np.array([1, "b", 3.5], dtype=object), # pandas scalars @@ -495,7 +495,7 @@ def test_loc_and_at_with_categorical_index(self): [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)], [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")], # pandas Integer arrays - *[pd.array([1, 2, 
3], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES], + *(pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES), # other pandas arrays pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array, pd.date_range("2019-01-01", periods=3).array, diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 6f4949267c00c..a0d62f1fa782d 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -79,7 +79,7 @@ class TestSetitemCoercion(CoercionBase): def _assert_setitem_series_conversion( self, original_series, loc_value, expected_series, expected_dtype ): - """ test series value's coercion triggered by assignment """ + """test series value's coercion triggered by assignment""" temp = original_series.copy() temp[1] = loc_value tm.assert_series_equal(temp, expected_series) @@ -270,7 +270,7 @@ def test_setitem_series_no_coercion_from_values_list(self): def _assert_setitem_index_conversion( self, original_series, loc_key, expected_index, expected_dtype ): - """ test index's coercion triggered by assign key """ + """test index's coercion triggered by assign key""" temp = original_series.copy() temp[loc_key] = 5 exp = pd.Series([1, 2, 3, 4, 5], index=expected_index) @@ -364,7 +364,7 @@ class TestInsertIndexCoercion(CoercionBase): method = "insert" def _assert_insert_conversion(self, original, value, expected, expected_dtype): - """ test coercion triggered by insert """ + """test coercion triggered by insert""" target = original.copy() res = target.insert(1, value) tm.assert_index_equal(res, expected) @@ -552,7 +552,7 @@ class TestWhereCoercion(CoercionBase): def _assert_where_conversion( self, original, cond, values, expected, expected_dtype ): - """ test coercion triggered by where """ + """test coercion triggered by where""" target = original.copy() res = target.where(cond, values) tm.assert_equal(res, expected) @@ -866,7 +866,7 @@ def test_has_comprehensive_tests(self): raise NotImplementedError 
def _assert_fillna_conversion(self, original, value, expected, expected_dtype): - """ test coercion triggered by fillna """ + """test coercion triggered by fillna""" target = original.copy() res = target.fillna(value) tm.assert_equal(res, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 772aa97c47233..2fc2aa3ac3a9d 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -33,7 +33,7 @@ class TestFancy: - """ pure get/set item & fancy indexing """ + """pure get/set item & fancy indexing""" def test_setitem_ndarray_1d(self): # GH5508 diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 61bbd4e12e1ba..0f4a30cfa9cf9 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -561,7 +561,7 @@ def test_astype(self, t): def test_convert(self): def _compare(old_mgr, new_mgr): - """ compare the blocks, numeric compare ==, object don't """ + """compare the blocks, numeric compare ==, object don't""" old_blocks = set(old_mgr.blocks) new_blocks = set(new_mgr.blocks) assert len(old_blocks) == len(new_blocks) diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 601b50fb469cb..dede9127821fd 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -125,7 +125,7 @@ def _create_sp_frame(): def create_data(): - """ create the pickle data """ + """create the pickle data""" data = { "A": [0.0, 1.0, 2.0, 3.0, np.nan], "B": [0, 1, 0, 1, 0], diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py index 6a9d5745ab457..67c3a2902dbcb 100644 --- a/pandas/tests/io/pytables/common.py +++ b/pandas/tests/io/pytables/common.py @@ -30,7 +30,7 @@ def safe_close(store): def create_tempfile(path): - """ create an unopened named temporary file """ + 
"""create an unopened named temporary file""" return os.path.join(tempfile.gettempdir(), path) diff --git a/pandas/tests/io/pytables/conftest.py b/pandas/tests/io/pytables/conftest.py index 38ffcb3b0e8ec..988f78c5ae843 100644 --- a/pandas/tests/io/pytables/conftest.py +++ b/pandas/tests/io/pytables/conftest.py @@ -11,7 +11,7 @@ def setup_path(): @pytest.fixture(scope="module", autouse=True) def setup_mode(): - """ Reset testing mode fixture""" + """Reset testing mode fixture""" tm.reset_testing_mode() yield tm.set_testing_mode() diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 329ace02f4207..ccd0bc3d16896 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -1753,7 +1753,7 @@ def _check(axes): @td.skip_if_no_scipy def test_memory_leak(self): - """ Check that every plot type gets properly collected. """ + """Check that every plot type gets properly collected.""" import gc import weakref diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 7f0d1802580b9..adda95f4c5aa0 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -272,7 +272,7 @@ def test_parallel_coordinates(self, iris): # not sure if this is indicative of a problem @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning") def test_parallel_coordinates_with_sorted_labels(self): - """ For #15908 """ + """For #15908""" from pandas.plotting import parallel_coordinates df = DataFrame( diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index bf3e6d822ab19..450bd8b05ea43 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -36,7 +36,7 @@ @pytest.fixture def create_index(_index_factory): def _create_index(*args, **kwargs): - """ return the _index_factory created using the args, kwargs """ + """return the _index_factory created using the args, kwargs""" 
return _index_factory(*args, **kwargs) return _create_index diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py index fdb3a7872ad67..8ae3abf903d57 100644 --- a/pandas/tests/resample/test_deprecated.py +++ b/pandas/tests/resample/test_deprecated.py @@ -42,7 +42,7 @@ def _index_factory(): @pytest.fixture def create_index(_index_factory): def _create_index(*args, **kwargs): - """ return the _index_factory created using the args, kwargs """ + """return the _index_factory created using the args, kwargs""" return _index_factory(*args, **kwargs) return _create_index diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 671f0ad2d26c7..6746158179964 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -40,7 +40,7 @@ def setup_method(self, datapath): ) def test_examples1(self): - """ doc-string examples """ + """doc-string examples""" left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) @@ -52,7 +52,7 @@ def test_examples1(self): tm.assert_frame_equal(result, expected) def test_examples2(self): - """ doc-string examples """ + """doc-string examples""" trades = pd.DataFrame( { "time": to_datetime( @@ -136,7 +136,7 @@ def test_examples2(self): tm.assert_frame_equal(result, expected) def test_examples3(self): - """ doc-string examples """ + """doc-string examples""" # GH14887 left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) @@ -150,7 +150,7 @@ def test_examples3(self): tm.assert_frame_equal(result, expected) def test_examples4(self): - """ doc-string examples """ + """doc-string examples""" # GH14887 left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) diff --git a/pandas/tests/series/methods/test_clip.py b/pandas/tests/series/methods/test_clip.py index 7dbc194669a62..e4803a9cd3038 100644 --- 
a/pandas/tests/series/methods/test_clip.py +++ b/pandas/tests/series/methods/test_clip.py @@ -61,7 +61,7 @@ def test_series_clipping_with_na_values( tm.assert_series_equal(s_clipped_lower, expected_lower) def test_clip_with_na_args(self): - """Should process np.nan argument as None """ + """Should process np.nan argument as None""" # GH#17276 s = Series([1, 2, 3]) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a0e3399bee49f..e100fef3490ba 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -402,7 +402,7 @@ def test_subsets_multiindex_dtype(self): class TestSorted: - """ everything you wanted to test about sorting """ + """everything you wanted to test about sorting""" def test_sort_non_lexsorted(self): # degenerate case where we sort but don't
<!-- START pr-commits --> <!-- END pr-commits --> ## Base PullRequest default branch (https://github.com/pandas-dev/pandas/tree/master) ## Command results <details> <summary>Details: </summary> <details> <summary><em>add path</em></summary> ```Shell /home/runner/work/_actions/technote-space/create-pr-action/v2/node_modules/npm-check-updates/bin ``` </details> <details> <summary><em>pip install pre-commit</em></summary> ```Shell Collecting pre-commit Downloading pre_commit-2.13.0-py2.py3-none-any.whl (190 kB) Collecting identify>=1.0.0 Downloading identify-2.2.7-py2.py3-none-any.whl (98 kB) Collecting toml Using cached toml-0.10.2-py2.py3-none-any.whl (16 kB) Collecting nodeenv>=0.11.1 Using cached nodeenv-1.6.0-py2.py3-none-any.whl (21 kB) Collecting cfgv>=2.0.0 Downloading cfgv-3.3.0-py2.py3-none-any.whl (7.3 kB) Collecting pyyaml>=5.1 Using cached PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl (630 kB) Collecting virtualenv>=20.0.8 Downloading virtualenv-20.4.7-py2.py3-none-any.whl (7.2 MB) Collecting distlib<1,>=0.3.1 Downloading distlib-0.3.2-py2.py3-none-any.whl (338 kB) Collecting six<2,>=1.9.0 Using cached six-1.16.0-py2.py3-none-any.whl (11 kB) Collecting appdirs<2,>=1.4.3 Using cached appdirs-1.4.4-py2.py3-none-any.whl (9.6 kB) Collecting filelock<4,>=3.0.0 Using cached filelock-3.0.12-py3-none-any.whl (7.6 kB) Installing collected packages: six, filelock, distlib, appdirs, virtualenv, toml, pyyaml, nodeenv, identify, cfgv, pre-commit Successfully installed appdirs-1.4.4 cfgv-3.3.0 distlib-0.3.2 filelock-3.0.12 identify-2.2.7 nodeenv-1.6.0 pre-commit-2.13.0 pyyaml-5.4.1 six-1.16.0 toml-0.10.2 virtualenv-20.4.7 ``` </details> <details> <summary><em>pre-commit autoupdate || (exit 0);</em></summary> ```Shell Updating https://github.com/MarcoGorelli/absolufy-imports ... already up to date. Updating https://github.com/python/black ... updating 20.8b1 -> 21.5b1. Updating https://github.com/codespell-project/codespell ... already up to date. 
Updating https://github.com/pre-commit/pre-commit-hooks ... already up to date. Updating https://github.com/cpplint/cpplint ... [INFO] Initializing environment for https://github.com/cpplint/cpplint. already up to date. Updating https://gitlab.com/pycqa/flake8 ... already up to date. Updating https://github.com/PyCQA/isort ... already up to date. Updating https://github.com/asottile/pyupgrade ... [INFO] Initializing environment for https://github.com/asottile/pyupgrade. updating v2.18.3 -> v2.19.0. Updating https://github.com/pre-commit/pygrep-hooks ... already up to date. Updating https://github.com/asottile/yesqa ... already up to date. ``` </details> <details> <summary><em>pre-commit run -a || (exit 0);</em></summary> ```Shell [INFO] Installing environment for https://github.com/cpplint/cpplint. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Installing environment for https://github.com/asottile/pyupgrade. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... 
absolufy-imports...............................................................................Passed black..........................................................................................Failed - hook id: black - files were modified by this hook reformatted pandas/_config/config.py reformatted pandas/_libs/arrays.pyi reformatted pandas/_libs/algos.pyi reformatted pandas/_libs/groupby.pyi reformatted pandas/_libs/hashtable.pyi reformatted pandas/_libs/index.pyi reformatted pandas/_libs/internals.pyi reformatted pandas/_libs/join.pyi reformatted pandas/_libs/ops.pyi reformatted pandas/_libs/parsers.pyi reformatted pandas/_libs/reshape.pyi reformatted pandas/_libs/testing.pyi reformatted pandas/_libs/lib.pyi reformatted pandas/_libs/tslib.pyi reformatted pandas/_libs/tslibs/conversion.pyi reformatted pandas/_libs/tslibs/ccalendar.pyi reformatted pandas/_libs/tslibs/fields.pyi reformatted pandas/_libs/tslibs/dtypes.pyi reformatted pandas/_libs/tslibs/parsing.pyi reformatted pandas/_libs/tslibs/nattype.pyi reformatted pandas/_libs/tslibs/strptime.pyi reformatted pandas/_libs/tslibs/period.pyi reformatted pandas/_libs/tslibs/timedeltas.pyi reformatted pandas/_libs/tslibs/timezones.pyi reformatted pandas/_libs/tslibs/tzconversion.pyi reformatted pandas/_libs/tslibs/vectorized.pyi reformatted pandas/_libs/tslibs/timestamps.pyi reformatted pandas/_libs/window/indexers.pyi reformatted pandas/_libs/writers.pyi reformatted pandas/_libs/window/aggregations.pyi reformatted pandas/_testing/__init__.py reformatted pandas/conftest.py reformatted pandas/core/algorithms.py reformatted pandas/core/apply.py reformatted pandas/core/arrays/categorical.py reformatted pandas/core/arrays/masked.py reformatted pandas/core/arrays/sparse/scipy_sparse.py reformatted pandas/core/computation/expressions.py reformatted pandas/core/computation/expr.py reformatted pandas/core/computation/pytables.py reformatted pandas/core/dtypes/common.py reformatted pandas/core/dtypes/cast.py reformatted 
pandas/core/dtypes/dtypes.py reformatted pandas/core/generic.py reformatted pandas/core/groupby/ops.py reformatted pandas/core/indexes/category.py reformatted pandas/core/indexes/range.py reformatted pandas/core/indexes/multi.py reformatted pandas/core/internals/array_manager.py reformatted pandas/core/internals/blocks.py reformatted pandas/core/internals/managers.py reformatted pandas/core/ops/__init__.py reformatted pandas/core/nanops.py reformatted pandas/core/reshape/tile.py reformatted pandas/core/reshape/merge.py reformatted pandas/io/formats/format.py reformatted pandas/io/parquet.py reformatted pandas/io/parsers/readers.py reformatted pandas/io/sas/sas_xport.py reformatted pandas/io/sql.py reformatted pandas/io/pytables.py reformatted pandas/io/stata.py reformatted pandas/tests/arithmetic/test_period.py reformatted pandas/tests/base/test_constructors.py reformatted pandas/tests/dtypes/test_common.py reformatted pandas/tests/extension/base/reduce.py reformatted pandas/tests/frame/methods/test_clip.py reformatted pandas/tests/generic/test_label_or_level_utils.py reformatted pandas/tests/generic/test_generic.py reformatted pandas/tests/indexing/common.py reformatted pandas/tests/indexing/test_coercion.py reformatted pandas/tests/indexing/test_indexing.py reformatted pandas/tests/internals/test_internals.py reformatted pandas/tests/io/generate_legacy_storage_files.py reformatted pandas/tests/io/pytables/common.py reformatted pandas/tests/io/pytables/conftest.py reformatted pandas/tests/plotting/frame/test_frame.py reformatted pandas/tests/plotting/test_misc.py reformatted pandas/tests/resample/test_base.py reformatted pandas/tests/resample/test_deprecated.py reformatted pandas/tests/reshape/merge/test_merge_asof.py reformatted pandas/tests/series/methods/test_clip.py reformatted pandas/tests/test_multilevel.py All done! ✨ 🍰 ✨ 83 files reformatted, 1292 files left unchanged. 
codespell......................................................................................Passed Debug Statements (Python)......................................................................Passed Fix End of Files...............................................................................Passed Trim Trailing Whitespace.......................................................................Passed cpplint........................................................................................Passed flake8.........................................................................................Passed flake8 (cython)................................................................................Passed flake8 (cython template).......................................................................Passed isort..........................................................................................Passed pyupgrade......................................................................................Failed - hook id: pyupgrade - exit code: 1 - files were modified by this hook Rewriting pandas/tests/frame/test_arithmetic.py Rewriting pandas/core/arrays/sparse/scipy_sparse.py Rewriting pandas/core/reshape/melt.py Rewriting pandas/core/arrays/sparse/array.py Rewriting pandas/tests/frame/methods/test_describe.py Rewriting pandas/tests/indexing/test_categorical.py Rewriting pandas/io/formats/format.py Rewriting pandas/io/sql.py rst ``code`` is two backticks..................................................................Passed rst directives end with two colons.............................................................Passed rst ``inline code`` next to normal text........................................................Passed Strip unnecessary `# noqa`s....................................................................Passed flake8-rst.....................................................................................Passed Unwanted 
patterns..............................................................................Passed Generate pip dependency from conda.............................................................Passed Check flake8 version is synced across flake8, yesqa, and environment.yml.......................Passed Validate correct capitalization among titles in documentation..................................Passed Import pandas.array as pd_array in core........................................................Passed Use bool_t instead of bool in pandas/core/generic.py...........................................Passed ``` </details> </details> ## Changed files <details> <summary>Changed 89 files: </summary> - .pre-commit-config.yaml - pandas/_config/config.py - pandas/_libs/algos.pyi - pandas/_libs/arrays.pyi - pandas/_libs/groupby.pyi - pandas/_libs/hashtable.pyi - pandas/_libs/index.pyi - pandas/_libs/internals.pyi - pandas/_libs/join.pyi - pandas/_libs/lib.pyi - pandas/_libs/ops.pyi - pandas/_libs/parsers.pyi - pandas/_libs/reshape.pyi - pandas/_libs/testing.pyi - pandas/_libs/tslib.pyi - pandas/_libs/tslibs/ccalendar.pyi - pandas/_libs/tslibs/conversion.pyi - pandas/_libs/tslibs/dtypes.pyi - pandas/_libs/tslibs/fields.pyi - pandas/_libs/tslibs/nattype.pyi - pandas/_libs/tslibs/parsing.pyi - pandas/_libs/tslibs/period.pyi - pandas/_libs/tslibs/strptime.pyi - pandas/_libs/tslibs/timedeltas.pyi - pandas/_libs/tslibs/timestamps.pyi - pandas/_libs/tslibs/timezones.pyi - pandas/_libs/tslibs/tzconversion.pyi - pandas/_libs/tslibs/vectorized.pyi - pandas/_libs/window/aggregations.pyi - pandas/_libs/window/indexers.pyi - pandas/_libs/writers.pyi - pandas/_testing/__init__.py - pandas/conftest.py - pandas/core/algorithms.py - pandas/core/apply.py - pandas/core/arrays/categorical.py - pandas/core/arrays/masked.py - pandas/core/arrays/sparse/array.py - pandas/core/arrays/sparse/scipy_sparse.py - pandas/core/computation/expr.py - pandas/core/computation/expressions.py - 
pandas/core/computation/pytables.py - pandas/core/dtypes/cast.py - pandas/core/dtypes/common.py - pandas/core/dtypes/dtypes.py - pandas/core/generic.py - pandas/core/groupby/ops.py - pandas/core/indexes/category.py - pandas/core/indexes/multi.py - pandas/core/indexes/range.py - pandas/core/internals/array_manager.py - pandas/core/internals/blocks.py - pandas/core/internals/managers.py - pandas/core/nanops.py - pandas/core/ops/__init__.py - pandas/core/reshape/melt.py - pandas/core/reshape/merge.py - pandas/core/reshape/tile.py - pandas/io/formats/format.py - pandas/io/parquet.py - pandas/io/parsers/readers.py - pandas/io/pytables.py - pandas/io/sas/sas_xport.py - pandas/io/sql.py - pandas/io/stata.py - pandas/tests/arithmetic/test_period.py - pandas/tests/base/test_constructors.py - pandas/tests/dtypes/test_common.py - pandas/tests/extension/base/reduce.py - pandas/tests/frame/methods/test_clip.py - pandas/tests/frame/methods/test_describe.py - pandas/tests/frame/test_arithmetic.py - pandas/tests/generic/test_generic.py - pandas/tests/generic/test_label_or_level_utils.py - pandas/tests/indexing/common.py - pandas/tests/indexing/test_categorical.py - pandas/tests/indexing/test_coercion.py - pandas/tests/indexing/test_indexing.py - pandas/tests/internals/test_internals.py - pandas/tests/io/generate_legacy_storage_files.py - pandas/tests/io/pytables/common.py - pandas/tests/io/pytables/conftest.py - pandas/tests/plotting/frame/test_frame.py - pandas/tests/plotting/test_misc.py - pandas/tests/resample/test_base.py - pandas/tests/resample/test_deprecated.py - pandas/tests/reshape/merge/test_merge_asof.py - pandas/tests/series/methods/test_clip.py - pandas/tests/test_multilevel.py </details> <hr> [:octocat: Repo](https://github.com/technote-space/create-pr-action) | [:memo: Issues](https://github.com/technote-space/create-pr-action/issues) | [:department_store: Marketplace](https://github.com/marketplace/actions/create-pr-action)
https://api.github.com/repos/pandas-dev/pandas/pulls/41742
2021-05-31T07:41:01Z
2021-05-31T08:43:52Z
null
2021-06-14T09:16:28Z
ENH: Improve error message in corr/cov for Rolling/Expanding/EWM when other isn't a DataFrame/Series
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..4655968eb07b5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,6 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) +- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index d85aa20de5ab4..e0720c5d86df1 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,7 +1,6 @@ """Common utility functions for rolling operations""" from collections import defaultdict from typing import cast -import warnings import numpy as np @@ -15,17 +14,7 @@ def flex_binary_moment(arg1, arg2, f, pairwise=False): - if not ( - isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) - and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame)) - ): - raise TypeError( - "arguments to moment function must be of type np.ndarray/Series/DataFrame" - ) - - if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance( - arg2, (np.ndarray, ABCSeries) - ): + if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): X, Y = prep_binary(arg1, arg2) return f(X, Y) @@ -43,7 +32,7 @@ def dataframe_from_int_dict(data, frame_template): if pairwise is False: if arg1 is arg2: # special case in order to handle duplicate column names - for i, col in enumerate(arg1.columns): + for i in range(len(arg1.columns)): 
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) return dataframe_from_int_dict(results, arg1) else: @@ -51,23 +40,17 @@ def dataframe_from_int_dict(data, frame_template): raise ValueError("'arg1' columns are not unique") if not arg2.columns.is_unique: raise ValueError("'arg2' columns are not unique") - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - X, Y = arg1.align(arg2, join="outer") - X = X + 0 * Y - Y = Y + 0 * X - - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - res_columns = arg1.columns.union(arg2.columns) + X, Y = arg1.align(arg2, join="outer") + X, Y = prep_binary(X, Y) + res_columns = arg1.columns.union(arg2.columns) for col in res_columns: if col in X and col in Y: results[col] = f(X[col], Y[col]) return DataFrame(results, index=X.index, columns=res_columns) elif pairwise is True: results = defaultdict(dict) - for i, k1 in enumerate(arg1.columns): - for j, k2 in enumerate(arg2.columns): + for i in range(len(arg1.columns)): + for j in range(len(arg2.columns)): if j < i and arg2 is arg1: # Symmetric case results[i][j] = results[j][i] @@ -85,10 +68,10 @@ def dataframe_from_int_dict(data, frame_template): result = concat( [ concat( - [results[i][j] for j, c in enumerate(arg2.columns)], + [results[i][j] for j in range(len(arg2.columns))], ignore_index=True, ) - for i, c in enumerate(arg1.columns) + for i in range(len(arg1.columns)) ], ignore_index=True, axis=1, @@ -135,13 +118,10 @@ def dataframe_from_int_dict(data, frame_template): ) return result - - else: - raise ValueError("'pairwise' is not True/False") else: results = { i: f(*prep_binary(arg1.iloc[:, i], arg2)) - for i, col in enumerate(arg1.columns) + for i in range(len(arg1.columns)) } return dataframe_from_int_dict(results, arg1) @@ -165,11 +145,7 @@ def zsqrt(x): def prep_binary(arg1, arg2): - if not isinstance(arg2, type(arg1)): - raise Exception("Input arrays must be of the same type!") - # mask out 
values, this also makes a common index... X = arg1 + 0 * arg2 Y = arg2 + 0 * arg1 - return X, Y diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index dfb74b38cd9cf..2d5f148a6437a 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -472,6 +472,8 @@ def _apply_pairwise( other = target # only default unset pairwise = True if pairwise is None else pairwise + elif not isinstance(other, (ABCDataFrame, ABCSeries)): + raise ValueError("other must be a DataFrame or Series") return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py index a36091ab8934e..c79d02fd3237e 100644 --- a/pandas/tests/window/moments/test_moments_consistency_ewm.py +++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py @@ -64,9 +64,9 @@ def test_different_input_array_raise_exception(name): A = Series(np.random.randn(50), index=np.arange(50)) A[:10] = np.NaN - msg = "Input arrays must be of the same type!" 
+ msg = "other must be a DataFrame or Series" # exception raised is Exception - with pytest.raises(Exception, match=msg): + with pytest.raises(ValueError, match=msg): getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50)) diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index 28fd5633de02e..7ec5846ef4acf 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -13,7 +13,6 @@ Series, ) import pandas._testing as tm -from pandas.core.window.common import flex_binary_moment def _rolling_consistency_cases(): @@ -133,14 +132,6 @@ def test_rolling_corr_with_zero_variance(window): assert s.rolling(window=window).corr(other=other).isna().all() -def test_flex_binary_moment(): - # GH3155 - # don't blow the stack - msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame" - with pytest.raises(TypeError, match=msg): - flex_binary_moment(5, 6, None) - - def test_corr_sanity(): # GH 3155 df = DataFrame(
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Also some internal method cleanups
https://api.github.com/repos/pandas-dev/pandas/pulls/41741
2021-05-31T03:56:59Z
2021-05-31T14:48:46Z
2021-05-31T14:48:46Z
2021-05-31T16:30:49Z
TST: Make ARM build work (not in the CI)
diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 5ff2f783e6a96..0000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: 2.1 - -jobs: - test-arm: - machine: - image: ubuntu-2004:202101-01 - resource_class: arm.medium - environment: - ENV_FILE: ci/deps/circle-37-arm64.yaml - PYTEST_WORKERS: auto - PATTERN: "not slow and not network and not clipboard and not arm_slow" - steps: - - run: echo "CircleCI is working" - -workflows: - test: - jobs: - - test-arm diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/circle-37-arm64.yaml similarity index 100% rename from ci/deps/travis-37-arm64.yaml rename to ci/deps/circle-37-arm64.yaml diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..e6bd9950331ca 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -12,41 +12,30 @@ if [[ "$(uname)" == "Linux" && -n "$LC_ALL" ]]; then echo fi -MINICONDA_DIR="$HOME/miniconda3" - - -if [ -d "$MINICONDA_DIR" ]; then - echo - echo "rm -rf "$MINICONDA_DIR"" - rm -rf "$MINICONDA_DIR" -fi echo "Install Miniconda" -UNAME_OS=$(uname) -if [[ "$UNAME_OS" == 'Linux' ]]; then +DEFAULT_CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest" +if [[ "$(uname -m)" == 'aarch64' ]]; then + CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.10.1-4/Miniforge3-4.10.1-4-Linux-aarch64.sh" +elif [[ "$(uname)" == 'Linux' ]]; then if [[ "$BITS32" == "yes" ]]; then - CONDA_OS="Linux-x86" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86.sh" else - CONDA_OS="Linux-x86_64" + CONDA_URL="$DEFAULT_CONDA_URL-Linux-x86_64.sh" fi -elif [[ "$UNAME_OS" == 'Darwin' ]]; then - CONDA_OS="MacOSX-x86_64" +elif [[ "$(uname)" == 'Darwin' ]]; then + CONDA_URL="$DEFAULT_CONDA_URL-MacOSX-x86_64.sh" else - echo "OS $UNAME_OS not supported" + echo "OS $(uname) not supported" exit 1 fi - -if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then - 
CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-1/Miniforge3-4.8.5-1-Linux-aarch64.sh" -else - CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" -fi +echo "Downloading $CONDA_URL" wget -q $CONDA_URL -O miniconda.sh chmod +x miniconda.sh -# Installation path is required for ARM64 platform as miniforge script installs in path $HOME/miniforge3. +MINICONDA_DIR="$HOME/miniconda3" +rm -rf $MINICONDA_DIR ./miniconda.sh -b -p $MINICONDA_DIR - export PATH=$MINICONDA_DIR/bin:$PATH echo diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 8d64bf8852946..369832e9bc05c 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -92,6 +92,18 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" +def is_platform_arm() -> bool: + """ + Checking if he running platform use ARM architecture. + + Returns + ------- + bool + True if the running platform uses ARM architecture. + """ + return platform.machine() in ("arm64", "aarch64") + + def import_lzma(): """ Importing the `lzma` module. 
diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index cac145aa30fd0..bdb9c3f97e798 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm + from pandas.core.dtypes.dtypes import ( CategoricalDtype, IntervalDtype, @@ -168,6 +170,7 @@ def test_subtype_integer_with_non_integer_borders(self, subtype): ) tm.assert_index_equal(result, expected) + @pytest.mark.xfail(is_platform_arm(), reason="GH 41740") def test_subtype_integer_errors(self): # float64 -> uint64 fails with negative values index = interval_range(-10.0, 10.0) diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index eecb9492f29e3..643a5617abbeb 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ b/pandas/tests/tools/test_to_numeric.py @@ -4,6 +4,8 @@ from numpy import iinfo import pytest +from pandas.compat import is_platform_arm + import pandas as pd from pandas import ( DataFrame, @@ -750,7 +752,7 @@ def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected "UInt64", "signed", "UInt64", - marks=pytest.mark.xfail(reason="GH38798"), + marks=pytest.mark.xfail(not is_platform_arm(), reason="GH38798"), ), ([1, 1], "Int64", "unsigned", "UInt8"), ([1.0, 1.0], "Float32", "unsigned", "UInt8"), diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c28d54dd9fbfb..17a6d9216ca92 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -6,6 +6,7 @@ import numpy as np import pytest +from pandas.compat import is_platform_arm from pandas.errors import UnsupportedFunctionCall from pandas import ( @@ -1072,6 +1073,7 @@ def test_rolling_sem(frame_or_series): tm.assert_series_equal(result, expected) +@pytest.mark.xfail(is_platform_arm(), reason="GH 41740") @pytest.mark.parametrize( 
("func", "third_value", "values"), [
- [X] closes #41737 This seems to be working, see: https://app.circleci.com/pipelines/github/datapythonista/pandas/11/workflows/2ca0487f-5a63-4f01-936c-e83827db714f/jobs/11 But tests are failing. I opened #41740 the the failures.
https://api.github.com/repos/pandas-dev/pandas/pulls/41739
2021-05-31T01:04:02Z
2021-06-01T22:29:33Z
2021-06-01T22:29:33Z
2021-06-04T01:22:15Z
CLN: Remove travis build
diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 52fadca6b7846..0000000000000 --- a/.travis.yml +++ /dev/null @@ -1,72 +0,0 @@ -language: python -python: 3.7 - -addons: - apt: - update: true - packages: - - xvfb - -services: - - xvfb - -# To turn off cached cython files and compiler cache -# set NOCACHE-true -# To delete caches go to https://travis-ci.org/OWNER/REPOSITORY/caches or run -# travis cache --delete inside the project directory from the travis command line client -# The cache directories will be deleted if anything in ci/ changes in a commit -cache: - apt: true - ccache: true - directories: - - $HOME/.cache # cython cache - -env: - global: - # create a github personal access token - # cd pandas-dev/pandas - # travis encrypt 'PANDAS_GH_TOKEN=personal_access_token' -r pandas-dev/pandas - - secure: "EkWLZhbrp/mXJOx38CHjs7BnjXafsqHtwxPQrqWy457VDFWhIY1DMnIR/lOWG+a20Qv52sCsFtiZEmMfUjf0pLGXOqurdxbYBGJ7/ikFLk9yV2rDwiArUlVM9bWFnFxHvdz9zewBH55WurrY4ShZWyV+x2dWjjceWG5VpWeI6sA=" - -git: - depth: false - -matrix: - fast_finish: true - - include: - - arch: arm64-graviton2 - virt: lxd - group: edge - env: - - JOB="3.7, arm64" PYTEST_WORKERS="auto" ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)" - -before_install: - - echo "before_install" - # Use blocking IO on travis. 
Ref: https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 - - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' - - export PATH="$HOME/miniconda3/bin:$PATH" - - df -h - - pwd - - uname -a - - git --version - - ./ci/check_git_tags.sh - -install: - - echo "install start" - - ci/prep_cython_cache.sh - - ci/setup_env.sh - - ci/submit_cython_cache.sh - - echo "install done" - -script: - - echo "script start" - - echo "$JOB" - - source activate pandas-dev - - ci/run_tests.sh - -after_script: - - echo "after_script start" - - source activate pandas-dev && pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - - ci/print_skipped.py - - echo "after_script done" diff --git a/ci/check_git_tags.sh b/ci/check_git_tags.sh deleted file mode 100755 index 9dbcd4f98683e..0000000000000 --- a/ci/check_git_tags.sh +++ /dev/null @@ -1,28 +0,0 @@ -set -e - -if [[ ! $(git tag) ]]; then - echo "No git tags in clone, please sync your git tags with upstream using:" - echo " git fetch --tags upstream" - echo " git push --tags origin" - echo "" - echo "If the issue persists, the clone depth needs to be increased in .travis.yml" - exit 1 -fi - -# This will error if there are no tags and we omit --always -DESCRIPTION=$(git describe --long --tags) -echo "$DESCRIPTION" - -if [[ "$DESCRIPTION" == *"untagged"* ]]; then - echo "Unable to determine most recent tag, aborting build" - exit 1 -else - if [[ "$DESCRIPTION" != *"g"* ]]; then - # A good description will have the hash prefixed by g, a bad one will be - # just the hash - echo "Unable to determine most recent tag, aborting build" - exit 1 - else - echo "$(git tag)" - fi -fi diff --git a/ci/prep_cython_cache.sh b/ci/prep_cython_cache.sh deleted file mode 100755 index 18d9388327ddc..0000000000000 --- a/ci/prep_cython_cache.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -ls "$HOME/.cache/" - 
-PYX_CACHE_DIR="$HOME/.cache/pyxfiles" -pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` -pyx_cache_file_list=`find ${PYX_CACHE_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` - -CACHE_File="$HOME/.cache/cython_files.tar" - -# Clear the cython cache 0 = NO, 1 = YES -clear_cache=0 - -pyx_files=`echo "$pyx_file_list" | wc -l` -pyx_cache_files=`echo "$pyx_cache_file_list" | wc -l` - -if [[ pyx_files -ne pyx_cache_files ]] -then - echo "Different number of pyx files" - clear_cache=1 -fi - -home_dir=$(pwd) - -if [ -f "$CACHE_File" ] && [ -z "$NOCACHE" ] && [ -d "$PYX_CACHE_DIR" ]; then - - echo "Cache available - checking pyx diff" - - for i in ${pyx_file_list} - do - diff=`diff -u $i $PYX_CACHE_DIR${i}` - if [[ $? -eq 2 ]] - then - echo "${i##*/} can't be diffed; probably not in cache" - clear_cache=1 - fi - if [[ ! -z $diff ]] - then - echo "${i##*/} has changed:" - echo $diff - clear_cache=1 - fi - done - - if [ "$TRAVIS_PULL_REQUEST" == "false" ] - then - echo "Not a PR" - # Uncomment next 2 lines to turn off cython caching not in a PR - # echo "Non PR cython caching is disabled" - # clear_cache=1 - else - echo "In a PR" - # Uncomment next 2 lines to turn off cython caching in a PR - # echo "PR cython caching is disabled" - # clear_cache=1 - fi - -fi - -if [ $clear_cache -eq 0 ] && [ -z "$NOCACHE" ] -then - # No and nocache is not set - echo "Will reuse cached cython file" - cd / - tar xvmf $CACHE_File - cd $home_dir -else - echo "Rebuilding cythonized files" - echo "No cache = $NOCACHE" - echo "Clear cache (1=YES) = $clear_cache" -fi - - -exit 0 diff --git a/ci/setup_env.sh b/ci/setup_env.sh index c36422884f2ec..70a28d4467dfe 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -63,29 +63,6 @@ conda update -n base conda echo "conda info -a" conda info -a -echo -echo "set the compiler cache to work" -if [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "linux" ]; then - echo "Using ccache" - export 
PATH=/usr/lib/ccache:/usr/lib64/ccache:$PATH - GCC=$(which gcc) - echo "gcc: $GCC" - CCACHE=$(which ccache) - echo "ccache: $CCACHE" - export CC='ccache gcc' -elif [ -z "$NOCACHE" ] && [ "${TRAVIS_OS_NAME}" == "osx" ]; then - echo "Install ccache" - brew install ccache > /dev/null 2>&1 - echo "Using ccache" - export PATH=/usr/local/opt/ccache/libexec:$PATH - gcc=$(which gcc) - echo "gcc: $gcc" - CCACHE=$(which ccache) - echo "ccache: $CCACHE" -else - echo "Not using ccache" -fi - echo "source deactivate" source deactivate diff --git a/ci/submit_cython_cache.sh b/ci/submit_cython_cache.sh deleted file mode 100755 index b87acef0ba11c..0000000000000 --- a/ci/submit_cython_cache.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -CACHE_File="$HOME/.cache/cython_files.tar" -PYX_CACHE_DIR="$HOME/.cache/pyxfiles" -pyx_file_list=`find ${TRAVIS_BUILD_DIR} -name "*.pyx" -o -name "*.pxd" -o -name "*.pxi.in"` - -rm -rf $CACHE_File -rm -rf $PYX_CACHE_DIR - -home_dir=$(pwd) - -mkdir -p $PYX_CACHE_DIR -rsync -Rv $pyx_file_list $PYX_CACHE_DIR - -echo "pyx files:" -echo $pyx_file_list - -tar cf ${CACHE_File} --files-from /dev/null - -for i in ${pyx_file_list} -do - f=${i%.pyx} - ls $f.{c,cpp} | tar rf ${CACHE_File} -T - -done - -echo "Cython files in cache tar:" -tar tvf ${CACHE_File} - -exit 0
- [X] closes #38943 Whenever we can assume that Travis is not an option for ARM builds (or any other builds due to the lack of free credits), this is the PR that removes all travis specific stuff.
https://api.github.com/repos/pandas-dev/pandas/pulls/41738
2021-05-30T23:06:30Z
2021-06-01T13:51:18Z
2021-06-01T13:51:18Z
2021-06-01T13:51:18Z
CLN: Remove old docs README
diff --git a/doc/README.rst b/doc/README.rst deleted file mode 100644 index 5423e7419d03b..0000000000000 --- a/doc/README.rst +++ /dev/null @@ -1 +0,0 @@ -See `contributing.rst <https://pandas-docs.github.io/pandas-docs-travis/contributing.html>`_ in this repo.
Not sure why a README was created under `docs` in the first place. But having a docs/README that instead of having docs related information, just points to the contributing page, doesn't seem very useful, since the main README is already providing better information on how to contribute. And in any case, the link in that README file is very old.
https://api.github.com/repos/pandas-dev/pandas/pulls/41735
2021-05-30T21:09:50Z
2021-05-31T15:46:23Z
2021-05-31T15:46:23Z
2021-05-31T15:46:27Z
DEPR: silent overflow on Series construction
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 3c4b38a93b8ee..18ab118c4bf16 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -698,6 +698,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- Deprecated behavior of :class:`Series` construction with large-integer values and small-integer dtype silently overflowing; use ``Series(data).astype(dtype)`` instead (:issue:`41734`) - Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). 
To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8a230e5da01dc..5c7211a5d1852 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2036,7 +2036,7 @@ def construct_1d_ndarray_preserving_na( def maybe_cast_to_integer_array( arr: list | np.ndarray, dtype: np.dtype, copy: bool = False -): +) -> np.ndarray: """ Takes any dtype and returns the casted version, raising for when data is incompatible with integer/unsigned integer dtypes. @@ -2107,6 +2107,20 @@ def maybe_cast_to_integer_array( if is_float_dtype(arr.dtype) or is_object_dtype(arr.dtype): raise ValueError("Trying to coerce float values to integers") + if casted.dtype < arr.dtype: + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + warnings.warn( + f"Values are too large to be losslessly cast to {dtype}. " + "In a future version this will raise OverflowError. To retain the " + f"old behavior, use pd.Series(values).astype({dtype})", + FutureWarning, + stacklevel=find_stack_level(), + ) + return casted + + # No known cases that get here, but raising explicitly to cover our bases. 
+ raise ValueError(f"values cannot be losslessly cast to {dtype}") + def convert_scalar_for_putitemlike(scalar: Scalar, dtype: np.dtype) -> Scalar: """ diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 4a7c4faade00d..b617514f383af 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -358,7 +358,7 @@ def test_unstack_preserve_dtypes(self): "E": Series([1.0, 50.0, 100.0]).astype("float32"), "F": Series([3.0, 4.0, 5.0]).astype("float64"), "G": False, - "H": Series([1, 200, 923442], dtype="int8"), + "H": Series([1, 200, 923442]).astype("int8"), } ) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f03322f9b0d6c..9376bd5f025b3 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -711,6 +711,21 @@ def test_constructor_cast(self): with pytest.raises(ValueError, match=msg): Series(["a", "b", "c"], dtype=float) + def test_constructor_signed_int_overflow_deprecation(self): + # GH#41734 disallow silent overflow + msg = "Values are too large to be losslessly cast" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([1, 200, 923442], dtype="int8") + + expected = Series([1, -56, 50], dtype="int8") + tm.assert_series_equal(ser, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([1, 200, 923442], dtype="uint8") + + expected = Series([1, 200, 50], dtype="uint8") + tm.assert_series_equal(ser, expected) + def test_constructor_unsigned_dtype_overflow(self, uint_dtype): # see gh-15832 msg = "Trying to coerce negative values to unsigned integers"
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry xref #40114 which tried to change the behavior without a deprecation cycle. Handling DataFrame separately, as it is an entirely separate barrel of worms.
https://api.github.com/repos/pandas-dev/pandas/pulls/41734
2021-05-30T16:15:57Z
2021-06-01T14:54:02Z
2021-06-01T14:54:02Z
2021-06-01T16:00:09Z
DEPR: ignoring dtype in DataFrame constructor failures
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..b8c28bb8daadd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,7 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) -- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. --------------------------------------------------------------------------- @@ -686,6 +686,7 @@ Deprecations - Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) +- Deprecated behavior of :class:`DataFrame` constructor when a ``dtype`` is passed and the data cannot be cast to that dtype. 
In a future version, this will raise instead of being silently ignored (:issue:`24435`) - Deprecated passing arguments as positional (except for ``"method"``) in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.ffill`, :meth:`Series.ffill`, :meth:`DataFrame.bfill`, and :meth:`Series.bfill` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.sort_values` (other than ``"by"``) and :meth:`Series.sort_values` (:issue:`41485`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0c299056075c1..ff73bc227fdb2 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -12,6 +12,7 @@ Sequence, cast, ) +import warnings import numpy as np import numpy.ma as ma @@ -745,6 +746,17 @@ def _try_cast( if raise_cast_failure: raise else: + # we only get here with raise_cast_failure False, which means + # called via the DataFrame constructor + # GH#24435 + warnings.warn( + f"Could not cast to {dtype}, falling back to object. This " + "behavior is deprecated. 
In a future version, when a dtype is " + "passed to 'DataFrame', either all columns will be cast to that " + "dtype, or a TypeError will be raised", + FutureWarning, + stacklevel=7, + ) subarr = np.array(arr, dtype=object, copy=copy) return subarr diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index ba0acdc4f947b..34854be29ad1f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -258,8 +258,11 @@ def f(dtype): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) # these work (though results may be unexpected) - f("int64") - f("float64") + depr_msg = "either all columns will be cast to that dtype, or a TypeError will" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + f("int64") + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + f("float64") # 10822 # invalid error message on dt inference diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index d118a376b56ec..784969c199c9f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -207,7 +207,9 @@ def test_constructor_mixed(self, float_string_frame): assert float_string_frame["foo"].dtype == np.object_ def test_constructor_cast_failure(self): - foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) + msg = "either all columns will be cast to that dtype, or a TypeError will" + with tm.assert_produces_warning(FutureWarning, match=msg): + foo = DataFrame({"a": ["a", "b", "c"]}, dtype=np.float64) assert foo["a"].dtype == object # GH 3010, constructing with odd arrays @@ -683,7 +685,10 @@ def test_constructor_dict_cast2(self): "A": dict(zip(range(20), tm.makeStringIndex(20))), "B": dict(zip(range(15), np.random.randn(15))), } - frame = DataFrame(test_data, dtype=float) + msg = "either all columns will be cast to that dtype, or a TypeError will" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + frame = DataFrame(test_data, dtype=float) + assert len(frame) == 20 assert frame["A"].dtype == np.object_ assert frame["B"].dtype == np.float64 diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index f07bf3464b74c..f1fbe0c5a6b9c 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -206,27 +206,26 @@ def test_frame_getitem_nan_multiindex(nulls_fixture): df = DataFrame( [[11, n, 13], [21, n, 23], [31, n, 33], [41, n, 43]], columns=cols, - dtype="int64", ).set_index(["a", "b"]) + df["c"] = df["c"].astype("int64") idx = (21, n) result = df.loc[:idx] - expected = DataFrame( - [[11, n, 13], [21, n, 23]], columns=cols, dtype="int64" - ).set_index(["a", "b"]) + expected = DataFrame([[11, n, 13], [21, n, 23]], columns=cols).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") tm.assert_frame_equal(result, expected) result = df.loc[idx:] expected = DataFrame( - [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols, dtype="int64" + [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols ).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") tm.assert_frame_equal(result, expected) idx1, idx2 = (21, n), (31, n) result = df.loc[idx1:idx2] - expected = DataFrame( - [[21, n, 23], [31, n, 33]], columns=cols, dtype="int64" - ).set_index(["a", "b"]) + expected = DataFrame([[21, n, 23], [31, n, 33]], columns=cols).set_index(["a", "b"]) + expected["c"] = expected["c"].astype("int64") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index c1a096ed06efc..ab868a3d3713d 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -284,7 +284,12 @@ def test_loc_setitem_dtype(self): df.loc[:, cols] = df.loc[:, cols].astype("float32") expected = DataFrame( - {"id": ["A"], "a": 
[1.2], "b": [0.0], "c": [-2.5]}, dtype="float32" + { + "id": ["A"], + "a": np.array([1.2], dtype="float32"), + "b": np.array([0.0], dtype="float32"), + "c": np.array([-2.5], dtype="float32"), + } ) # id is inferred as object tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index 8af49ac20987a..653ea88ed62ac 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -272,8 +272,9 @@ def test_dataframe_dummies_subset(self, df, sparse): "from_A_a": [1, 0, 1], "from_A_b": [0, 1, 0], }, - dtype=np.uint8, ) + cols = expected.columns + expected[cols[1:]] = expected[cols[1:]].astype(np.uint8) expected[["C"]] = df[["C"]] if sparse: cols = ["from_A_a", "from_A_b"]
- [x] closes #24435 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41733
2021-05-30T15:40:44Z
2021-06-01T13:52:10Z
2021-06-01T13:52:10Z
2021-06-01T15:58:07Z
[ArrowStringArray] ENH: raise an ImportError when trying to create an arrow string dtype if pyarrow is not installed
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 219c52c4a65b9..d6d7743f3f5f3 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -23,11 +23,11 @@ type_t, ) from pandas.compat import ( + pa_version_under1p0, pa_version_under2p0, pa_version_under3p0, pa_version_under4p0, ) -from pandas.compat.pyarrow import pa_version_under1p0 from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs @@ -55,31 +55,33 @@ ) from pandas.core.strings.object_array import ObjectStringArrayMixin -try: +# PyArrow backed StringArrays are available starting at 1.0.0, but this +# file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute +# and its compute functions existed. GH38801 +if not pa_version_under1p0: import pyarrow as pa -except ImportError: - pa = None -else: - # PyArrow backed StringArrays are available starting at 1.0.0, but this - # file is imported from even if pyarrow is < 1.0.0, before pyarrow.compute - # and its compute functions existed. GH38801 - if not pa_version_under1p0: - import pyarrow.compute as pc - - ARROW_CMP_FUNCS = { - "eq": pc.equal, - "ne": pc.not_equal, - "lt": pc.less, - "gt": pc.greater, - "le": pc.less_equal, - "ge": pc.greater_equal, - } + import pyarrow.compute as pc + + ARROW_CMP_FUNCS = { + "eq": pc.equal, + "ne": pc.not_equal, + "lt": pc.less, + "gt": pc.greater, + "le": pc.less_equal, + "ge": pc.greater_equal, + } if TYPE_CHECKING: from pandas import Series +def _chk_pyarrow_available() -> None: + if pa_version_under1p0: + msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." 
+ raise ImportError(msg) + + @register_extension_dtype class ArrowStringDtype(StringDtype): """ @@ -112,6 +114,9 @@ class ArrowStringDtype(StringDtype): #: StringDtype.na_value uses pandas.NA na_value = libmissing.NA + def __init__(self): + _chk_pyarrow_available() + @property def type(self) -> type[str]: return str @@ -213,10 +218,8 @@ class ArrowStringArray(OpsMixin, ExtensionArray, ObjectStringArrayMixin): Length: 4, dtype: arrow_string """ - _dtype = ArrowStringDtype() - def __init__(self, values): - self._chk_pyarrow_available() + self._dtype = ArrowStringDtype() if isinstance(values, pa.Array): self._data = pa.chunked_array([values]) elif isinstance(values, pa.ChunkedArray): @@ -229,19 +232,11 @@ def __init__(self, values): "ArrowStringArray requires a PyArrow (chunked) array of string type" ) - @classmethod - def _chk_pyarrow_available(cls) -> None: - # TODO: maybe update import_optional_dependency to allow a minimum - # version to be specified rather than use the global minimum - if pa is None or pa_version_under1p0: - msg = "pyarrow>=1.0.0 is required for PyArrow backed StringArray." 
- raise ImportError(msg) - @classmethod def _from_sequence(cls, scalars, dtype: Dtype | None = None, copy: bool = False): from pandas.core.arrays.masked import BaseMaskedArray - cls._chk_pyarrow_available() + _chk_pyarrow_available() if isinstance(scalars, BaseMaskedArray): # avoid costly conversion to object dtype in ensure_string_array and diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py index ec7f57940a67f..3db8333798e36 100644 --- a/pandas/tests/arrays/string_/test_string_arrow.py +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -3,14 +3,25 @@ import numpy as np import pytest -from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.compat import pa_version_under1p0 -pa = pytest.importorskip("pyarrow", minversion="1.0.0") +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringDtype, +) +@pytest.mark.skipif( + pa_version_under1p0, + reason="pyarrow>=1.0.0 is required for PyArrow backed StringArray", +) @pytest.mark.parametrize("chunked", [True, False]) -@pytest.mark.parametrize("array", [np, pa]) +@pytest.mark.parametrize("array", ["numpy", "pyarrow"]) def test_constructor_not_string_type_raises(array, chunked): + import pyarrow as pa + + array = pa if array == "pyarrow" else np + arr = array.array([1, 2, 3]) if chunked: if array is np: @@ -24,3 +35,20 @@ def test_constructor_not_string_type_raises(array, chunked): ) with pytest.raises(ValueError, match=msg): ArrowStringArray(arr) + + +@pytest.mark.skipif( + not pa_version_under1p0, + reason="pyarrow is installed", +) +def test_pyarrow_not_installed_raises(): + msg = re.escape("pyarrow>=1.0.0 is required for PyArrow backed StringArray") + + with pytest.raises(ImportError, match=msg): + ArrowStringDtype() + + with pytest.raises(ImportError, match=msg): + ArrowStringArray([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArray._from_sequence(["a", None, "b"])
xref https://github.com/pandas-dev/pandas/pull/39908#discussion_r585581525
https://api.github.com/repos/pandas-dev/pandas/pulls/41732
2021-05-30T10:54:15Z
2021-05-31T15:49:51Z
2021-05-31T15:49:51Z
2021-05-31T17:25:55Z
DEPR: datetimelike inference with strings
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..091380d6ccb6c 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -697,6 +697,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- Deprecated inference of ``timedelta64[ns]``, ``datetime64[ns]``, or ``DatetimeTZDtype`` dtypes in :class:`Series` construction when data containing strings is passed and no ``dtype`` is passed (:issue:`33558`) - In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 5e1cc612bed57..06620c2ad0dca 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -153,7 +153,7 @@ def ensure_string_array( def infer_datetimelike_array( arr: np.ndarray # np.ndarray[object] -) -> str: ... +) -> tuple[str, bool]: ... 
def astype_intsafe( arr: np.ndarray, # np.ndarray[object] diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index d7e15bb2ad197..6a270c0a55638 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1558,7 +1558,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str: return "mixed" -def infer_datetimelike_array(arr: ndarray[object]) -> str: +def infer_datetimelike_array(arr: ndarray[object]) -> tuple[str, bool]: """ Infer if we have a datetime or timedelta array. - date: we have *only* date and maybe strings, nulls @@ -1576,12 +1576,13 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: Returns ------- str: {datetime, timedelta, date, nat, mixed} + bool """ cdef: Py_ssize_t i, n = len(arr) bint seen_timedelta = False, seen_date = False, seen_datetime = False bint seen_tz_aware = False, seen_tz_naive = False - bint seen_nat = False + bint seen_nat = False, seen_str = False list objs = [] object v @@ -1589,6 +1590,7 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: v = arr[i] if isinstance(v, str): objs.append(v) + seen_str = True if len(objs) == 3: break @@ -1609,7 +1611,7 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: seen_tz_aware = True if seen_tz_naive and seen_tz_aware: - return "mixed" + return "mixed", seen_str elif util.is_datetime64_object(v): # np.datetime64 seen_datetime = True @@ -1619,16 +1621,16 @@ def infer_datetimelike_array(arr: ndarray[object]) -> str: # timedelta, or timedelta64 seen_timedelta = True else: - return "mixed" + return "mixed", seen_str if seen_date and not (seen_datetime or seen_timedelta): - return "date" + return "date", seen_str elif seen_datetime and not seen_timedelta: - return "datetime" + return "datetime", seen_str elif seen_timedelta and not seen_datetime: - return "timedelta" + return "timedelta", seen_str elif seen_nat: - return "nat" + return "nat", seen_str # short-circuit by trying to # actually convert these strings @@ -1637,14 +1639,14 @@ def 
infer_datetimelike_array(arr: ndarray[object]) -> str: if len(objs): try: array_to_datetime(objs, errors="raise") - return "datetime" + return "datetime", seen_str except (ValueError, TypeError): pass # we are *not* going to infer from strings # for timedelta as too much ambiguity - return 'mixed' + return "mixed", seen_str cdef inline bint is_timedelta(object o): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c3efbfb426ab3..8a230e5da01dc 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1543,7 +1543,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: else: return td_values.reshape(shape) - inferred_type = lib.infer_datetimelike_array(ensure_object(v)) + inferred_type, seen_str = lib.infer_datetimelike_array(ensure_object(v)) if inferred_type == "datetime": # error: Incompatible types in assignment (expression has type "ExtensionArray", @@ -1572,6 +1572,15 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: # "ExtensionArray", variable has type "Union[ndarray, List[Any]]") value = try_datetime(v) # type: ignore[assignment] + if value.dtype.kind in ["m", "M"] and seen_str: + warnings.warn( + f"Inferring {value.dtype} from data containing strings is deprecated " + "and will be removed in a future version. 
To retain the old behavior " + "explicitly pass Series(data, dtype={value.dtype})", + FutureWarning, + stacklevel=find_stack_level(), + ) + # return v.reshape(shape) return value diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 88c3ad228f8c3..7e8dbea07709f 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -859,7 +859,9 @@ def test_apply_to_timedelta(): list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT] a = pd.to_timedelta(list_of_strings) # noqa - b = Series(list_of_strings).apply(pd.to_timedelta) # noqa + with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"): + ser = Series(list_of_strings) + b = ser.apply(pd.to_timedelta) # noqa # Can't compare until apply on a Series gives the correct dtype # assert_series_equal(a, b) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 215b51dd88ef4..6b3309ba8ea1b 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -328,7 +328,7 @@ def test_dt64arr_timestamp_equality(self, box_with_array): box_with_array if box_with_array not in [pd.Index, pd.array] else np.ndarray ) - ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), "NaT"]) + ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) ser = tm.box_expected(ser, box_with_array) result = ser != ser diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 09efa97871fae..31903c559d8df 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -1169,7 +1169,7 @@ def test_infer_dtype_period_with_na(self, na_value): ], ) def test_infer_datetimelike_array_datetime(self, data): - assert lib.infer_datetimelike_array(data) == "datetime" + assert lib.infer_datetimelike_array(data) == ("datetime", False) @pytest.mark.parametrize( 
"data", @@ -1181,11 +1181,11 @@ def test_infer_datetimelike_array_datetime(self, data): ], ) def test_infer_datetimelike_array_timedelta(self, data): - assert lib.infer_datetimelike_array(data) == "timedelta" + assert lib.infer_datetimelike_array(data) == ("timedelta", False) def test_infer_datetimelike_array_date(self): arr = [date(2017, 6, 12), date(2017, 3, 11)] - assert lib.infer_datetimelike_array(arr) == "date" + assert lib.infer_datetimelike_array(arr) == ("date", False) @pytest.mark.parametrize( "data", @@ -1200,7 +1200,7 @@ def test_infer_datetimelike_array_date(self): ], ) def test_infer_datetimelike_array_mixed(self, data): - assert lib.infer_datetimelike_array(data) == "mixed" + assert lib.infer_datetimelike_array(data)[0] == "mixed" @pytest.mark.parametrize( "first, expected", @@ -1218,7 +1218,7 @@ def test_infer_datetimelike_array_mixed(self, data): @pytest.mark.parametrize("second", [None, np.nan]) def test_infer_datetimelike_array_nan_nat_like(self, first, second, expected): first.append(second) - assert lib.infer_datetimelike_array(first) == expected + assert lib.infer_datetimelike_array(first) == (expected, False) def test_infer_dtype_all_nan_nat_like(self): arr = np.array([np.nan, np.nan]) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 7cc2b7f72fb69..82e6c4daf9515 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -305,27 +305,30 @@ def test_groupby_resample_interpolate(): .resample("1D") .interpolate(method="linear") ) - expected_ind = pd.MultiIndex.from_tuples( - [ - (50, "2018-01-07"), - (50, Timestamp("2018-01-08")), - (50, Timestamp("2018-01-09")), - (50, Timestamp("2018-01-10")), - (50, Timestamp("2018-01-11")), - (50, Timestamp("2018-01-12")), - (50, Timestamp("2018-01-13")), - (50, Timestamp("2018-01-14")), - (50, Timestamp("2018-01-15")), - (50, Timestamp("2018-01-16")), - (50, Timestamp("2018-01-17")), - (50, 
Timestamp("2018-01-18")), - (50, Timestamp("2018-01-19")), - (50, Timestamp("2018-01-20")), - (50, Timestamp("2018-01-21")), - (60, Timestamp("2018-01-14")), - ], - names=["volume", "week_starting"], - ) + + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected_ind = pd.MultiIndex.from_tuples( + [ + (50, "2018-01-07"), + (50, Timestamp("2018-01-08")), + (50, Timestamp("2018-01-09")), + (50, Timestamp("2018-01-10")), + (50, Timestamp("2018-01-11")), + (50, Timestamp("2018-01-12")), + (50, Timestamp("2018-01-13")), + (50, Timestamp("2018-01-14")), + (50, Timestamp("2018-01-15")), + (50, Timestamp("2018-01-16")), + (50, Timestamp("2018-01-17")), + (50, Timestamp("2018-01-18")), + (50, Timestamp("2018-01-19")), + (50, Timestamp("2018-01-20")), + (50, Timestamp("2018-01-21")), + (60, Timestamp("2018-01-14")), + ], + names=["volume", "week_starting"], + ) expected = DataFrame( data={ "price": [ diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index dcdee01bd4df8..62a9099fab1ad 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -679,6 +679,7 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], ], ) + @pytest.mark.filterwarnings("ignore:Inferring datetime64:FutureWarning") def test_isocalendar(self, input_series, expected_output): result = pd.to_datetime(Series(input_series)).dt.isocalendar() expected_frame = DataFrame( diff --git a/pandas/tests/series/methods/test_combine_first.py b/pandas/tests/series/methods/test_combine_first.py index 4c254c6db2a70..b838797b5f9b9 100644 --- a/pandas/tests/series/methods/test_combine_first.py +++ b/pandas/tests/series/methods/test_combine_first.py @@ -78,7 +78,11 @@ def test_combine_first_dt64(self): s0 = to_datetime(Series(["2010", np.NaN])) s1 = Series([np.NaN, "2011"]) rs = 
s0.combine_first(s1) - xp = Series([datetime(2010, 1, 1), "2011"]) + + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + xp = Series([datetime(2010, 1, 1), "2011"]) + tm.assert_series_equal(rs, xp) def test_combine_first_dt_tz_values(self, tz_naive_fixture): diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index 82c52bdaa29d7..1aec2a5e5d726 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -319,8 +319,11 @@ def test_datetime64_fillna(self): # GH#6587 # make sure that we are treating as integer when filling - # this also tests inference of a datetime-like with NaT's - ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"]) + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + # this also tests inference of a datetime-like with NaT's + ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"]) + expected = Series( [ "2013-08-05 15:30:00.000001", diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 646d1f0ab1508..f03322f9b0d6c 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -900,14 +900,23 @@ def test_constructor_dtype_datetime64_7(self): def test_constructor_dtype_datetime64_6(self): # these will correctly infer a datetime - s = Series([None, NaT, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([NaT, None, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" - s = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) - assert s.dtype == "datetime64[ns]" + msg = "containing strings is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([None, NaT, "2013-08-05 15:30:00.000001"]) + 
assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, None, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) + assert ser.dtype == "datetime64[ns]" def test_constructor_dtype_datetime64_5(self): # tz-aware (UTC and other tz's) @@ -1379,14 +1388,22 @@ def test_constructor_dtype_timedelta64(self): assert td.dtype == "object" # these will correctly infer a timedelta - s = Series([None, NaT, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([np.nan, NaT, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([NaT, None, "1 Day"]) - assert s.dtype == "timedelta64[ns]" - s = Series([NaT, np.nan, "1 Day"]) - assert s.dtype == "timedelta64[ns]" + msg = "containing strings is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([None, NaT, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([np.nan, NaT, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, None, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" + + with tm.assert_produces_warning(FutureWarning, match=msg): + ser = Series([NaT, np.nan, "1 Day"]) + assert ser.dtype == "timedelta64[ns]" # GH 16406 def test_constructor_mixed_tz(self): diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py index 1fc383521d31f..eb26ae688f00e 100644 --- a/pandas/tests/tools/test_to_timedelta.py +++ b/pandas/tests/tools/test_to_timedelta.py @@ -187,6 +187,16 @@ def test_to_timedelta_via_apply(self): result = 
Series([to_timedelta("00:00:01")]) tm.assert_series_equal(result, expected) + def test_to_timedelta_inference_without_warning(self): + # GH#41731 inference produces a warning in the Series constructor, + # but _not_ in to_timedelta + vals = ["00:00:01", pd.NaT] + with tm.assert_produces_warning(None): + result = to_timedelta(vals) + + expected = TimedeltaIndex([pd.Timedelta(seconds=1), pd.NaT]) + tm.assert_index_equal(result, expected) + def test_to_timedelta_on_missing_values(self): # GH5438 timedelta_NaT = np.timedelta64("NaT") @@ -197,7 +207,8 @@ def test_to_timedelta_on_missing_values(self): ) tm.assert_series_equal(actual, expected) - actual = to_timedelta(Series(["00:00:01", pd.NaT])) + with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"): + actual = to_timedelta(Series(["00:00:01", pd.NaT])) tm.assert_series_equal(actual, expected) actual = to_timedelta(np.nan)
- [x] closes #33558 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry this difference in behavior is one of the biggest things left keeping us from sharing more between Index/Series constructors.
https://api.github.com/repos/pandas-dev/pandas/pulls/41731
2021-05-30T05:28:55Z
2021-06-01T13:53:05Z
2021-06-01T13:53:05Z
2021-06-01T15:59:19Z
CI: suppress npdev warnings
diff --git a/pandas/conftest.py b/pandas/conftest.py index f948dc11bc014..329023ed7ba6a 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -66,6 +66,11 @@ MultiIndex, ) +# Until https://github.com/numpy/numpy/issues/19078 is sorted out, just suppress +suppress_npdev_promotion_warning = pytest.mark.filterwarnings( + "ignore:Promotion of numbers and bools:FutureWarning" +) + # ---------------------------------------------------------------- # Configuration / Settings # ---------------------------------------------------------------- @@ -112,6 +117,8 @@ def pytest_collection_modifyitems(items): if "/frame/" in item.nodeid: item.add_marker(pytest.mark.arraymanager) + item.add_marker(suppress_npdev_promotion_warning) + # Hypothesis hypothesis.settings.register_profile( diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 1bbe90f3cb58c..12220e825aed4 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -238,7 +238,7 @@ def test_compare_list_like_nan(self, op, interval_array, nulls_fixture, request) Categorical(list("abab")), Categorical(date_range("2017-01-01", periods=4)), pd.array(list("abcd")), - pd.array(["foo", 3.14, None, object()]), + pd.array(["foo", 3.14, None, object()], dtype=object), ], ids=lambda x: str(x.dtype), ) diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index 2c96cf291c154..ba8fe25401e8c 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import is_numpy_dev + from pandas import ( CategoricalDtype, DataFrame, @@ -171,20 +173,28 @@ def test_to_records_with_categorical(self): ), ), # Pass in a type instance. 
- ( + pytest.param( {"column_dtypes": str}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dtype instance. - ( + pytest.param( {"column_dtypes": np.dtype("unicode")}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, + reason="https://github.com/numpy/numpy/issues/19078", + ), ), # Pass in a dictionary (name-only). (
Let's try this again
https://api.github.com/repos/pandas-dev/pandas/pulls/41730
2021-05-30T03:18:54Z
2021-05-31T15:47:16Z
2021-05-31T15:47:15Z
2021-06-01T14:02:17Z
TYP: tighten types in core.construction
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6e71cb49596c8..bc44b23da25d5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -728,6 +728,15 @@ def __init__( if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") + # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; + # expected "Union[Union[Union[ExtensionArray, ndarray], + # Index, Series], Sequence[Any]]" + index = ensure_index(index) # type: ignore[arg-type] + # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; + # expected "Union[Union[Union[ExtensionArray, ndarray], + # Index, Series], Sequence[Any]]" + columns = ensure_index(columns) # type: ignore[arg-type] + if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) @@ -2325,6 +2334,7 @@ def _from_arrays( dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") + columns = ensure_index(columns) mgr = arrays_to_mgr( arrays, columns, diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 3a8915e94135a..46eb138dc74d1 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -47,10 +47,7 @@ from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ( ABCDataFrame, - ABCDatetimeIndex, - ABCIndex, ABCSeries, - ABCTimedeltaIndex, ) from pandas.core import ( @@ -71,7 +68,9 @@ ) from pandas.core.indexes import base as ibase from pandas.core.indexes.api import ( + DatetimeIndex, Index, + TimedeltaIndex, ensure_index, get_objs_combined_axis, union_indexes, @@ -101,7 +100,7 @@ def arrays_to_mgr( arrays, - arr_names, + arr_names: Index, index, columns, *, @@ -115,8 +114,6 @@ def arrays_to_mgr( Needs to handle a lot of exceptional cases. 
""" - arr_names = ensure_index(arr_names) - if verify_integrity: # figure out the index, if necessary if index is None: @@ -286,10 +283,12 @@ def ndarray_to_mgr( if columns is None: columns = Index(range(len(values))) + else: + columns = ensure_index(columns) return arrays_to_mgr(values, columns, index, columns, dtype=dtype, typ=typ) - if is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype): + elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype): # i.e. Datetime64TZ values = extract_array(values, extract_numpy=True) if copy: @@ -454,7 +453,7 @@ def dict_to_mgr( arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies - arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays] + arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays] arrays = [ arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays ] @@ -480,7 +479,7 @@ def nested_data_to_arrays( columns: Index | None, index: Index | None, dtype: DtypeObj | None, -): +) -> tuple[list[ArrayLike], Index, Index]: """ Convert a single sequence of arrays to multiple arrays. """ @@ -548,7 +547,7 @@ def convert(v): if is_list_like(values[0]): values = np.array([convert(v) for v in values]) elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: - # GH#21861 + # GH#21861 see test_constructor_list_of_lists values = np.array([convert(v) for v in values]) else: values = convert(values) @@ -566,31 +565,30 @@ def convert(v): return values -def _homogenize(data, index: Index, dtype: DtypeObj | None): +def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]: oindex = None homogenized = [] for val in data: if isinstance(val, ABCSeries): if dtype is not None: - val = val.astype(dtype) + val = val.astype(dtype, copy=False) if val.index is not index: # Forces alignment. 
No need to copy data since we # are putting it into an ndarray later val = val.reindex(index, copy=False) - # TODO extract_array should be preferred, but that gives failures for - # `extension/test_numpy.py` (extract_array will convert numpy arrays - # to PandasArray), see https://github.com/pandas-dev/pandas/issues/40021 - # val = extract_array(val, extract_numpy=True) + val = val._values else: if isinstance(val, dict): if oindex is None: oindex = index.astype("O") - if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)): + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + # see test_constructor_dict_datetime64_index val = dict_compat(val) else: + # see test_constructor_subclass_dict val = dict(val) val = lib.fast_multiget(val, oindex._values, default=np.nan) val = sanitize_array( @@ -749,6 +747,7 @@ def to_arrays( Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): + # see test_from_records_with_index_data, test_from_records_bad_index_column if columns is not None: arrays = [ data._ixs(i, axis=1).values @@ -884,7 +883,7 @@ def _list_of_dict_to_arrays( # assure that they are of the base dict class and not of derived # classes - data = [(type(d) is dict) and d or dict(d) for d in data] + data = [d if type(d) is dict else dict(d) for d in data] content = lib.dicts_to_array(data, list(columns)) return content, columns
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41729
2021-05-30T03:07:06Z
2021-05-31T21:30:51Z
2021-05-31T21:30:51Z
2021-05-31T21:37:32Z
REF: more explicit dtypes in strings.accessor
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 7643019ff8c55..29d37599b0785 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -13,7 +13,10 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import FrameOrSeriesUnion +from pandas._typing import ( + DtypeObj, + FrameOrSeriesUnion, +) from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -209,8 +212,12 @@ def _validate(data): # see _libs/lib.pyx for list of inferred types allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"] - values = getattr(data, "values", data) # Series / Index - values = getattr(values, "categories", values) # categorical / normal + # TODO: avoid kludge for tests.extension.test_numpy + from pandas.core.internals.managers import _extract_array + + data = _extract_array(data) + + values = getattr(data, "categories", data) # categorical / normal inferred_dtype = lib.infer_dtype(values, skipna=True) @@ -242,6 +249,7 @@ def _wrap_result( expand: bool | None = None, fill_value=np.nan, returns_string=True, + returns_bool: bool = False, ): from pandas import ( Index, @@ -319,11 +327,17 @@ def cons_row(x): else: index = self._orig.index # This is a mess. 
- dtype: str | None - if self._is_string and returns_string: - dtype = self._orig.dtype + dtype: DtypeObj | str | None + vdtype = getattr(result, "dtype", None) + if self._is_string: + if is_bool_dtype(vdtype): + dtype = result.dtype + elif returns_string: + dtype = self._orig.dtype + else: + dtype = vdtype else: - dtype = None + dtype = vdtype if expand: cons = self._orig._constructor_expanddim @@ -331,7 +345,7 @@ def cons_row(x): else: # Must be a Series cons = self._orig._constructor - result = cons(result, name=name, index=index) + result = cons(result, name=name, index=index, dtype=dtype) result = result.__finalize__(self._orig, method="str") if name is not None and result.ndim == 1: # __finalize__ might copy over the original name, but we may @@ -369,7 +383,7 @@ def _get_series_list(self, others): if isinstance(others, ABCSeries): return [others] elif isinstance(others, ABCIndex): - return [Series(others._values, index=idx)] + return [Series(others._values, index=idx, dtype=others.dtype)] elif isinstance(others, ABCDataFrame): return [others[x] for x in others] elif isinstance(others, np.ndarray) and others.ndim == 2: @@ -547,7 +561,7 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): sep = "" if isinstance(self._orig, ABCIndex): - data = Series(self._orig, index=self._orig) + data = Series(self._orig, index=self._orig, dtype=self._orig.dtype) else: # Series data = self._orig
Experimenting with #40489 I found that making Series inference behavior more like Index behavior broke a bunch of strings tests. This makes the strings code robust to that potential change by being more explicit about dtypes.
https://api.github.com/repos/pandas-dev/pandas/pulls/41727
2021-05-29T20:46:26Z
2021-06-09T00:27:45Z
2021-06-09T00:27:45Z
2021-06-09T00:34:50Z
DOC: Updates Index.any docstring from #40362
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 871c6a4a1c41d..319422f8d386a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6095,6 +6095,8 @@ def any(self, *args, **kwargs): Examples -------- + **any** + >>> index = pd.Index([0, 1, 2]) >>> index.any() True @@ -6102,6 +6104,16 @@ def any(self, *args, **kwargs): >>> index = pd.Index([0, 0, 0]) >>> index.any() False + + **all** + + >>> index = pd.Index([1, 2, 3]) + >>> index.all() + True + + >>> index = pd.Index([0, 1, 2]) + >>> index.all() + False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any")
- [ ] closes #40362: add an introduction to all() function as described in "See also" - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41726
2021-05-29T19:37:34Z
2021-08-17T02:11:06Z
null
2021-08-17T02:11:06Z
TST: Check map function works with StringDtype (#40823)
diff --git a/pandas/tests/apply/test_series_apply.py b/pandas/tests/apply/test_series_apply.py index 88c3ad228f8c3..6837bdb1a6b9c 100644 --- a/pandas/tests/apply/test_series_apply.py +++ b/pandas/tests/apply/test_series_apply.py @@ -115,6 +115,20 @@ def func(x): ser.apply(func) +def test_series_map_stringdtype(any_string_dtype): + # map test on StringDType, GH#40823 + ser1 = Series( + data=["cat", "dog", "rabbit"], + index=["id1", "id2", "id3"], + dtype=any_string_dtype, + ) + ser2 = Series(data=["id3", "id2", "id1", "id7000"], dtype=any_string_dtype) + result = ser2.map(ser1) + expected = Series(data=["rabbit", "dog", "cat", pd.NA], dtype=any_string_dtype) + + tm.assert_series_equal(result, expected) + + def test_apply_box(): # ufunc will not be boxed. Same test cases as the test_map_box vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
Checks map function output on 2 DataFrames with data type StringDtype. - [ ] closes #40823 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41723
2021-05-29T17:44:40Z
2021-06-02T13:24:14Z
2021-06-02T13:24:14Z
2021-06-02T13:24:18Z
Fix DataFrame.agg produces different types if the DataFrame is empty
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6e71cb49596c8..25251b120e66b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8534,6 +8534,9 @@ def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): result_in_dict = relabel_result(result, func, columns, order) result = DataFrame(result_in_dict, index=columns) + if isinstance(result, DataFrame): + result = result.squeeze() + return result agg = aggregate diff --git a/pandas/tests/frame/test_aggregate.py b/pandas/tests/frame/test_aggregate.py new file mode 100644 index 0000000000000..3fd7e147e58a7 --- /dev/null +++ b/pandas/tests/frame/test_aggregate.py @@ -0,0 +1,19 @@ +import numpy as np + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +def test_frame_aggregate(): + # GH#41672 + result = DataFrame([], columns=["lang", "name"]) + result = result.agg({"name": lambda y: y.values}) + assert type(result) == Series + + result = DataFrame([["a", "boof"]], columns=["lang", "name"]) + result = result.agg({"name": lambda y: y.values}) + assert type(result) == Series
- [ ✔] closes https://github.com/pandas-dev/pandas/issues/41672 - [ ❌] tests added / passed - [ ❌] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41722
2021-05-29T17:44:23Z
2021-08-17T02:07:44Z
null
2021-08-17T02:07:44Z
Improve Development Accesibility - Devcontainer [part 1 / 2]
diff --git a/.devcontainer.json b/.devcontainer.json index 8bea96aea29c1..e84133383847e 100644 --- a/.devcontainer.json +++ b/.devcontainer.json @@ -4,13 +4,13 @@ "name": "pandas", "context": ".", "dockerFile": "Dockerfile", - + "postCreateCommand": "bash container_create.sh", // Use 'settings' to set *default* container specific settings.json values on container create. // You can edit these settings after create using File > Preferences > Settings > Remote. "settings": { - "terminal.integrated.shell.linux": "/bin/bash", + "terminal.integrated.defaultProfile.linux": "/bin/bash", "python.condaPath": "/opt/conda/bin/conda", - "python.pythonPath": "/opt/conda/bin/python", + "python.pythonPath": "/workspaces/pandas/.venv/bin/python", "python.formatting.provider": "black", "python.linting.enabled": true, "python.linting.flake8Enabled": true, @@ -21,10 +21,12 @@ "pandas" ] }, - // Add the IDs of extensions you want installed when the container is created in the array below. "extensions": [ "ms-python.python", "ms-vscode.cpptools" + ], + "runArgs": [ + "--privileged" ] -} +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index b682d93efbd04..f80a11449a19a 100644 --- a/.gitignore +++ b/.gitignore @@ -119,3 +119,6 @@ doc/build/html/index.html doc/tmp.sv env/ doc/source/savefig/ + +# container +.venv \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index de1c564921de9..66bb233dea9ef 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,29 +20,4 @@ RUN apt-get update \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* -# Switch back to dialog for any ad-hoc use of apt-get -ENV DEBIAN_FRONTEND=dialog -# Clone pandas repo -RUN mkdir "$pandas_home" \ - && git clone "https://github.com/$gh_username/pandas.git" "$pandas_home" \ - && cd "$pandas_home" \ - && git remote add upstream "https://github.com/pandas-dev/pandas.git" \ - && git pull upstream master - -# Because it is surprisingly difficult to activate a conda environment inside a DockerFile -# 
(from personal experience and per https://github.com/ContinuumIO/docker-images/issues/89), -# we just update the base/root one from the 'environment.yml' file instead of creating a new one. -# -# Set up environment -RUN conda install -y mamba -RUN mamba env update -n base -f "$pandas_home/environment.yml" - -# Build C extensions and pandas -SHELL ["/bin/bash", "-c"] -RUN . /opt/conda/etc/profile.d/conda.sh \ - && conda activate base \ - && cd "$pandas_home" \ - && export \ - && python setup.py build_ext -j 4 \ - && python -m pip install -e . diff --git a/container_create.sh b/container_create.sh new file mode 100644 index 0000000000000..559600ca95c36 --- /dev/null +++ b/container_create.sh @@ -0,0 +1,12 @@ +# Switch back to dialog for any ad-hoc use of apt-get +export DEBIAN_FRONTEND=dialog + +# Set up environment +conda install -y mamba +mamba env update -p .venv -f "environment.yml" + +source activate /workspaces/pandas/.venv + +# build + install pandas +python setup.py build_ext -j 4 +python -m pip install -e . \ No newline at end of file
## Whats new This PR introduces some changes that improve the development experience via VSCode's devcontainers. Specifically: * It removes the cloning of the pandas repo inside the container (it assumes that the user already cloned it on the host) and instead leverages the fact the the code is mounted to `/workspaces/pandas`. * Defines the conda env inside the workspace itself such that it can be reused even when the container is deleted. This enables you to use the codebase outside the repo afterwards if you activate the same local conda env. * Splits the previous `Dockerfile` in two: a simplified `Dockerfile` + a new `container_create.sh` that only runs the first time the container starts, making it possible to install the dependencies inside the workspace.
https://api.github.com/repos/pandas-dev/pandas/pulls/41721
2021-05-29T16:53:04Z
2021-07-12T18:34:45Z
null
2021-08-05T18:46:57Z
DEPR: Deprecated passing arguments as positional in pd.concat
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index d8f39a7d6e3c0..991b9a40d151b 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -423,7 +423,7 @@ Other enhancements .. code-block:: ipython - In [1]: pd.concat([foo, bar, baz], 1) + In [1]: pd.concat([foo, bar, baz], axis=1) Out[1]: 0 1 2 0 1 1 4 @@ -433,7 +433,7 @@ Other enhancements .. ipython:: python - pd.concat([foo, bar, baz], 1) + pd.concat([foo, bar, baz], axis=1) - ``DataFrame`` has gained the ``nlargest`` and ``nsmallest`` methods (:issue:`10393`) diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ce613fd78c1e1..55e8196754fdb 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -711,6 +711,7 @@ Deprecations - Deprecated passing lists as ``key`` to :meth:`DataFrame.xs` and :meth:`Series.xs` (:issue:`41760`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) +- Deprecated passing arguments as positional (other than ``objs``) in :func:`concat` (:issue:`41485`) .. _whatsnew_130.deprecations.nuisance_columns: diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b3b453ea6355a..ea34bc75b4e31 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -16,7 +16,10 @@ import numpy as np from pandas._typing import FrameOrSeriesUnion -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import ( + cache_readonly, + deprecate_nonkeyword_arguments, +) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( @@ -84,6 +87,7 @@ def concat( ... 
+@deprecate_nonkeyword_arguments(version=None, allowed_args=["objs"]) def concat( objs: Iterable[NDFrame] | Mapping[Hashable, NDFrame], axis=0, diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e4f3bcb89cf7e..1fef33558dd9a 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1762,7 +1762,7 @@ def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFra columns = data.columns replacement_df = DataFrame(replacements) replaced = concat( - [data.drop(replacement_df.columns, axis=1), replacement_df], 1 + [data.drop(replacement_df.columns, axis=1), replacement_df], axis=1 ) data = replaced[columns] return data diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py index 8e1dee5873512..6cfe80ae5c87c 100644 --- a/pandas/tests/io/pytables/test_complex.py +++ b/pandas/tests/io/pytables/test_complex.py @@ -205,4 +205,4 @@ def test_complex_append(setup_path): store.append("df", df, data_columns=["b"]) store.append("df", df) result = store.select("df") - tm.assert_frame_equal(pd.concat([df, df], 0), result) + tm.assert_frame_equal(pd.concat([df, df], axis=0), result) diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 96b88dc61cfed..17a7089f0ac85 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -638,3 +638,18 @@ def test_concat_multiindex_with_empty_rangeindex(): result = concat([df1, df2]) expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi) tm.assert_frame_equal(result, expected) + + +def test_concat_posargs_deprecation(): + # https://github.com/pandas-dev/pandas/issues/41485 + df = DataFrame([[1, 2, 3]], index=["a"]) + df2 = DataFrame([[4, 5, 6]], index=["b"]) + + msg = ( + "In a future version of pandas all arguments of concat " + "except for the argument 'objs' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df, df2], 
0) + expected = DataFrame([[1, 2, 3], [4, 5, 6]], index=["a", "b"]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/concat/test_invalid.py b/pandas/tests/reshape/concat/test_invalid.py index 95a81ce61c785..cd2a7ca33a267 100644 --- a/pandas/tests/reshape/concat/test_invalid.py +++ b/pandas/tests/reshape/concat/test_invalid.py @@ -27,13 +27,12 @@ def test_concat_invalid(self): def test_concat_invalid_first_argument(self): df1 = tm.makeCustomDataframe(10, 2) - df2 = tm.makeCustomDataframe(10, 2) msg = ( "first argument must be an iterable of pandas " 'objects, you passed an object of type "DataFrame"' ) with pytest.raises(TypeError, match=msg): - concat(df1, df2) + concat(df1) # generator ok though concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41718
2021-05-29T14:24:17Z
2021-06-07T16:46:24Z
2021-06-07T16:46:24Z
2021-06-07T16:46:28Z
CLN: Deprecate non-keyword arguments in read_table #41485
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..38682d188e57a 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -700,7 +700,7 @@ Deprecations - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) -- +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_table` (:issue:`41485`) .. _whatsnew_130.deprecations.nuisance_columns: diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 8bf1ab1260b8e..a384846b7a063 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -592,6 +592,9 @@ def read_csv( return _read(filepath_or_buffer, kwds) +@deprecate_nonkeyword_arguments( + version=None, allowed_args=["filepath_or_buffer"], stacklevel=3 +) @Appender( _doc_read_csv_and_table.format( func_name="read_table", diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 97b3be1306cd5..8fa2d7f7b8d65 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -823,3 +823,15 @@ def test_malformed_second_line(all_parsers): result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1) expected = DataFrame({"a": ["b"]}) tm.assert_frame_equal(result, expected) + + +def test_read_table_posargs_deprecation(all_parsers): + # https://github.com/pandas-dev/pandas/issues/41485 + data = StringIO("a\tb\n1\t2") + parser = all_parsers + msg = ( + "In a future version of pandas all arguments of read_table " + "except for the argument 
'filepath_or_buffer' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + parser.read_table(data, " ")
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41717
2021-05-29T13:37:49Z
2021-05-31T14:52:28Z
2021-05-31T14:52:28Z
2021-05-31T14:52:34Z
Fix inconsistent results for empty datasets describe
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 69f992f840c7c..b9011c1f06cae 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -67,7 +67,10 @@ validate_func_kwargs, ) from pandas.core.apply import GroupByApply -from pandas.core.base import SpecificationError +from pandas.core.base import ( + DataError, + SpecificationError, +) import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.frame import DataFrame @@ -513,12 +516,16 @@ def _cython_transform( obj = self._selected_obj + is_numeric = is_numeric_dtype(obj.dtype) + if numeric_only and not is_numeric: + raise DataError("No numeric types to aggregate") + try: result = self.grouper._cython_operation( "transform", obj._values, how, axis, **kwargs ) - except NotImplementedError as err: - raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err + except (NotImplementedError, TypeError): + raise DataError("No numeric types to aggregate") return obj._constructor(result, index=self.obj.index, name=obj.name) @@ -675,7 +682,7 @@ def nunique(self, dropna: bool = True) -> Series: @doc(Series.describe) def describe(self, **kwargs): result = self.apply(lambda x: x.describe(**kwargs)) - if self.axis == 1: + if self.axis == 1 or not isinstance(result.index, MultiIndex): return result.T return result.unstack() @@ -1057,6 +1064,7 @@ def _cython_agg_general( # Note: we never get here with how="ohlc"; that goes through SeriesGroupBy data: Manager2D = self._get_data_to_aggregate() + orig = data if numeric_only: data = data.get_numeric_data(copy=False) @@ -1079,6 +1087,9 @@ def array_func(values: ArrayLike) -> ArrayLike: # continue and exclude the block new_mgr = data.grouped_reduce(array_func, ignore_failures=True) + if not len(new_mgr) and len(orig): + # If the original Manager was already empty, no need to raise + raise DataError("No numeric types to aggregate") if len(new_mgr) < 
len(data): warnings.warn( f"Dropping invalid columns in {type(self).__name__}.{how} " @@ -1850,4 +1861,4 @@ def func(df): return self._python_apply_general(func, self._obj_with_exclusions) - boxplot = boxplot_frame_groupby + boxplot = boxplot_frame_groupby \ No newline at end of file diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 95bb010015f62..faf1d9684730a 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1206,3 +1206,10 @@ def test_groupby_sum_below_mincount_nullable_integer(): result = grouped.sum(min_count=2) expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) tm.assert_frame_equal(result, expected) + +def test_groupby_empty_dataset(): + # 41575 + df = DataFrame(columns=["A", "B", "C"]) + result = df.groupby("A").B.describe().reset_index(drop=True) + expected = Series([], name="B", dtype=np.object_) + tm.assert_series_equal(result, expected)
- [✔ ] closes https://github.com/pandas-dev/pandas/issues/41575 - [ ✔] tests added / passed - [✔ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41716
2021-05-29T09:55:20Z
2021-08-17T02:03:23Z
null
2021-08-17T02:03:23Z
ENH: maybe_convert_objects corner cases
diff --git a/pandas/_libs/lib.pyi b/pandas/_libs/lib.pyi index 06620c2ad0dca..b84cacfd74840 100644 --- a/pandas/_libs/lib.pyi +++ b/pandas/_libs/lib.pyi @@ -11,7 +11,10 @@ from typing import ( import numpy as np -from pandas._typing import ArrayLike +from pandas._typing import ( + ArrayLike, + DtypeObj, +) # placeholder until we can specify np.ndarray[object, ndim=2] ndarray_obj_2d = np.ndarray @@ -73,6 +76,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: Literal[False] = ..., convert_to_nullable_integer: Literal[False] = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> np.ndarray: ... @overload @@ -85,6 +89,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: Literal[True] = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -97,6 +102,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -109,6 +115,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: Literal[True] = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... @overload @@ -121,6 +128,7 @@ def maybe_convert_objects( convert_timedelta: bool = ..., convert_period: bool = ..., convert_to_nullable_integer: bool = ..., + dtype_if_all_nat: DtypeObj | None = ..., ) -> ArrayLike: ... 
@overload diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4d184ee13e3db..73ff3b85ca46b 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -84,6 +84,10 @@ from pandas._libs.util cimport ( ) from pandas._libs.tslib import array_to_datetime +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) from pandas._libs.tslibs.period import Period from pandas._libs.missing cimport ( @@ -1640,7 +1644,8 @@ def infer_datetimelike_array(arr: ndarray[object]) -> tuple[str, bool]: # convert *every* string array if len(objs): try: - array_to_datetime(objs, errors="raise") + # require_iso8601 as in maybe_infer_to_datetimelike + array_to_datetime(objs, errors="raise", require_iso8601=True) return "datetime", seen_str except (ValueError, TypeError): pass @@ -2322,7 +2327,8 @@ def maybe_convert_objects(ndarray[object] objects, bint convert_timedelta=False, bint convert_period=False, bint convert_interval=False, - bint convert_to_nullable_integer=False) -> "ArrayLike": + bint convert_to_nullable_integer=False, + object dtype_if_all_nat=None) -> "ArrayLike": """ Type inference function-- convert object array to proper dtype @@ -2351,6 +2357,8 @@ def maybe_convert_objects(ndarray[object] objects, convert_to_nullable_integer : bool, default False If an array-like object contains only integer values (and NaN) is encountered, whether to convert and return an IntegerArray. + dtype_if_all_nat : np.dtype, ExtensionDtype, or None, default None + Dtype to cast to if we have all-NaT. 
Returns ------- @@ -2419,8 +2427,12 @@ def maybe_convert_objects(ndarray[object] objects, seen.float_ = True elif is_timedelta(val): if convert_timedelta: - itimedeltas[i] = convert_to_timedelta64(val, "ns").view("i8") seen.timedelta_ = True + try: + itimedeltas[i] = convert_to_timedelta64(val, "ns").view("i8") + except OutOfBoundsTimedelta: + seen.object_ = True + break else: seen.object_ = True break @@ -2457,8 +2469,12 @@ def maybe_convert_objects(ndarray[object] objects, break else: seen.datetime_ = True - idatetimes[i] = convert_to_tsobject( - val, None, None, 0, 0).value + try: + idatetimes[i] = convert_to_tsobject( + val, None, None, 0, 0).value + except OutOfBoundsDatetime: + seen.object_ = True + break else: seen.object_ = True break @@ -2546,8 +2562,13 @@ def maybe_convert_objects(ndarray[object] objects, elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: - # TODO: array full of NaT ambiguity resolve here needed - pass + dtype = dtype_if_all_nat + if dtype is not None: + # otherwise we keep object dtype + result = _infer_all_nats( + dtype, datetimes, timedeltas + ) + elif convert_datetime: result = datetimes elif convert_timedelta: @@ -2586,8 +2607,13 @@ def maybe_convert_objects(ndarray[object] objects, elif seen.nat_: if not seen.numeric_: if convert_datetime and convert_timedelta: - # TODO: array full of NaT ambiguity resolve here needed - pass + dtype = dtype_if_all_nat + if dtype is not None: + # otherwise we keep object dtype + result = _infer_all_nats( + dtype, datetimes, timedeltas + ) + elif convert_datetime: result = datetimes elif convert_timedelta: @@ -2618,6 +2644,26 @@ def maybe_convert_objects(ndarray[object] objects, return objects +cdef _infer_all_nats(dtype, ndarray datetimes, ndarray timedeltas): + """ + If we have all-NaT values, cast these to the given dtype. 
+ """ + if isinstance(dtype, np.dtype): + if dtype == "M8[ns]": + result = datetimes + elif dtype == "m8[ns]": + result = timedeltas + else: + raise ValueError(dtype) + else: + # ExtensionDtype + cls = dtype.construct_array_type() + i8vals = np.empty(len(datetimes), dtype="i8") + i8vals.fill(NPY_NAT) + result = cls(i8vals, dtype=dtype) + return result + + class NoDefault(Enum): # We make this an Enum # 1) because it round-trips through pickle correctly (see GH#40397) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 7e0b26391e132..3c541a309e42a 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -665,6 +665,57 @@ def test_maybe_convert_objects_datetime(self): ) tm.assert_numpy_array_equal(out, exp) + def test_maybe_convert_objects_dtype_if_all_nat(self): + arr = np.array([pd.NaT, pd.NaT], dtype=object) + out = lib.maybe_convert_objects( + arr, convert_datetime=True, convert_timedelta=True + ) + # no dtype_if_all_nat passed -> we dont guess + tm.assert_numpy_array_equal(out, arr) + + out = lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("timedelta64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]") + tm.assert_numpy_array_equal(out, exp) + + out = lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("datetime64[ns]"), + ) + exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]") + tm.assert_numpy_array_equal(out, exp) + + def test_maybe_convert_objects_dtype_if_all_nat_invalid(self): + # we accept datetime64[ns], timedelta64[ns], and EADtype + arr = np.array([pd.NaT, pd.NaT], dtype=object) + + with pytest.raises(ValueError, match="int64"): + lib.maybe_convert_objects( + arr, + convert_datetime=True, + convert_timedelta=True, + dtype_if_all_nat=np.dtype("int64"), + ) + + @pytest.mark.parametrize("dtype", ["datetime64[ns]", 
"timedelta64[ns]"]) + def test_maybe_convert_objects_datetime_overflow_safe(self, dtype): + stamp = datetime(2363, 10, 4) # Enterprise-D launch date + if dtype == "timedelta64[ns]": + stamp = stamp - datetime(1970, 1, 1) + arr = np.array([stamp], dtype=object) + + out = lib.maybe_convert_objects( + arr, convert_datetime=True, convert_timedelta=True + ) + # no OutOfBoundsDatetime/OutOfBoundsTimedeltas + tm.assert_numpy_array_equal(out, arr) + def test_maybe_convert_objects_timedelta64_nat(self): obj = np.timedelta64("NaT", "ns") arr = np.array([obj], dtype=object)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41714
2021-05-29T04:30:18Z
2021-06-02T15:18:33Z
2021-06-02T15:18:33Z
2021-06-02T17:00:44Z
CI: suppress numpy FutureWarnings
diff --git a/pyproject.toml b/pyproject.toml index 01d28777eb47e..9bc80befa39b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ doctest_optionflags = [ filterwarnings = [ "error:Sparse:FutureWarning", "error:The SparseArray:FutureWarning", + "ignore:Promotion of numbers:FutureWarning", ] junit_family = "xunit2" markers = [
hopefully temporary
https://api.github.com/repos/pandas-dev/pandas/pulls/41713
2021-05-29T03:55:38Z
2021-05-29T15:06:57Z
null
2021-05-29T15:07:08Z
TST: More old issues
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index a8df09d479f22..62d7535159f13 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -638,6 +638,19 @@ def test_setitem_dtypes_bytes_type_to_object(self): expected = Series([np.uint32, object, object, np.uint8], index=list("abcd")) tm.assert_series_equal(result, expected) + def test_boolean_mask_nullable_int64(self): + # GH 28928 + result = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + mask = Series(False, index=result.index) + result.loc[mask, "a"] = result["a"] + result.loc[mask, "b"] = result["b"] + expected = DataFrame({"a": [3, 4], "b": [5, 6]}).astype( + {"a": "int64", "b": "Int64"} + ) + tm.assert_frame_equal(result, expected) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index f9535e9c7ef17..80f97ecaee121 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -238,3 +238,22 @@ def test_append_numpy_bug_1681(self, dtype): result = df.append(other) assert (result["B"] == index).all() + + @pytest.mark.filterwarnings("ignore:The values in the array:RuntimeWarning") + def test_multiindex_column_append_multiple(self): + # GH 29699 + df = DataFrame( + [[1, 11], [2, 12], [3, 13]], + columns=pd.MultiIndex.from_tuples( + [("multi", "col1"), ("multi", "col2")], names=["level1", None] + ), + ) + df2 = df.copy() + for i in range(1, 10): + df[i, "colA"] = 10 + df = df.append(df2, ignore_index=True) + result = df["multi"] + expected = DataFrame( + {"col1": [1, 2, 3] * (i + 1), "col2": [11, 12, 13] * (i + 1)} + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 76e24a27e0854..b3eeab9db4ad5 100644 --- 
a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -502,3 +502,9 @@ def test_drop_inplace_no_leftover_column_reference(self): tm.assert_index_equal(df.columns, Index([], dtype="object")) a -= a.mean() tm.assert_index_equal(df.columns, Index([], dtype="object")) + + def test_drop_level_missing_label_multiindex(self): + # GH 18561 + df = DataFrame(index=MultiIndex.from_product([range(3), range(3)])) + with pytest.raises(KeyError, match="labels \\[5\\] not found in level"): + df.drop(5, level=0) diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index 8a3ac265db154..84992982a104a 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -60,6 +60,24 @@ def test_set_reset_index_intervalindex(self): df = df.reset_index() + def test_setitem_reset_index_dtypes(self): + # GH 22060 + df = DataFrame(columns=["a", "b", "c"]).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64} + ) + df1 = df.set_index(["a"]) + df1["d"] = [] + result = df1.reset_index() + expected = DataFrame(columns=["a", "b", "c", "d"], index=range(0)).astype( + {"a": "datetime64[ns]", "b": np.int64, "c": np.float64, "d": np.float64} + ) + tm.assert_frame_equal(result, expected) + + df2 = df.set_index(["a", "b"]) + df2["d"] = [] + result = df2.reset_index() + tm.assert_frame_equal(result, expected) + class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index dbb6bb116828a..6e176310da6b4 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -775,6 +775,16 @@ def test_sort_index_ascending_bad_value_raises(self, ascending): with pytest.raises(ValueError, match=match): df.sort_index(axis=0, ascending=ascending, na_position="first") + def 
test_sort_index_use_inf_as_na(self): + # GH 29687 + expected = DataFrame( + {"col1": [1, 2, 3], "col2": [3, 4, 5]}, + index=pd.date_range("2020", periods=3), + ) + with pd.option_context("mode.use_inf_as_na", True): + result = expected.sort_index() + tm.assert_frame_equal(result, expected) + class TestDataFrameSortIndexKey: def test_sort_multi_index_key(self): diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 03c5b6e027dac..e2cfc50510173 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -322,3 +322,11 @@ def test_frame_to_string_with_periodindex(self): # it works! frame.to_string() + + def test_datetime64tz_slice_non_truncate(self): + # GH 30263 + df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")}) + expected = repr(df) + df = df.iloc[:, :5] + result = repr(df) + assert result == expected diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 70bdfe92602b2..719fdb353e3cf 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2338,3 +2338,24 @@ def test_groupby_filtered_df_std(): index=Index([True], name="groupby_col"), ) tm.assert_frame_equal(result, expected) + + +def test_datetime_categorical_multikey_groupby_indices(): + # GH 26859 + df = DataFrame( + { + "a": Series(list("abc")), + "b": Series( + to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"]), + dtype="category", + ), + "c": Categorical.from_codes([-1, 0, 1], categories=[0, 1]), + } + ) + result = df.groupby(["a", "b"]).indices + expected = { + ("a", Timestamp("2018-01-01 00:00:00")): np.array([0]), + ("b", Timestamp("2018-02-01 00:00:00")): np.array([1]), + ("c", Timestamp("2018-03-01 00:00:00")): np.array([2]), + } + assert result == expected diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index dfbf1a5b2cdc2..e7a5e931f5297 100644 --- a/pandas/tests/groupby/test_nth.py +++ 
b/pandas/tests/groupby/test_nth.py @@ -663,3 +663,29 @@ def test_first_categorical_and_datetime_data_nat(): ) expected.index = Index(["first", "second", "third"], name="group") tm.assert_frame_equal(result, expected) + + +def test_first_multi_key_groupbby_categorical(): + # GH 22512 + df = DataFrame( + { + "A": [1, 1, 1, 2, 2], + "B": [100, 100, 200, 100, 100], + "C": ["apple", "orange", "mango", "mango", "orange"], + "D": ["jupiter", "mercury", "mars", "venus", "venus"], + } + ) + df = df.astype({"D": "category"}) + result = df.groupby(by=["A", "B"]).first() + expected = DataFrame( + { + "C": ["apple", "mango", "mango"], + "D": Series(["jupiter", "mars", "venus"]).astype( + pd.CategoricalDtype(["jupiter", "mars", "mercury", "venus"]) + ), + } + ) + expected.index = MultiIndex.from_tuples( + [(1, 100), (1, 200), (2, 100)], names=["A", "B"] + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 281bfb19eb6fa..fc07c14f1e179 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -13,6 +13,7 @@ import pandas.util._test_decorators as td from pandas import ( + NA, Categorical, CategoricalDtype, DataFrame, @@ -1340,3 +1341,10 @@ def test_iloc_setitem_pure_position_based(self): ser1.iloc[1:3] = ser2.iloc[1:3] expected = Series([1, 5, 6]) tm.assert_series_equal(ser1, expected) + + def test_iloc_nullable_int64_size_1_nan(self): + # GH 31861 + result = DataFrame({"a": ["test"], "b": [np.nan]}) + result.loc[:, "b"] = result.loc[:, "b"].astype("Int64") + expected = DataFrame({"a": ["test"], "b": array([NA], dtype="Int64")}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 77b155f01a2ea..cd07b3814d023 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -2479,3 +2479,11 @@ def test_merge_string_float_column_result(): 
[[9, 10, 1, 2], [11, 12, 3, 4]], columns=pd.Index(["x", "y", "a", 114.0]) ) tm.assert_frame_equal(result, expected) + + +def test_mergeerror_on_left_index_mismatched_dtypes(): + # GH 22449 + df_1 = DataFrame(data=["X"], columns=["C"], index=[22]) + df_2 = DataFrame(data=["X"], columns=["C"], index=[999]) + with pytest.raises(MergeError, match="Can only pass argument"): + merge(df_1, df_2, on=["C"], left_index=True) diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index 28332a94207fe..67f986c0949ca 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -199,6 +199,13 @@ def test_sort_values_pos_args_deprecation(self): expected = Series([1, 2, 3]) tm.assert_series_equal(result, expected) + def test_mergesort_decending_stability(self): + # GH 28697 + s = Series([1, 2, 1, 3], ["first", "b", "second", "c"]) + result = s.sort_values(ascending=False, kind="mergesort") + expected = Series([3, 2, 1, 1], ["c", "b", "first", "second"]) + tm.assert_series_equal(result, expected) + class TestSeriesSortingKey: def test_sort_values_key(self): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 3eb3892279832..8872b76cd9bce 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -870,3 +870,21 @@ def test_dateoffset_immutable(attribute): msg = "DateOffset objects are immutable" with pytest.raises(AttributeError, match=msg): setattr(offset, attribute, 5) + + +@pytest.mark.parametrize( + "weekmask, expected_time, mult", + [ + ["Mon Tue Wed Thu Fri Sat", "2018-11-10 09:00:00", 10], + ["Tue Wed Thu Fri Sat", "2018-11-13 08:00:00", 18], + ], +) +def test_custom_businesshour_weekmask_and_holidays(weekmask, expected_time, mult): + # GH 23542 + holidays = ["2018-11-09"] + bh = CustomBusinessHour( + start="08:00", end="17:00", weekmask=weekmask, 
holidays=holidays + ) + result = Timestamp("2018-11-08 08:00") + mult * bh + expected = Timestamp(expected_time) + assert result == expected diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 7a3e1e002759d..c28d54dd9fbfb 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1411,3 +1411,11 @@ def test_rolling_sum_all_nan_window_floating_artifacts(): result = df.rolling(3, min_periods=0).sum() expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0]) tm.assert_frame_equal(result, expected) + + +def test_rolling_zero_window(): + # GH 22719 + s = Series(range(1)) + result = s.rolling(0).min() + expected = Series([np.nan]) + tm.assert_series_equal(result, expected)
- [x] closes #18561 - [x] closes #22060 - [x] closes #22449 - [x] closes #22512 - [x] closes #22719 - [x] closes #23542 - [x] closes #26859 - [x] closes #28697 - [x] closes #28928 - [x] closes #29687 - [x] closes #30263 - [x] closes #29699 - [x] closes #31861 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41712
2021-05-29T02:00:11Z
2021-05-31T14:27:05Z
2021-05-31T14:27:01Z
2021-05-31T16:30:56Z
REGR: DataFrame reduction with min_count
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..1d7b7a762e2ae 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc44b23da25d5..4eef4ee2a3e80 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9775,7 +9775,6 @@ def _reduce( **kwds, ): - min_count = kwds.get("min_count", 0) assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None @@ -9824,7 +9823,7 @@ def _get_data() -> DataFrame: data = self._get_bool_data() return data - if (numeric_only is not None or axis == 0) and min_count == 0: + if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. 
# For numeric_only=None only the case with axis==0 and no object diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d1eb50f2702ba..b5b4c7000fddf 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -393,7 +393,7 @@ def reduce(self, func, ignore_failures: bool = False) -> list[Block]: return [] raise - if np.ndim(result) == 0: + if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index b8909f16ee876..673c482bced18 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -245,8 +245,7 @@ def _maybe_get_mask( """ if mask is None: if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype): - # Boolean data cannot contain nulls, so signal via mask being None - return None + return np.broadcast_to(False, values.shape) if skipna or needs_i8_conversion(values.dtype): mask = isna(values) diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 564f5d20b0301..9d778cdee6a5b 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -811,35 +812,36 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method)() + result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) # min_count=1 - 
result = getattr(df, method)(min_count=1) + result = getattr(df, method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1685,7 +1687,7 @@ def test_minmax_extensionarray(method, numeric_only): @pytest.mark.parametrize("meth", ["max", "min", "sum", "mean", "median"]) -def test_groupy_regular_arithmetic_equivalent(meth): +def test_groupby_regular_arithmetic_equivalent(meth): # GH#40660 df = DataFrame( {"a": [pd.Timedelta(hours=6), pd.Timedelta(hours=7)], "b": [12.1, 13.3]} @@ -1708,3 +1710,16 @@ def test_frame_mixed_numeric_object_with_timestamp(ts_value): result = df.sum() expected = Series([1, 1.1, "foo"], index=list("abc")) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = 
re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
- [ ] closes #41074 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry tests copied from #41701; i think this gets at the root problem cc @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/41711
2021-05-28T23:02:15Z
2021-06-01T14:54:44Z
2021-06-01T14:54:44Z
2021-06-10T07:19:12Z
REF: avoid maybe_convert_platform
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index a99bf245a6073..4aa3bab168ac6 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -32,7 +32,6 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender -from pandas.core.dtypes.cast import maybe_convert_platform from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_dtype, @@ -1650,4 +1649,6 @@ def _maybe_convert_platform_interval(values) -> ArrayLike: else: values = extract_array(values, extract_numpy=True) - return maybe_convert_platform(values) + if not hasattr(values, "dtype"): + return np.asarray(values) + return values diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 5c2bed109e3bf..3a8915e94135a 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -537,9 +537,6 @@ def _prep_ndarray(values, copy: bool = True) -> np.ndarray: def convert(v): if not is_list_like(v) or isinstance(v, ABCDataFrame): return v - elif not hasattr(v, "dtype") and not isinstance(v, (list, tuple, range)): - # TODO: should we cast these to list? - return v v = extract_array(v, extract_numpy=True) res = maybe_convert_platform(v)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry After this it is exclusively used in core.construction and core.internals.construction
https://api.github.com/repos/pandas-dev/pandas/pulls/41709
2021-05-28T19:22:18Z
2021-05-31T16:17:27Z
2021-05-31T16:17:27Z
2021-05-31T16:19:09Z
CLN: Removing unused Travis files for GBQ
diff --git a/.travis.yml b/.travis.yml index 540cd026a43d5..52fadca6b7846 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,7 +45,6 @@ before_install: - echo "before_install" # Use blocking IO on travis. Ref: https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' - - source ci/travis_process_gbq_encryption.sh - export PATH="$HOME/miniconda3/bin:$PATH" - df -h - pwd diff --git a/ci/travis_encrypt_gbq.sh b/ci/travis_encrypt_gbq.sh deleted file mode 100755 index 7d5692d9520af..0000000000000 --- a/ci/travis_encrypt_gbq.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -GBQ_JSON_FILE=$1 - -if [[ $# -ne 1 ]]; then - echo -e "Too few arguments.\nUsage: ./travis_encrypt_gbq.sh "\ - "<gbq-json-credentials-file>" - exit 1 -fi - -if [[ $GBQ_JSON_FILE != *.json ]]; then - echo "ERROR: Expected *.json file" - exit 1 -fi - -if [[ ! -f $GBQ_JSON_FILE ]]; then - echo "ERROR: File $GBQ_JSON_FILE does not exist" - exit 1 -fi - -echo "Encrypting $GBQ_JSON_FILE..." -read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file -r pandas-dev/pandas $GBQ_JSON_FILE \ -travis_gbq.json.enc -f | grep -o "\w*_iv\|\w*_key"); - -echo "Adding your secure key to travis_gbq_config.txt ..." -echo -e "TRAVIS_IV_ENV=$TRAVIS_IV\nTRAVIS_KEY_ENV=$TRAVIS_KEY"\ -> travis_gbq_config.txt - -echo "Done. 
Removing file $GBQ_JSON_FILE" -rm $GBQ_JSON_FILE - -echo -e "Created encrypted credentials file travis_gbq.json.enc.\n"\ - "NOTE: Do NOT commit the *.json file containing your unencrypted" \ - "private key" diff --git a/ci/travis_gbq.json.enc b/ci/travis_gbq.json.enc deleted file mode 100644 index 6e0b6cee4048c..0000000000000 Binary files a/ci/travis_gbq.json.enc and /dev/null differ diff --git a/ci/travis_gbq_config.txt b/ci/travis_gbq_config.txt deleted file mode 100644 index dc857c450331c..0000000000000 --- a/ci/travis_gbq_config.txt +++ /dev/null @@ -1,2 +0,0 @@ -TRAVIS_IV_ENV=encrypted_e05c934e101e_iv -TRAVIS_KEY_ENV=encrypted_e05c934e101e_key diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh deleted file mode 100755 index b5118ad5defc6..0000000000000 --- a/ci/travis_process_gbq_encryption.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -source ci/travis_gbq_config.txt - -if [[ -n ${SERVICE_ACCOUNT_KEY} ]]; then - echo "${SERVICE_ACCOUNT_KEY}" > ci/travis_gbq.json; -elif [[ -n ${!TRAVIS_IV_ENV} ]]; then - openssl aes-256-cbc -K ${!TRAVIS_KEY_ENV} -iv ${!TRAVIS_IV_ENV} \ - -in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d; - export GBQ_PROJECT_ID='pandas-gbq-tests'; - echo 'Successfully decrypted gbq credentials' -fi
If I'm not missing anything, Travis is only used to test on ARM64, and it's not testing on GBQ, since it doesn't have its dependencies in the conda environment. So, I think all GBQ stuff for Travis can be removed.
https://api.github.com/repos/pandas-dev/pandas/pulls/41708
2021-05-28T18:07:21Z
2021-05-30T19:25:04Z
2021-05-30T19:25:04Z
2021-05-30T19:25:24Z
BUG: MultiIndex.reindex with non-MultiIndex; Series constructor
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index e06085c4c5c26..5d92aefdb4eb1 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -233,7 +233,7 @@ Other enhancements - Add keyword ``sort`` to :func:`pivot_table` to allow non-sorting of the result (:issue:`39143`) - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) -- Improved error message in ``corr` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) .. 
--------------------------------------------------------------------------- @@ -959,6 +959,7 @@ MultiIndex - Bug in :meth:`MultiIndex.equals` incorrectly returning ``True`` when :class:`MultiIndex` containing ``NaN`` even when they are differently ordered (:issue:`38439`) - Bug in :meth:`MultiIndex.intersection` always returning empty when intersecting with :class:`CategoricalIndex` (:issue:`38653`) - Bug in :meth:`MultiIndex.reindex` raising ``ValueError`` with empty MultiIndex and indexing only a specific level (:issue:`41170`) +- Bug in :meth:`MultiIndex.reindex` raising ``TypeError`` when reindexing against a flat :class:`Index` (:issue:`41707`) I/O ^^^ @@ -1073,6 +1074,7 @@ Reshaping - Bug in :meth:`DataFrame.sort_values` not reshaping index correctly after sorting on columns, when ``ignore_index=True`` (:issue:`39464`) - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``ExtensionDtype`` dtypes (:issue:`39454`) - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``datetime64`` and ``timedelta64`` dtypes (:issue:`39574`) +- Bug in :meth:`DataFrame.append` with a :class:`DataFrame` with a :class:`MultiIndex` and appending a :class:`Series` whose :class:`Index` is not a :class:`MultiIndex` (:issue:`41707`) - Bug in :meth:`DataFrame.pivot_table` returning a ``MultiIndex`` for a single value when operating on and empty ``DataFrame`` (:issue:`13483`) - Allow :class:`Index` to be passed to the :func:`numpy.all` function (:issue:`40180`) - Bug in :meth:`DataFrame.stack` not preserving ``CategoricalDtype`` in a ``MultiIndex`` (:issue:`36991`) @@ -1127,6 +1129,7 @@ Other - Bug in :meth:`DataFrame.clip` not interpreting missing values as no threshold (:issue:`40420`) - Bug in :class:`Series` backed by :class:`DatetimeArray` or :class:`TimedeltaArray` sometimes failing to set the array's ``freq`` to ``None`` (:issue:`41425`) - Bug in creating a :class:`Series` from a ``range`` object that does not fit in the 
bounds of ``int64`` dtype (:issue:`30173`) +- Bug in creating a :class:`Series` from a ``dict`` with all-tuple keys and an :class:`Index` that requires reindexing (:issue:`41707`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc44b23da25d5..6d4b723cdf921 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8921,10 +8921,7 @@ def append( index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) - try: - combined_columns = self.columns.append(idx_diff) - except TypeError: - combined_columns = self.columns.astype(object).append(idx_diff) + combined_columns = self.columns.append(idx_diff) other = ( other.reindex(combined_columns, copy=False) .to_frame() diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6ae906edd1d81..6dcb2a44e7d3d 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -774,9 +774,11 @@ def _get_indexer_pointwise(self, target: Index) -> tuple[np.ndarray, np.ndarray] except KeyError: missing.append(i) locs = np.array([-1]) - except InvalidIndexError as err: - # i.e. non-scalar key - raise TypeError(key) from err + except InvalidIndexError: + # i.e. non-scalar key e.g. a tuple. + # see test_append_different_columns_types_raises + missing.append(i) + locs = np.array([-1]) indexer.append(locs) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 59882422f5439..805420a83108a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2541,9 +2541,11 @@ def reindex( elif (indexer >= 0).all(): target = self.take(indexer) else: - # hopefully? 
- target = MultiIndex.from_tuples(target) - + try: + target = MultiIndex.from_tuples(target) + except TypeError: + # not all tuples, see test_constructor_dict_multiindex_reindex_flat + return target, indexer if ( preserve_names and target.nlevels == self.nlevels diff --git a/pandas/tests/indexes/multi/test_reindex.py b/pandas/tests/indexes/multi/test_reindex.py index 3b0fcd72f3123..38ff6efec40c9 100644 --- a/pandas/tests/indexes/multi/test_reindex.py +++ b/pandas/tests/indexes/multi/test_reindex.py @@ -115,3 +115,14 @@ def test_reindex_empty_with_level(values): expected_indexer = np.array([], dtype=result_indexer.dtype) tm.assert_index_equal(result, expected) tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + +def test_reindex_not_all_tuples(): + keys = [("i", "i"), ("i", "j"), ("j", "i"), "j"] + mi = MultiIndex.from_tuples(keys[:-1]) + idx = Index(keys) + res, indexer = mi.reindex(idx) + + tm.assert_index_equal(res, idx) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index 62fe1ed3a7c49..43fe72b0776ed 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -184,18 +184,12 @@ def test_append_preserve_index_name(self): dt.datetime(2013, 1, 3, 7, 12), ] ), + pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]), ] - indexes_cannot_append_with_other = [ - pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]) - ] - - # error: Unsupported operand types for + ("List[Index]" and "List[MultiIndex]") - all_indexes = ( - indexes_can_append + indexes_cannot_append_with_other # type: ignore[operator] + @pytest.mark.parametrize( + "index", indexes_can_append, ids=lambda x: type(x).__name__ ) - - @pytest.mark.parametrize("index", all_indexes, ids=lambda x: type(x).__name__) def test_append_same_columns_type(self, index): # GH18359 @@ -249,41 
+243,6 @@ def test_append_different_columns_types(self, df_columns, series_index): ) tm.assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "index_can_append", indexes_can_append, ids=lambda x: type(x).__name__ - ) - @pytest.mark.parametrize( - "index_cannot_append_with_other", - indexes_cannot_append_with_other, - ids=lambda x: type(x).__name__, - ) - def test_append_different_columns_types_raises( - self, index_can_append, index_cannot_append_with_other - ): - # GH18359 - # Dataframe.append will raise if MultiIndex appends - # or is appended to a different index type - # - # See also test 'test_append_different_columns_types' above for - # appending without raising. - - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) - ser = Series([7, 8, 9], index=index_cannot_append_with_other, name=2) - msg = ( - r"Expected tuple, got (int|long|float|str|" - r"pandas._libs.interval.Interval)|" - r"object of type '(int|float|Timestamp|" - r"pandas._libs.interval.Interval)' has no len\(\)|" - ) - with pytest.raises(TypeError, match=msg): - df.append(ser) - - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other) - ser = Series([7, 8, 9], index=index_can_append, name=2) - - with pytest.raises(TypeError, match=msg): - df.append(ser) - def test_append_dtype_coerce(self, sort): # GH 4993 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 646d1f0ab1508..a540b692f3aec 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1700,6 +1700,14 @@ def test_constructor_dict_multiindex(self): result = result.reindex(index=expected.index) tm.assert_series_equal(result, expected) + def test_constructor_dict_multiindex_reindex_flat(self): + # construction involves reindexing with a MultiIndex corner case + data = {("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan} + expected = Series(data) + + result = Series(expected[:-1].to_dict(), 
index=expected.index) + tm.assert_series_equal(result, expected) + def test_constructor_dict_timedelta_index(self): # GH #12169 : Resample category data with timedelta index # construct Series from dict as data and TimedeltaIndex as index
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry preliminary to de-duplicating some code in internals.construction
https://api.github.com/repos/pandas-dev/pandas/pulls/41707
2021-05-28T17:03:11Z
2021-06-01T14:52:40Z
2021-06-01T14:52:40Z
2021-06-01T16:05:15Z
BUG: DataFrameGroupBy with numeric_only and empty non-numeric data
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 1556c88aaecc6..b36499c340fd9 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -1061,6 +1061,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) - Bug in :meth:`SeriesGroupBy` aggregations incorrectly returning empty :class:`Series` instead of raising ``TypeError`` on aggregations that are invalid for its dtype, e.g. ``.prod`` with ``datetime64[ns]`` dtype (:issue:`41342`) +- Bug in :class:`DataFrameGroupBy` aggregations incorrectly failing to drop columns with invalid dtypes for that aggregation when there are no valid columns (:issue:`41291`) - Bug in :meth:`DataFrame.rolling.__iter__` where ``on`` was not assigned to the index of the resulting objects (:issue:`40373`) - Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 7a286188c4e74..b72b927b3c2a8 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -516,7 +516,7 @@ def group_add(add_t[:, ::1] out, val = values[i, j] # not nan - if val == val: + if not checknull(val): nobs[lab, j] += 1 if nobs[lab, j] == 1: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b51fb2234e148..69f992f840c7c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -67,10 +67,7 @@ validate_func_kwargs, ) from pandas.core.apply import GroupByApply -from pandas.core.base import ( - DataError, - SpecificationError, -) +from pandas.core.base import SpecificationError import pandas.core.common as com from pandas.core.construction import 
create_series_with_explicit_dtype from pandas.core.frame import DataFrame @@ -516,16 +513,12 @@ def _cython_transform( obj = self._selected_obj - is_numeric = is_numeric_dtype(obj.dtype) - if numeric_only and not is_numeric: - raise DataError("No numeric types to aggregate") - try: result = self.grouper._cython_operation( "transform", obj._values, how, axis, **kwargs ) - except (NotImplementedError, TypeError): - raise DataError("No numeric types to aggregate") + except NotImplementedError as err: + raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err return obj._constructor(result, index=self.obj.index, name=obj.name) @@ -1064,7 +1057,6 @@ def _cython_agg_general( # Note: we never get here with how="ohlc"; that goes through SeriesGroupBy data: Manager2D = self._get_data_to_aggregate() - orig = data if numeric_only: data = data.get_numeric_data(copy=False) @@ -1087,9 +1079,6 @@ def array_func(values: ArrayLike) -> ArrayLike: # continue and exclude the block new_mgr = data.grouped_reduce(array_func, ignore_failures=True) - if not len(new_mgr) and len(orig): - # If the original Manager was already empty, no need to raise - raise DataError("No numeric types to aggregate") if len(new_mgr) < len(data): warnings.warn( f"Dropping invalid columns in {type(self).__name__}.{how} " diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b00a1160fb01b..6deb5bb1a76f0 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1339,20 +1339,12 @@ def _agg_general( with group_selection_context(self): # try a cython aggregation if we can - result = None - try: - result = self._cython_agg_general( - how=alias, - alt=npfunc, - numeric_only=numeric_only, - min_count=min_count, - ) - except DataError: - pass - - # apply a non-cython aggregation - if result is None: - result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) + result = self._cython_agg_general( + how=alias, + alt=npfunc, + 
numeric_only=numeric_only, + min_count=min_count, + ) return result.__finalize__(self.obj, method="groupby") def _agg_py_fallback( diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index eb82e03aea82f..851dd7311183f 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -128,8 +128,9 @@ def test_groupby_aggregation_multi_level_column(): columns=MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]), ) - result = df.groupby(level=1, axis=1).sum() - expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]}) + gb = df.groupby(level=1, axis=1) + result = gb.sum(numeric_only=False) + expected = DataFrame({0: [2.0, True, True, True], 1: [1, 0, 1, 1]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index cf1177d231e37..a035c5500e2dc 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -18,7 +18,6 @@ bdate_range, ) import pandas._testing as tm -from pandas.core.groupby.groupby import DataError @pytest.mark.parametrize( @@ -98,9 +97,9 @@ def test_cython_agg_nothing_to_agg(): frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25}) - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): - frame[["b"]].groupby(frame["a"]).mean() + result = frame[["b"]].groupby(frame["a"]).mean() + expected = DataFrame([], index=frame["a"].sort_values().drop_duplicates()) + tm.assert_frame_equal(result, expected) def test_cython_agg_nothing_to_agg_with_dates(): diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 4d30543355d47..79990deed261d 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -433,15 +433,22 @@ def 
test_agg_over_numpy_arrays(): ], columns=["category", "arraydata"], ) - result = df.groupby("category").agg(sum) + gb = df.groupby("category") expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]] expected_index = Index([1, 2], name="category") expected_column = ["arraydata"] expected = DataFrame(expected_data, index=expected_index, columns=expected_column) + alt = gb.sum(numeric_only=False) + tm.assert_frame_equal(alt, expected) + + result = gb.agg("sum", numeric_only=False) tm.assert_frame_equal(result, expected) + # FIXME: the original version of this test called `gb.agg(sum)` + # and that raises TypeError if `numeric_only=False` is passed + @pytest.mark.parametrize("as_period", [True, False]) def test_agg_tzaware_non_datetime_result(as_period): @@ -524,9 +531,14 @@ def test_sum_uint64_overflow(): ) expected.index.name = 0 - result = df.groupby(0).sum() + result = df.groupby(0).sum(numeric_only=False) tm.assert_frame_equal(result, expected) + # out column is non-numeric, so with numeric_only=True it is dropped + result2 = df.groupby(0).sum(numeric_only=True) + expected2 = expected[[]] + tm.assert_frame_equal(result2, expected2) + @pytest.mark.parametrize( "structure, expected", diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 719fdb353e3cf..382a940d2a92c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -638,7 +638,7 @@ def test_as_index_select_column(): def test_groupby_as_index_select_column_sum_empty_df(): # GH 35246 df = DataFrame(columns=["A", "B", "C"]) - left = df.groupby(by="A", as_index=False)["B"].sum() + left = df.groupby(by="A", as_index=False)["B"].sum(numeric_only=False) assert type(left) is DataFrame assert left.to_dict() == {"A": {}, "B": {}} @@ -1861,6 +1861,49 @@ def get_result(): get_result() return + else: + # ie. 
DataFrameGroupBy + if op in ["prod", "sum"]: + # ops that require more than just ordered-ness + if method != "apply": + # FIXME: apply goes through different code path + if df.dtypes[0].kind == "M": + # GH#41291 + # datetime64 -> prod and sum are invalid + result = get_result() + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + expected = df.set_index(keys)[[]] + tm.assert_equal(result, expected) + return + + elif isinstance(values, Categorical): + # GH#41291 + # Categorical doesn't implement sum or prod + result = get_result() + + # with numeric_only=True, these are dropped, and we get + # an empty DataFrame back + expected = df.set_index(keys)[[]] + if len(keys) != 1 and op == "prod": + # TODO: why just prod and not sum? + # Categorical is special without 'observed=True' + lev = Categorical([0], dtype=values.dtype) + mi = MultiIndex.from_product([lev, lev], names=["A", "B"]) + expected = DataFrame([], columns=[], index=mi) + + tm.assert_equal(result, expected) + return + + elif df.dtypes[0] == object: + # FIXME: the test is actually wrong here, xref #41341 + result = get_result() + # In this case we have list-of-list, will raise TypeError, + # and subsequently be dropped as nuisance columns + expected = df.set_index(keys)[[]] + tm.assert_equal(result, expected) + return result = get_result() expected = df.set_index(keys)[columns] @@ -2313,12 +2356,17 @@ def test_groupby_all_nan_groups_drop(): def test_groupby_empty_multi_column(): # GH 15106 - result = DataFrame(data=[], columns=["A", "B", "C"]).groupby(["A", "B"]).sum() + df = DataFrame(data=[], columns=["A", "B", "C"]) + gb = df.groupby(["A", "B"]) + result = gb.sum(numeric_only=False) expected = DataFrame( [], columns=["C"], index=MultiIndex([[], []], [[], []], names=["A", "B"]) ) tm.assert_frame_equal(result, expected) + result = gb.sum(numeric_only=True) + tm.assert_frame_equal(result, expected[[]]) + def test_groupby_filtered_df_std(): # GH 16174 diff --git 
a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index dae5c7274ffc5..9062049029e4d 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -24,7 +24,6 @@ DataFrameGroupBy, SeriesGroupBy, ) -from pandas.core.groupby.groupby import DataError def assert_fp_equal(a, b): @@ -741,11 +740,21 @@ def test_cython_transform_frame(op, args, targop): tm.assert_frame_equal(expected, getattr(gb, op)(*args).sort_index(axis=1)) # individual columns for c in df: - if c not in ["float", "int", "float_missing"] and op != "shift": - msg = "No numeric types to aggregate" - with pytest.raises(DataError, match=msg): + if ( + c not in ["float", "int", "float_missing"] + and op != "shift" + and not (c == "timedelta" and op == "cumsum") + ): + msg = "|".join( + [ + "does not support .* operations", + ".* is not supported for object dtype", + "is not implemented for this dtype", + ] + ) + with pytest.raises(TypeError, match=msg): gb[c].transform(op) - with pytest.raises(DataError, match=msg): + with pytest.raises(TypeError, match=msg): getattr(gb[c], op)() else: expected = gb[c].apply(targop)
- [x] closes #41291 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41706
2021-05-28T16:14:58Z
2021-06-02T15:14:09Z
2021-06-02T15:14:08Z
2021-06-02T16:59:36Z
REF: simplify _try_cast
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 92f94f4424ee8..0267116cdfb99 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -17,10 +17,7 @@ import numpy.ma as ma from pandas._libs import lib -from pandas._libs.tslibs import ( - IncompatibleFrequency, - OutOfBoundsDatetime, -) +from pandas._libs.tslibs import IncompatibleFrequency from pandas._typing import ( AnyArrayLike, ArrayLike, @@ -719,9 +716,7 @@ def _try_cast( # while maybe_cast_to_datetime treats it as UTC # see test_maybe_promote_any_numpy_dtype_with_datetimetz - # error: Incompatible return value type (got "Union[ExtensionArray, - # ndarray, List[Any]]", expected "Union[ExtensionArray, ndarray]") - return maybe_cast_to_datetime(arr, dtype) # type: ignore[return-value] + return maybe_cast_to_datetime(arr, dtype) # TODO: copy? array_type = dtype.construct_array_type()._from_sequence @@ -734,6 +729,9 @@ def _try_cast( return subarr return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy) + elif dtype.kind in ["m", "M"]: + return maybe_cast_to_datetime(arr, dtype) + try: # GH#15832: Check if we are requesting a numeric dtype and # that we can convert the data to the requested dtype. 
@@ -743,9 +741,7 @@ def _try_cast( maybe_cast_to_integer_array(arr, dtype) subarr = arr else: - subarr = maybe_cast_to_datetime(arr, dtype) - if dtype is not None and dtype.kind == "M": - return subarr + subarr = arr if not isinstance(subarr, ABCExtensionArray): # 4 tests fail if we move this to a try/except/else; see @@ -753,16 +749,8 @@ def _try_cast( # test_constructor_dict_cast2, test_loc_setitem_dtype subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy) - except OutOfBoundsDatetime: - # in case of out of bound datetime64 -> always raise - raise - except (ValueError, TypeError) as err: - if dtype is not None and raise_cast_failure: - raise - elif "Cannot cast" in str(err) or "cannot be converted to timedelta64" in str( - err - ): - # via _disallow_mismatched_datetimelike + except (ValueError, TypeError): + if raise_cast_failure: raise else: subarr = np.array(arr, dtype=object, copy=copy) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index df79276f67386..f61f5c1d82596 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1579,7 +1579,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: def maybe_cast_to_datetime( value: ExtensionArray | np.ndarray | list, dtype: DtypeObj | None -) -> ExtensionArray | np.ndarray | list: +) -> ExtensionArray | np.ndarray: """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT @@ -1705,7 +1705,8 @@ def maybe_cast_to_datetime( "maybe_cast_to_datetime allows a list *only* if dtype is not None" ) - return value + # at this point we have converted or raised in all cases where we had a list + return cast(ArrayLike, value) def sanitize_to_nanoseconds(values: np.ndarray, copy: bool = False) -> np.ndarray:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41705
2021-05-28T16:00:43Z
2021-05-31T17:41:50Z
2021-05-31T17:41:50Z
2021-05-31T18:10:51Z
REF: simplify Index.astype
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2a50ebd959ace..68069a2e9482c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -906,13 +906,10 @@ def astype(self, dtype, copy=True): if is_dtype_equal(self.dtype, dtype): return self.copy() if copy else self - elif is_categorical_dtype(dtype): - from pandas.core.indexes.category import CategoricalIndex - - return CategoricalIndex(self, name=self.name, dtype=dtype, copy=copy) - - elif is_extension_array_dtype(dtype): - return Index(np.asarray(self), name=self.name, dtype=dtype, copy=copy) + elif isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + new_values = cls._from_sequence(self, dtype=dtype, copy=False) + return Index(new_values, dtype=dtype, copy=copy, name=self.name) try: casted = self._values.astype(dtype, copy=copy) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 83998a2792a8a..066fa1f547328 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -18,6 +18,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, @@ -365,11 +366,17 @@ def astype(self, dtype, copy: bool = True) -> Index: return self return self.copy() - if isinstance(dtype, np.dtype) and dtype.kind == "M" and dtype != "M8[ns]": + if ( + isinstance(self.dtype, np.dtype) + and isinstance(dtype, np.dtype) + and dtype.kind == "M" + and dtype != "M8[ns]" + ): # For now Datetime supports this by unwrapping ndarray, but DTI doesn't - raise TypeError(f"Cannot cast {type(self._data).__name__} to dtype") + raise TypeError(f"Cannot cast {type(self).__name__} to dtype") - new_values = self._data.astype(dtype, copy=copy) + with rewrite_exception(type(self._data).__name__, type(self).__name__): + new_values = self._data.astype(dtype, copy=copy) # pass copy=False because any copying will be done in the # _data.astype call above 
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 894abb0fb1776..61f3d62320a6e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -422,12 +422,6 @@ def __reduce__(self): d.update(self._get_attributes_dict()) return _new_IntervalIndex, (type(self), d), None - @Appender(Index.astype.__doc__) - def astype(self, dtype, copy: bool = True): - with rewrite_exception("IntervalArray", type(self).__name__): - new_values = self._values.astype(dtype, copy=copy) - return Index(new_values, dtype=new_values.dtype, name=self.name) - @property def inferred_type(self) -> str: """Return a string of the type inferred from the values""" diff --git a/pandas/tests/indexes/datetimes/methods/test_astype.py b/pandas/tests/indexes/datetimes/methods/test_astype.py index 24387267cd5c4..3e329818540c3 100644 --- a/pandas/tests/indexes/datetimes/methods/test_astype.py +++ b/pandas/tests/indexes/datetimes/methods/test_astype.py @@ -223,7 +223,7 @@ def test_astype_object_with_nat(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.NaN]) - msg = "Cannot cast DatetimeArray to dtype" + msg = "Cannot cast DatetimeIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py index 73439d349bebd..74f627478a29c 100644 --- a/pandas/tests/indexes/period/methods/test_astype.py +++ b/pandas/tests/indexes/period/methods/test_astype.py @@ -21,7 +21,7 @@ class TestPeriodIndexAsType: def test_astype_raises(self, dtype): # GH#13149, GH#13209 idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq="D") - msg = "Cannot cast PeriodArray to dtype" + msg = "Cannot cast PeriodIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype) diff --git a/pandas/tests/indexes/period/test_constructors.py 
b/pandas/tests/indexes/period/test_constructors.py index 54e61b35eb70f..e372fd007630a 100644 --- a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -538,7 +538,7 @@ def setup_method(self, method): self.series = Series(period_range("2000-01-01", periods=10, freq="D")) def test_constructor_cant_cast_period(self): - msg = "Cannot cast PeriodArray to dtype float64" + msg = "Cannot cast PeriodIndex to dtype float64" with pytest.raises(TypeError, match=msg): Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float) diff --git a/pandas/tests/indexes/timedeltas/methods/test_astype.py b/pandas/tests/indexes/timedeltas/methods/test_astype.py index c2c7a1f32ae6e..fbe66bf78dbeb 100644 --- a/pandas/tests/indexes/timedeltas/methods/test_astype.py +++ b/pandas/tests/indexes/timedeltas/methods/test_astype.py @@ -101,7 +101,7 @@ def test_astype_timedelta64(self): def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN]) - msg = "Cannot cast TimedeltaArray to dtype" + msg = "Cannot cast TimedeltaIndex to dtype" with pytest.raises(TypeError, match=msg): idx.astype(dtype)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41703
2021-05-28T15:32:10Z
2021-05-31T21:29:45Z
2021-05-31T21:29:45Z
2021-05-31T21:30:02Z
TYP: fix ignores
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 92f94f4424ee8..05e267bf83dd6 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -554,9 +554,8 @@ def sanitize_array( # TODO: copy? subarr = maybe_convert_platform(data) if subarr.dtype == object: - # Argument 1 to "maybe_infer_to_datetimelike" has incompatible - # type "Union[ExtensionArray, ndarray]"; expected "ndarray" - subarr = maybe_infer_to_datetimelike(subarr) # type: ignore[arg-type] + subarr = cast(np.ndarray, subarr) + subarr = maybe_infer_to_datetimelike(subarr) subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) @@ -620,9 +619,7 @@ def _sanitize_ndim( if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): # i.e. PandasDtype("O") - # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type - # "Type[object]"; expected "Union[str, dtype[Any], None]" - result = com.asarray_tuplesafe(data, dtype=object) # type: ignore[arg-type] + result = com.asarray_tuplesafe(data, dtype=np.dtype("object")) cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) else: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..03554e67d7931 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -123,9 +123,8 @@ def maybe_convert_platform( arr = values if arr.dtype == object: - # error: Argument 1 to "maybe_convert_objects" has incompatible type - # "Union[ExtensionArray, ndarray]"; expected "ndarray" - arr = lib.maybe_convert_objects(arr) # type: ignore[arg-type] + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) return arr @@ -1249,13 +1248,12 @@ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> Arra return values.copy() return values - if isinstance(values, ABCExtensionArray): + if not isinstance(values, np.ndarray): + # i.e. 
ExtensionArray values = values.astype(dtype, copy=copy) else: - # error: Argument 1 to "astype_nansafe" has incompatible type "ExtensionArray"; - # expected "ndarray" - values = astype_nansafe(values, dtype, copy=copy) # type: ignore[arg-type] + values = astype_nansafe(values, dtype, copy=copy) # in pandas we don't store numpy str dtypes, so convert to object if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): @@ -1958,7 +1956,7 @@ def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: def construct_1d_ndarray_preserving_na( - values: Sequence, dtype: DtypeObj | None = None, copy: bool = False + values: Sequence, dtype: np.dtype | None = None, copy: bool = False ) -> np.ndarray: """ Construct a new ndarray, coercing `values` to `dtype`, preserving NA. @@ -2003,17 +2001,9 @@ def construct_1d_ndarray_preserving_na( and isinstance(values, np.ndarray) and values.dtype.kind == "f" ): - # Argument 2 to "astype_float_to_int_nansafe" has incompatible - # type "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" - return astype_float_to_int_nansafe( - values, dtype, copy=copy # type: ignore[arg-type] - ) + return astype_float_to_int_nansafe(values, dtype, copy=copy) else: - # error: Argument "dtype" to "array" has incompatible type - # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[dtype[Any], - # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, - # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - subarr = np.array(values, dtype=dtype, copy=copy) # type: ignore[arg-type] + subarr = np.array(values, dtype=dtype, copy=copy) return subarr diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 894abb0fb1776..1ae0be5e5f5bf 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1165,6 +1165,8 @@ def interval_range( if periods is not None: periods += 1 + breaks: np.ndarray | TimedeltaIndex | DatetimeIndex + if 
is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com.all_not_none(start, end, freq): @@ -1190,16 +1192,8 @@ def interval_range( else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): - # error: Incompatible types in assignment (expression has type - # "DatetimeIndex", variable has type "ndarray") - breaks = date_range( # type: ignore[assignment] - start=start, end=end, periods=periods, freq=freq - ) + breaks = date_range(start=start, end=end, periods=periods, freq=freq) else: - # error: Incompatible types in assignment (expression has type - # "TimedeltaIndex", variable has type "ndarray") - breaks = timedelta_range( # type: ignore[assignment] - start=start, end=end, periods=periods, freq=freq - ) + breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41702
2021-05-28T15:03:59Z
2021-05-31T19:04:25Z
2021-05-31T19:04:25Z
2021-05-31T19:12:18Z
REGR: fix DataFrame sum and prod with min_count and numeric_only
diff --git a/doc/source/whatsnew/v1.2.5.rst b/doc/source/whatsnew/v1.2.5.rst index 60e146b2212eb..1d7b7a762e2ae 100644 --- a/doc/source/whatsnew/v1.2.5.rst +++ b/doc/source/whatsnew/v1.2.5.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :func:`concat` between two :class:`DataFrames` where one has an :class:`Index` that is all-None and the other is :class:`DatetimeIndex` incorrectly raising (:issue:`40841`) +- Fixed regression in :meth:`DataFrame.sum` and :meth:`DataFrame.prod` when ``min_count`` and ``numeric_only`` are both given (:issue:`41074`) - Regression in :func:`read_csv` when using ``memory_map=True`` with an non-UTF8 encoding (:issue:`40986`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6e71cb49596c8..2f4cff3aa405c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9856,36 +9856,42 @@ def _get_data() -> DataFrame: return out - assert numeric_only is None - data = self values = data.values - try: - result = func(values) + if numeric_only is None: - except TypeError: - # e.g. in nanops trying to convert strs to float + try: + result = func(values) - data = _get_data() - labels = data._get_agg_axis(axis) + except TypeError: + # e.g. in nanops trying to convert strs to float - values = data.values - with np.errstate(all="ignore"): - result = func(values) + data = _get_data() + labels = data._get_agg_axis(axis) - # columns have been dropped GH#41480 - arg_name = "numeric_only" - if name in ["all", "any"]: - arg_name = "bool_only" - warnings.warn( - "Dropping of nuisance columns in DataFrame reductions " - f"(with '{arg_name}=None') is deprecated; in a future " - "version this will raise TypeError. 
Select only valid " - "columns before calling the reduction.", - FutureWarning, - stacklevel=5, - ) + values = data.values + with np.errstate(all="ignore"): + result = func(values) + + # columns have been dropped GH#41480 + arg_name = "numeric_only" + if name in ["all", "any"]: + arg_name = "bool_only" + warnings.warn( + "Dropping of nuisance columns in DataFrame reductions " + f"(with '{arg_name}=None') is deprecated; in a future " + "version this will raise TypeError. Select only valid " + "columns before calling the reduction.", + FutureWarning, + stacklevel=5, + ) + else: + if numeric_only: + data = _get_data() + labels = data._get_agg_axis(axis) + values = data.values + result = func(values) if hasattr(result, "dtype"): if filter_type == "bool" and notna(result).all(): diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 564f5d20b0301..f84e980644753 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1,5 +1,6 @@ from datetime import timedelta from decimal import Decimal +import re from dateutil.tz import tzlocal import numpy as np @@ -154,7 +155,7 @@ def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only DataFrame with columns of type float float_string_frame : DataFrame DataFrame with both float and string columns - has_numeric_only : bool, default False + has_numeric_only : bool, default True Whether the method "opname" has the kwarg "numeric_only" """ # make sure works on mixed-type frame @@ -811,35 +812,36 @@ def test_sum_corner(self): assert len(axis1) == 0 @pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)]) - def test_sum_prod_nanops(self, method, unit): + @pytest.mark.parametrize("numeric_only", [None, True, False]) + def test_sum_prod_nanops(self, method, unit, numeric_only): idx = ["a", "b", "c"] df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]}) # The default - result = getattr(df, method)() 
+ result = getattr(df, method)(numeric_only=numeric_only) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) # min_count=1 - result = getattr(df, method)(min_count=1) + result = getattr(df, method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, unit, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count=0 - result = getattr(df, method)(min_count=0) + result = getattr(df, method)(numeric_only=numeric_only, min_count=0) expected = Series([unit, unit, unit], index=idx, dtype="float64") tm.assert_series_equal(result, expected) - result = getattr(df.iloc[1:], method)(min_count=1) + result = getattr(df.iloc[1:], method)(numeric_only=numeric_only, min_count=1) expected = Series([unit, np.nan, np.nan], index=idx) tm.assert_series_equal(result, expected) # min_count > 1 df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) + result = getattr(df, method)(numeric_only=numeric_only, min_count=5) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) - result = getattr(df, method)(min_count=6) + result = getattr(df, method)(numeric_only=numeric_only, min_count=6) expected = Series(result, index=["A", "B"]) tm.assert_series_equal(result, expected) @@ -1685,7 +1687,7 @@ def test_minmax_extensionarray(method, numeric_only): @pytest.mark.parametrize("meth", ["max", "min", "sum", "mean", "median"]) -def test_groupy_regular_arithmetic_equivalent(meth): +def test_groupby_regular_arithmetic_equivalent(meth): # GH#40660 df = DataFrame( {"a": [pd.Timedelta(hours=6), pd.Timedelta(hours=7)], "b": [12.1, 13.3]} @@ -1708,3 +1710,16 @@ def test_frame_mixed_numeric_object_with_timestamp(ts_value): result = df.sum() expected = Series([1, 1.1, "foo"], index=list("abc")) tm.assert_series_equal(result, expected) + + +def test_prod_sum_min_count_mixed_object(): + # https://github.com/pandas-dev/pandas/issues/41074 + df = 
DataFrame([1, "a", True]) + + result = df.prod(axis=0, min_count=1, numeric_only=False) + expected = Series(["a"]) + tm.assert_series_equal(result, expected) + + msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'") + with pytest.raises(TypeError, match=msg): + df.sum(axis=0, min_count=1, numeric_only=False)
- [ ] closes #41074 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry it appears that the clean-up in #35899 to simplify _reduce may have been a bit premature, have reinstated a few lines of the removed code for backport purposes. The longer term fix would probably be to support min_count in _mgr.reduce, https://github.com/pandas-dev/pandas/pull/40143#issuecomment-787945291
https://api.github.com/repos/pandas-dev/pandas/pulls/41701
2021-05-28T14:29:58Z
2021-05-31T16:59:24Z
null
2021-05-31T18:44:41Z
ENH: Deprecate non-keyword arguments for Resampler.interpolate
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ba6bfb9da11cc..b03cdea1a8ce5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -686,6 +686,7 @@ Deprecations - Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) - Deprecated passing arguments (apart from ``cond`` and ``other``) as positional in :meth:`DataFrame.mask` and :meth:`Series.mask` (:issue:`41485`) +- Deprecated passing arguments as positional in :meth:`Resampler.interpolate` (other than ``"method"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) - Deprecated behavior of :class:`DataFrame` constructor when a ``dtype`` is passed and the data cannot be cast to that dtype. 
In a future version, this will raise instead of being silently ignored (:issue:`24435`) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 8195c18768eec..6378432392a04 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -34,6 +34,7 @@ from pandas.util._decorators import ( Appender, Substitution, + deprecate_nonkeyword_arguments, doc, ) @@ -832,6 +833,7 @@ def fillna(self, method, limit=None): """ return self._upsample(method, limit=limit) + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"]) @doc(NDFrame.interpolate, **_shared_docs_kwargs) def interpolate( self, diff --git a/pandas/tests/resample/test_deprecated.py b/pandas/tests/resample/test_deprecated.py index fdb3a7872ad67..1f99c2888aad5 100644 --- a/pandas/tests/resample/test_deprecated.py +++ b/pandas/tests/resample/test_deprecated.py @@ -278,3 +278,30 @@ def test_resample_base_with_timedeltaindex(): tm.assert_index_equal(without_base.index, exp_without_base) tm.assert_index_equal(with_base.index, exp_with_base) + + +def test_interpolate_posargs_deprecation(): + # GH 41485 + idx = pd.to_datetime(["1992-08-27 07:46:48", "1992-08-27 07:46:59"]) + s = Series([1, 4], index=idx) + + msg = ( + r"In a future version of pandas all arguments of Resampler\.interpolate " + r"except for the argument 'method' will be keyword-only" + ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = s.resample("3s").interpolate("linear", 0) + + idx = pd.to_datetime( + [ + "1992-08-27 07:46:48", + "1992-08-27 07:46:51", + "1992-08-27 07:46:54", + "1992-08-27 07:46:57", + ] + ) + expected = Series([1.0, 1.0, 1.0, 1.0], index=idx) + + expected.index._data.freq = "3s" + tm.assert_series_equal(result, expected)
- [x] xref #41485 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41699
2021-05-28T12:59:07Z
2021-06-05T09:12:39Z
2021-06-05T09:12:39Z
2021-06-05T09:12:49Z
BUG: Ignore chartsheets
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 81545ada63ce5..394be484f3d72 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -173,6 +173,7 @@ MultiIndex I/O ^^^ +- Bug in :func:`read_excel` attempting to read chart sheets from .xlsx files (:issue:`41448`) - - diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 719a4472fb9e3..4d6a766ad6cfa 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -82,8 +82,9 @@ or ``StringIO``. sheet_name : str, int, list, or None, default 0 Strings are used for sheet names. Integers are used in zero-indexed - sheet positions. Lists of strings/integers are used to request - multiple sheets. Specify None to get all sheets. + sheet positions (chart sheets do not count as a sheet position). + Lists of strings/integers are used to request multiple sheets. + Specify None to get all worksheets. Available cases: @@ -92,7 +93,7 @@ * ``"Sheet1"``: Load sheet with name "Sheet1" * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" as a dict of `DataFrame` - * None: All sheets. + * None: All worksheets. 
header : int, list of int, default 0 Row (0-indexed) to use for the column labels of the parsed diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index bc067e216760c..c74cf2099f41a 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -530,7 +530,7 @@ def load_workbook(self, filepath_or_buffer: FilePathOrBuffer): @property def sheet_names(self) -> list[str]: - return self.book.sheetnames + return [sheet.title for sheet in self.book.worksheets] def get_sheet_by_name(self, name: str): self.raise_if_bad_sheet_by_name(name) diff --git a/pandas/tests/io/data/excel/chartsheet.xls b/pandas/tests/io/data/excel/chartsheet.xls new file mode 100644 index 0000000000000..7d027400fbd52 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xls differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsb b/pandas/tests/io/data/excel/chartsheet.xlsb new file mode 100644 index 0000000000000..805087280f851 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsb differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsm b/pandas/tests/io/data/excel/chartsheet.xlsm new file mode 100644 index 0000000000000..aadb48d6f4824 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsm differ diff --git a/pandas/tests/io/data/excel/chartsheet.xlsx b/pandas/tests/io/data/excel/chartsheet.xlsx new file mode 100644 index 0000000000000..c8d5e7afb3d07 Binary files /dev/null and b/pandas/tests/io/data/excel/chartsheet.xlsx differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index d40fb3ce4a135..cbd241ceda0b1 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1250,6 +1250,34 @@ def test_trailing_blanks(self, read_ext): result = pd.read_excel(file_name) assert result.shape == (3, 3) + def test_ignore_chartsheets_by_str(self, request, read_ext): + # GH 41448 + if pd.read_excel.keywords["engine"] == "odf": + 
pytest.skip("chartsheets do not exist in the ODF format") + if pd.read_excel.keywords["engine"] == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"): + pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1") + + def test_ignore_chartsheets_by_int(self, request, read_ext): + # GH 41448 + if pd.read_excel.keywords["engine"] == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if pd.read_excel.keywords["engine"] == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises( + ValueError, match="Worksheet index 1 is invalid, 1 worksheets found" + ): + pd.read_excel("chartsheet" + read_ext, sheet_name=1) + class TestExcelFileRead: @pytest.fixture(autouse=True) @@ -1501,6 +1529,19 @@ def test_engine_invalid_option(self, read_ext): with pd.option_context(f"io.excel{read_ext}.reader", "abc"): pass + def test_ignore_chartsheets(self, request, engine, read_ext): + # GH 41448 + if engine == "odf": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.node.add_marker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pd.ExcelFile("chartsheet" + read_ext) as excel: + assert excel.sheet_names == ["Sheet1"] + def test_corrupt_files_closed(self, request, engine, read_ext): # GH41778 errors = (BadZipFile,)
- [x] closes #41448 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Issue #41448 relates specifically to our openpyxl engine, and is resolved by this commit. However, the bug also exists in our pyxlsb engine and cannot be resolved until the upstream https://github.com/willtrnr/pyxlsb/issues/33 is addressed. I propose closing #41448 and leaving the pyxlsb case as an xfail for now. Let me know if there's a better way to track this.
https://api.github.com/repos/pandas-dev/pandas/pulls/41698
2021-05-28T04:12:15Z
2021-07-02T01:23:08Z
2021-07-02T01:23:08Z
2022-06-07T03:22:14Z
TST: More old issues
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index b84ff38b43ae7..a8df09d479f22 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -626,6 +626,18 @@ def test_setitem_iloc_two_dimensional_generator(self): expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]}) tm.assert_frame_equal(df, expected) + def test_setitem_dtypes_bytes_type_to_object(self): + # GH 20734 + index = Series(name="id", dtype="S24") + df = DataFrame(index=index) + df["a"] = Series(name="a", index=index, dtype=np.uint32) + df["b"] = Series(name="b", index=index, dtype="S64") + df["c"] = Series(name="c", index=index, dtype="S64") + df["d"] = Series(name="d", index=index, dtype=np.uint8) + result = df.dtypes + expected = Series([np.uint32, object, object, np.uint8], index=list("abcd")) + tm.assert_series_equal(result, expected) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index cf4127da79bf9..2007e60dbc5d0 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1146,3 +1146,35 @@ def test_apply_as_index_constant_lambda(as_index, expected): df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 1, 2, 2], "c": [1, 1, 1, 1]}) result = df.groupby(["a", "b"], as_index=as_index).apply(lambda x: 1) tm.assert_equal(result, expected) + + +def test_sort_index_groups(): + # GH 20420 + df = DataFrame( + {"A": [1, 2, 3, 4, 5], "B": [6, 7, 8, 9, 0], "C": [1, 1, 1, 2, 2]}, + index=range(5), + ) + result = df.groupby("C").apply(lambda x: x.A.sort_index()) + expected = Series( + range(1, 6), + index=MultiIndex.from_tuples( + [(1, 0), (1, 1), (1, 2), (2, 3), (2, 4)], names=["C", None] + ), + name="A", + ) + tm.assert_series_equal(result, expected) + + +def test_positional_slice_groups_datetimelike(): + # GH 21651 + expected = DataFrame( + { + "date": pd.date_range("2010-01-01", freq="12H", 
periods=5), + "vals": range(5), + "let": list("abcde"), + } + ) + result = expected.groupby([expected.let, expected.date.dt.date]).apply( + lambda x: x.iloc[0:] + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 1b74096cbfbdf..dfbf1a5b2cdc2 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -641,3 +641,25 @@ def test_nth_nan_in_grouper(dropna): ) tm.assert_frame_equal(result, expected) + + +def test_first_categorical_and_datetime_data_nat(): + # GH 20520 + df = DataFrame( + { + "group": ["first", "first", "second", "third", "third"], + "time": 5 * [np.datetime64("NaT")], + "categories": Series(["a", "b", "c", "a", "b"], dtype="category"), + } + ) + result = df.groupby("group").first() + expected = DataFrame( + { + "time": 3 * [np.datetime64("NaT")], + "categories": Series(["a", "c", "a"]).astype( + pd.CategoricalDtype(["a", "b", "c"]) + ), + } + ) + expected.index = Index(["first", "second", "third"], name="group") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index b5092f83e1a9f..dae5c7274ffc5 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -1259,3 +1259,11 @@ def test_categorical_and_not_categorical_key(observed): tm.assert_series_equal(result, expected) expected_explicit = Series([4, 2, 4], name="B") tm.assert_series_equal(result, expected_explicit) + + +def test_string_rank_grouping(): + # GH 19354 + df = DataFrame({"A": [1, 1, 2], "B": [1, 2, 3]}) + result = df.groupby("A").transform("rank") + expected = DataFrame({"B": [1.0, 2.0, 1.0]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index f0018c8a82453..afcff6db5e3dd 100644 --- 
a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -866,3 +866,27 @@ def test_loc_get_scalar_casting_to_float(): result = df.loc[[(3, 4)], "b"].iloc[0] assert result == 2 assert isinstance(result, np.int64) + + +def test_loc_empty_single_selector_with_names(): + # GH 19517 + idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=[1, 0]) + s2 = Series(index=idx, dtype=np.float64) + result = s2.loc["a"] + expected = Series([np.nan, np.nan], index=Index(["A", "B"], name=0)) + tm.assert_series_equal(result, expected) + + +def test_loc_keyerror_rightmost_key_missing(): + # GH 20951 + + df = DataFrame( + { + "A": [100, 100, 200, 200, 300, 300], + "B": [10, 10, 20, 21, 31, 33], + "C": range(6), + } + ) + df = df.set_index(["A", "B"]) + with pytest.raises(KeyError, match="^1$"): + df.loc[(100, 1)] diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 880fa6398d25a..aac26c13c2a7c 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -910,3 +910,26 @@ def test_none_comparison(series_with_simple_index): result = series < None assert not result.iat[0] assert not result.iat[1] + + +def test_series_varied_multiindex_alignment(): + # GH 20414 + s1 = Series( + range(8), + index=pd.MultiIndex.from_product( + [list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"] + ), + ) + s2 = Series( + [1000 * i for i in range(1, 5)], + index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]), + ) + result = s1.loc[pd.IndexSlice["a", :, :]] + s2 + expected = Series( + [1000, 2001, 3002, 4003], + index=pd.MultiIndex.from_tuples( + [("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)], + names=["ab", "xy", "num"], + ), + ) + tm.assert_series_equal(result, expected)
- [x] closes #19354 - [x] closes #19517 - [x] closes #20414 - [x] closes #20420 - [x] closes #20520 - [x] closes #20734 - [x] closes #20951 - [x] closes #21651 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41697
2021-05-28T03:46:24Z
2021-05-28T19:14:00Z
2021-05-28T19:13:58Z
2021-05-28T19:14:09Z
TYP: io broken off from #41059
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 2a86ff13a2edc..443ed2491404a 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -6,9 +6,11 @@ from enum import Enum import itertools from typing import ( + TYPE_CHECKING, Any, Callable, DefaultDict, + Hashable, Iterable, Sequence, cast, @@ -68,6 +70,9 @@ ) from pandas.io.date_converters import generic_parser +if TYPE_CHECKING: + from typing import Literal + parser_defaults = { "delimiter": None, "escapechar": None, @@ -121,6 +126,8 @@ class BadLineHandleMethod(Enum): _implicit_index: bool = False _first_chunk: bool + index_col: int | Sequence[int] | Sequence[str] | Literal[False] | None + index_names: list[Hashable] | None def __init__(self, kwds): @@ -130,7 +137,7 @@ def __init__(self, kwds): self.index_col = kwds.get("index_col", None) self.unnamed_cols: set = set() - self.index_names: list | None = None + self.index_names: list[Hashable] | None = None self.col_names = None self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False)) @@ -176,10 +183,11 @@ def __init__(self, kwds): # validate index_col that only contains integers if self.index_col is not None: - is_sequence = isinstance(self.index_col, (list, tuple, np.ndarray)) if not ( - is_sequence - and all(map(is_integer, self.index_col)) + ( + isinstance(self.index_col, (list, tuple, np.ndarray)) + and all(is_integer(x) for x in self.index_col) + ) or is_integer(self.index_col) ): raise ValueError( @@ -298,8 +306,12 @@ def _should_parse_dates(self, i: int) -> bool: name = self.index_names[i] else: name = None - j = i if self.index_col is None else self.index_col[i] - + # error: Value of type "Union[int, Sequence[int]]" is not indexable + j = ( + i + if self.index_col is None + else self.index_col[i] # type: ignore[index] + ) if is_scalar(self.parse_dates): return (j == self.parse_dates) or ( name is not None and name == self.parse_dates @@ -323,12 +335,17 @@ def 
_extract_multi_indexer_columns( # the names are the tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column # numbers - ic = self.index_col - if ic is None: + index_col = self.index_col + if index_col is None: ic = [] + elif not isinstance(index_col, (list, tuple, np.ndarray)): + ic = [index_col] + else: + # Incompatible types in assignment (expression has type + # "Union[List[Any], Tuple[Any, ...]]", variable has type + # "List[Union[int, Sequence[int], Sequence[str]]]") + ic = index_col # type: ignore[assignment] - if not isinstance(ic, (list, tuple, np.ndarray)): - ic = [ic] sic = set(ic) # clean the index_names @@ -379,7 +396,12 @@ def _maybe_dedup_names(self, names): if self.mangle_dupe_cols: names = list(names) # so we can index counts: DefaultDict[int | str | tuple, int] = defaultdict(int) - is_potential_mi = _is_potential_multi_index(names, self.index_col) + # error: Argument 2 to "_is_potential_multi_index" has incompatible + # type "Union[int, Sequence[int], None]"; expected + # "Union[bool, Sequence[int], None]" + is_potential_mi = _is_potential_multi_index( + names, self.index_col # type: ignore[arg-type] + ) for i, col in enumerate(names): cur_count = counts[col] @@ -442,7 +464,11 @@ def ix(col): to_remove = [] index = [] - for idx in self.index_col: + # error: Item "int" of "Union[int, Sequence[int], None]" has no + # attribute "__iter__" (not iterable) + # error: Item "None" of "Union[int, Sequence[int], None]" has no + # attribute "__iter__" (not iterable + for idx in self.index_col: # type: ignore[union-attr] i = ix(idx) to_remove.append(i) index.append(data[i]) @@ -471,7 +497,11 @@ def _get_name(icol): to_remove = [] index = [] - for idx in self.index_col: + # error: Item "int" of "Union[int, Sequence[int], None]" has no + # attribute "__iter__" (not iterable) + # error: Item "None" of "Union[int, Sequence[int], None]" has no + # attribute "__iter__" (not iterable + for idx in 
self.index_col: # type: ignore[union-attr] name = _get_name(idx) to_remove.append(name) index.append(data[name]) diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 5c1f8f94a72da..27deb0ae6c7a5 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -39,10 +39,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds): self.low_memory = kwds.pop("low_memory", False) # #2442 - # error: Cannot determine type of 'index_col' - kwds["allow_leading_cols"] = ( - self.index_col is not False # type: ignore[has-type] - ) + kwds["allow_leading_cols"] = self.index_col is not False # GH20529, validate usecol arg before TextReader kwds["usecols"] = self.usecols @@ -82,7 +79,6 @@ def __init__(self, src: FilePathOrBuffer, **kwds): if len(self._reader.header) > 1: # we have a multi index in the columns # error: Cannot determine type of 'names' - # error: Cannot determine type of 'index_names' # error: Cannot determine type of 'col_names' ( self.names, # type: ignore[has-type] @@ -91,7 +87,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds): passed_names, ) = self._extract_multi_indexer_columns( self._reader.header, - self.index_names, # type: ignore[has-type] + self.index_names, self.col_names, # type: ignore[has-type] passed_names, ) @@ -160,10 +156,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds): self.orig_names = self.names # type: ignore[has-type] if not self._has_complex_date_col: - # error: Cannot determine type of 'index_col' - if self._reader.leading_cols == 0 and is_index_col( - self.index_col # type: ignore[has-type] - ): + if self._reader.leading_cols == 0 and is_index_col(self.index_col): self._name_processed = True ( @@ -174,8 +167,7 @@ def __init__(self, src: FilePathOrBuffer, **kwds): ) = self._clean_index_names( # error: Cannot determine type of 'names' self.names, # type: ignore[has-type] - # error: Cannot determine type of 'index_col' - self.index_col, # type: 
ignore[has-type] + self.index_col, self.unnamed_cols, ) @@ -266,7 +258,8 @@ def read(self, nrows=None): if self.index_col is None: values = data.pop(i) else: - values = data.pop(self.index_col[i]) + # error: Value of type "Union[int, Sequence[int]]" is not indexable + values = data.pop(self.index_col[i]) # type: ignore[index] values = self._maybe_parse_dates(values, i, try_parse_dates=True) arrays.append(values) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 3635d5b32faf4..0220e607d9c71 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -120,7 +120,6 @@ def __init__(self, f: Union[FilePathOrBuffer, list], **kwds): # The original set is stored in self.original_columns. if len(self.columns) > 1: # we are processing a multi index column - # error: Cannot determine type of 'index_names' # error: Cannot determine type of 'col_names' ( self.columns, @@ -129,7 +128,7 @@ def __init__(self, f: Union[FilePathOrBuffer, list], **kwds): _, ) = self._extract_multi_indexer_columns( self.columns, - self.index_names, # type: ignore[has-type] + self.index_names, self.col_names, # type: ignore[has-type] ) # Update list of original names to include all indices. 
@@ -259,10 +258,9 @@ def read(self, rows=None): if not len(content): # pragma: no cover # DataFrame with the right metadata, even though it's length 0 names = self._maybe_dedup_names(self.orig_names) - # error: Cannot determine type of 'index_col' index, columns, col_dict = self._get_empty_meta( names, - self.index_col, # type: ignore[has-type] + self.index_col, self.index_names, self.dtype, ) @@ -291,8 +289,9 @@ def _exclude_implicit_index(self, alldata): offset = 0 if self._implicit_index: - # error: Cannot determine type of 'index_col' - offset = len(self.index_col) # type: ignore[has-type] + # error: Argument 1 to "len" has incompatible type + # "Union[int, Sequence[int], None]"; expected "Sized" + offset = len(self.index_col) # type: ignore[arg-type] len_alldata = len(alldata) return { @@ -441,9 +440,12 @@ def _infer_columns(self): # line for the rest of the parsing code if hr == header[-1]: lc = len(this_columns) - # error: Cannot determine type of 'index_col' - sic = self.index_col # type: ignore[has-type] - ic = len(sic) if sic is not None else 0 + sic = self.index_col + # error: Argument 1 to "len" has incompatible type + # "Union[int, Sequence[int]]"; expected "Sized" + ic = ( + len(sic) if sic is not None else 0 # type: ignore[arg-type] + ) unnamed_count = len(this_unnamed_cols) # if wrong number of blanks or no index, not our format @@ -882,8 +884,7 @@ def _get_index_name(self, columns): if line is not None: # leave it 0, #2442 # Case 1 - # error: Cannot determine type of 'index_col' - index_col = self.index_col # type: ignore[has-type] + index_col = self.index_col if index_col is not False: implicit_first_cols = len(line) - self.num_original_columns @@ -922,20 +923,16 @@ def _rows_to_cols(self, content): col_len = self.num_original_columns if self._implicit_index: - col_len += len(self.index_col) + # error: Argument 1 to "len" has incompatible type + # "Union[int, Sequence[int]]"; expected "Sized" + col_len += len(self.index_col) # type: 
ignore[arg-type] max_len = max(len(row) for row in content) # Check that there are no rows with too many # elements in their row (rows with too few # elements are padded with NaN). - # error: Non-overlapping identity check (left operand type: "List[int]", - # right operand type: "Literal[False]") - if ( - max_len > col_len - and self.index_col is not False # type: ignore[comparison-overlap] - and self.usecols is None - ): + if max_len > col_len and self.index_col is not False and self.usecols is None: footers = self.skipfooter if self.skipfooter else 0 bad_lines = [] @@ -987,13 +984,13 @@ def _rows_to_cols(self, content): col_indices = self._col_indices if self._implicit_index: + # error: Argument 1 to "len" has incompatible type + # "Union[int, Sequence[int]]"; expected "Sized" + lic = len(self.index_col) # type: ignore[arg-type] zipped_content = [ a for i, a in enumerate(zipped_content) - if ( - i < len(self.index_col) - or i - len(self.index_col) in col_indices - ) + if (i < lic or i - lic in col_indices) ] else: zipped_content = [
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41695
2021-05-27T21:59:39Z
2021-06-12T18:14:44Z
null
2021-06-12T18:14:57Z
BUG: Empty DataFrame(..., dtype='datetime64[ns]').quantile(..., axis=1) returns float64 dtype
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6e71cb49596c8..6b2c63ece2154 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10271,6 +10271,11 @@ def quantile( C 1 days 12:00:00 Name: 0.5, dtype: object """ + + data = self._get_numeric_data() if numeric_only else self + axis = self._get_axis_number(axis) + dtypes = set(data.dtypes) + validate_percentile(q) if not is_list_like(q): @@ -10278,11 +10283,15 @@ def quantile( res = self.quantile( [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation ) - return res.iloc[0] + res = res.iloc[0] + # GH#41544 + if len(dtypes) == 1 and not numeric_only: + res_dtype = next(iter(dtypes)) + return res.astype(res_dtype) + else: + return res q = Index(q, dtype=np.float64) - data = self._get_numeric_data() if numeric_only else self - axis = self._get_axis_number(axis) if axis == 1: data = data.T diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index c01195a6afff1..73e6ac219a99e 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -680,7 +680,6 @@ def test_empty_numeric(self, dtype, expected_data, expected_index, axis): [], 1, "datetime64[ns]", - marks=pytest.mark.xfail(reason="#GH 41544"), ), ["datetime64[ns]", [pd.NaT, pd.NaT], ["a", "b"], 0, "datetime64[ns]"], ],
- [ ] xref #41544 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41694
2021-05-27T20:28:22Z
2021-08-17T02:01:47Z
null
2021-08-17T02:01:47Z
DOC: add `to_html` to style.rst
diff --git a/doc/source/reference/style.rst b/doc/source/reference/style.rst index 0d743b5fe8b8b..68efd3b000bbc 100644 --- a/doc/source/reference/style.rst +++ b/doc/source/reference/style.rst @@ -67,5 +67,6 @@ Style export and import Styler.render Styler.export Styler.use + Styler.to_html Styler.to_excel Styler.to_latex diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 73924631aea5c..d6c151c3ed740 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -761,13 +761,13 @@ def to_html( ---------- buf : str, Path, or StringIO-like, optional, default None Buffer to write to. If ``None``, the output is returned as a string. - table_uuid: str, optional + table_uuid : str, optional Id attribute assigned to the <table> HTML element in the format: ``<table id="T_<table_uuid>" ..>`` If not given uses Styler's initially assigned value. - table_attributes: str, optional + table_attributes : str, optional Attributes to assign within the `<table>` HTML element in the format: ``<table .. <table_attributes> >``
follow up on recent `to_html` enhancement.
https://api.github.com/repos/pandas-dev/pandas/pulls/41692
2021-05-27T17:51:59Z
2021-06-03T23:28:33Z
2021-06-03T23:28:32Z
2021-06-04T08:10:07Z
Min max sparse fillna
diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4847372f18239..6ab296b314615 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1397,7 +1397,7 @@ def max(self, axis=0, *args, **kwargs): # This condition returns a nan if there are no valid values in the array. if self.size > 0 and self._valid_sp_values.size == 0: - return np.nan + return self.fill_value else: return np.nanmax(self, axis) @@ -1406,7 +1406,7 @@ def min(self, axis=0, *args, **kwargs): # This condition returns a nan if there are no valid values in the array. if self.size > 0 and self._valid_sp_values.size == 0: - return np.nan + return self.fill_value else: return np.nanmin(self, axis) diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index b29855caf6c1d..1cc8a2df44812 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1326,6 +1326,9 @@ class TestMinMax: data_neg = plain_data * (-1) data_NaN = SparseArray(np.array([0, 1, 2, np.nan, 4])) data_all_NaN = SparseArray(np.array([np.nan, np.nan, np.nan, np.nan, np.nan])) + data_NA_filled = SparseArray( + np.array([np.nan, np.nan, np.nan, np.nan, np.nan]), fill_value=5 + ) @pytest.mark.parametrize( "raw_data,max_expected,min_expected", @@ -1334,6 +1337,7 @@ class TestMinMax: (data_neg, [0], [-4]), (data_NaN, [4], [0]), (data_all_NaN, [np.nan], [np.nan]), + (data_NA_filled, [5], [5]), ], ) def test_maxmin(self, raw_data, max_expected, min_expected):
- [x] closes #41552 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41691
2021-05-27T17:16:45Z
2021-06-02T13:23:19Z
2021-06-02T13:23:19Z
2021-06-02T13:23:23Z
DOC: added explanation of each of the documentation sections
diff --git a/doc/source/development/contributing_documentation.rst b/doc/source/development/contributing_documentation.rst index a4a4f781d9dad..b36f2ba20a41b 100644 --- a/doc/source/development/contributing_documentation.rst +++ b/doc/source/development/contributing_documentation.rst @@ -21,6 +21,14 @@ the next person. About the pandas documentation -------------------------------- +As you can see from the top navigation, the documentation is split into five sections: + +* **Getting Started:** This is where new pandas users should start. It explains how to get set up, and how to get oriented with pandas. +* **User Guide:** These are deeper dives into specific pandas features, meant for users with a bit more experience. +* **API Reference:** These are the pages explaining pandas classes and functions, automatically generated from the docstrings in the code. +* **Development:** This explains how to contribute to pandas. +* **Release Notes:** The changes included in each version of pandas. + The documentation is written in **reStructuredText**, which is almost like writing in plain English, and built using `Sphinx <https://www.sphinx-doc.org/en/master/>`__. The Sphinx Documentation has an excellent `introduction to reST @@ -89,6 +97,10 @@ Some other important things to know about the docs: ``doc/source/reference``, else Sphinx will emit a warning. +* The "website" is separate from the "docs", and can be found under ``web/``. + +* While it's a proper noun, the "pandas" project name is always written lower-case. + .. note:: The ``.rst`` files are used to automatically generate Markdown and HTML versions
This is meant to help contributors understand what information belongs where. - [ ] ~~closes #xxxx~~ - [ ] tests ~~added /~~ passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] ~~whatsnew entry~~
https://api.github.com/repos/pandas-dev/pandas/pulls/41689
2021-05-27T13:44:56Z
2021-08-17T01:56:06Z
null
2021-08-17T01:56:06Z
Fix error in pre commit for ecosystem rst
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index f5212b6fc8a51..ee061e7b7d3e6 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -569,4 +569,3 @@ Library Accessor Classes Description .. _composeml: https://github.com/alteryx/compose .. _datatest: https://datatest.readthedocs.io/ .. _woodwork: https://github.com/alteryx/woodwork -
Pre-commit errors on master
https://api.github.com/repos/pandas-dev/pandas/pulls/41688
2021-05-27T08:39:28Z
2021-05-27T10:20:58Z
2021-05-27T10:20:58Z
2021-05-27T10:25:30Z
CI: add sdist release workflow
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml new file mode 100644 index 0000000000000..0c2e30a74bbdb --- /dev/null +++ b/.github/workflows/sdist.yml @@ -0,0 +1,64 @@ +name: sdist + +on: + push: + branches: + - master + pull_request: + branches: + - master + - 1.2.x + - 1.3.x + paths-ignore: + - "doc/**" + +jobs: + build: + runs-on: ubuntu-latest + timeout-minutes: 60 + defaults: + run: + shell: bash -l {0} + + strategy: + fail-fast: false + matrix: + python-version: ["3.7", "3.8", "3.9"] + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + + # GH 39416 + pip install numpy + + - name: Build pandas sdist + run: | + pip list + python setup.py sdist --formats=gztar + + - uses: conda-incubator/setup-miniconda@v2 + with: + activate-environment: pandas-sdist + python-version: ${{ matrix.python-version }} + + - name: Install pandas from sdist + run: | + conda list + python -m pip install dist/*.gz + + - name: Import pandas + run: | + cd .. + conda list + python -c "import pandas; pandas.show_versions();"
- [x] closes #39417
https://api.github.com/repos/pandas-dev/pandas/pulls/41685
2021-05-26T21:51:39Z
2021-06-21T13:03:13Z
2021-06-21T13:03:11Z
2021-07-07T02:39:48Z
DOC: Add woodwork to ecosystem docs
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index bc2325f15852c..f5212b6fc8a51 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -75,12 +75,12 @@ Statsmodels leverages pandas objects as the underlying data container for comput Use pandas DataFrames in your `scikit-learn <https://scikit-learn.org/>`__ ML pipeline. -`Featuretools <https://github.com/featuretools/featuretools/>`__ +`Featuretools <https://github.com/alteryx/featuretools/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community. -`Compose <https://github.com/FeatureLabs/compose>`__ +`Compose <https://github.com/alteryx/compose>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing prediction problems and transforming time-driven relational data into target values with cutoff times that can be used for supervised learning. @@ -551,11 +551,12 @@ Library Accessor Classes Description ================== ============ ==================================== =============================================================================== `cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses. `pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library. 
-`pandas-genomics`_ ``genomics`` ``Series``, ``DataFrame`` Provides common operations for quality control and analysis of genomics data +`pandas-genomics`_ ``genomics`` ``Series``, ``DataFrame`` Provides common operations for quality control and analysis of genomics data. `pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series. `pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames. `composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing. `datatest`_ ``validate`` ``Series``, ``DataFrame``, ``Index`` Provides validation, differences, and acceptance managers. +`woodwork`_ ``ww`` ``Series``, ``DataFrame`` Provides physical, logical, and semantic data typing information for Series and DataFrames. ================== ============ ==================================== =============================================================================== .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest @@ -565,5 +566,7 @@ Library Accessor Classes Description .. _pandas_path: https://github.com/drivendataorg/pandas-path/ .. _pathlib.Path: https://docs.python.org/3/library/pathlib.html .. _pint-pandas: https://github.com/hgrecco/pint-pandas -.. _composeml: https://github.com/FeatureLabs/compose +.. _composeml: https://github.com/alteryx/compose .. _datatest: https://datatest.readthedocs.io/ +.. _woodwork: https://github.com/alteryx/woodwork + diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index 547a5f30e0516..81ddf9c1e657f 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -34,7 +34,7 @@ computation. Use pandas DataFrames in your [scikit-learn](https://scikit-learn.org/) ML pipeline. 
-### [Featuretools](https://github.com/featuretools/featuretools/) +### [Featuretools](https://github.com/alteryx/featuretools/) Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational @@ -42,7 +42,7 @@ datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community. -### [Compose](https://github.com/FeatureLabs/compose) +### [Compose](https://github.com/alteryx/compose) Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing @@ -386,4 +386,5 @@ authors to coordinate on the namespace. | [pandas-genomics](https://pandas-genomics.readthedocs.io/en/latest/) | `genomics` | `Series`, `DataFrame` | | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` | | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` | - | [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` | + | [composeml](https://github.com/alteryx/compose) | `slice` | `DataFrame` | + | [woodwork](https://github.com/alteryx/woodwork) | `slice` | `Series`, `DataFrame` |
- [x] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41684
2021-05-26T20:38:04Z
2021-05-27T01:47:23Z
2021-05-27T01:47:23Z
2021-05-27T01:54:04Z
Add test for fixed regression in concat with empty DataFrames
diff --git a/pandas/tests/reshape/concat/test_empty.py b/pandas/tests/reshape/concat/test_empty.py index a97e9265b4f99..304dea52f359a 100644 --- a/pandas/tests/reshape/concat/test_empty.py +++ b/pandas/tests/reshape/concat/test_empty.py @@ -249,3 +249,26 @@ def test_empty_dtype_coerce(self): result = concat([df1, df2]) expected = df1.dtypes tm.assert_series_equal(result.dtypes, expected) + + def test_concat_empty_dataframe(self): + # 39037 + df1 = DataFrame(columns=["a", "b"]) + df2 = DataFrame(columns=["b", "c"]) + result = concat([df1, df2, df1]) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame(columns=["a", "b"]) + df4 = DataFrame(columns=["b"]) + result = concat([df3, df4]) + expected = DataFrame(columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_different_dtypes(self): + # 39037 + df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + df2 = DataFrame({"a": [1, 2, 3]}) + + result = concat([df1[:0], df2[:0]]) + assert result["a"].dtype == np.int64 + assert result["b"].dtype == np.object_
- [ ✓ ] closes #39037 - [ ✓ ] tests added / passed - [ ✓ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41677
2021-05-26T10:03:14Z
2021-05-28T19:05:48Z
2021-05-28T19:05:48Z
2021-05-28T19:05:52Z
TST: Old Issues
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 7fe921571ee2e..da930ab4d7423 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1837,3 +1837,11 @@ def test_arithemetic_multiindex_align(): ) result = df1 - df2 tm.assert_frame_equal(result, expected) + + +def test_bool_frame_mult_float(): + # GH 18549 + df = DataFrame(True, list("ab"), list("cd")) + result = df * 1.0 + expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd")) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 995fd58a84cbd..b40514568452c 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -599,3 +599,16 @@ def test_filter_dropna_with_empty_groups(): result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True) expected_true = Series(index=pd.Index([], dtype=int), dtype=np.float64) tm.assert_series_equal(result_true, expected_true) + + +def test_filter_consistent_result_before_after_agg_func(): + # GH 17091 + df = DataFrame({"data": range(6), "key": list("ABCABC")}) + grouper = df.groupby("key") + result = grouper.filter(lambda x: True) + expected = DataFrame({"data": range(6), "key": list("ABCABC")}) + tm.assert_frame_equal(result, expected) + + grouper.sum() + result = grouper.filter(lambda x: True) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 0c20622311e1f..772aa97c47233 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -977,3 +977,10 @@ def test_extension_array_cross_section_converts(): result = df.iloc[0] tm.assert_series_equal(result, expected) + + +def test_getitem_object_index_float_string(): + # GH 17286 + s = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) + assert s["a"] == 1 + assert s[1.0] == 1 diff --git 
a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 8c79bafa2f888..c1a096ed06efc 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1565,6 +1565,19 @@ def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self): result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)] tm.assert_series_equal(result, ser) + def test_loc_getitem_datetime_string_with_datetimeindex(self): + # GH 16710 + df = DataFrame( + {"a": range(10), "b": range(10)}, + index=date_range("2010-01-01", "2010-01-10"), + ) + result = df.loc[["2010-01-01", "2010-01-05"], ["a", "b"]] + expected = DataFrame( + {"a": [0, 4], "b": [0, 4]}, + index=DatetimeIndex(["2010-01-01", "2010-01-05"]), + ) + tm.assert_frame_equal(result, expected) + def test_loc_getitem_sorted_index_level_with_duplicates(self): # GH#4516 sorting a MultiIndex with duplicates and multiple dtypes mi = MultiIndex.from_tuples( diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 219c94b5a895d..10c8ccae67fb2 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -1393,6 +1393,44 @@ def test_to_latex_non_string_index(self): ) assert result == expected + def test_to_latex_multiindex_multirow(self): + # GH 16719 + mi = pd.MultiIndex.from_product( + [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"] + ) + df = DataFrame(index=mi) + result = df.to_latex(multirow=True, escape=False) + expected = _dedent( + r""" + \begin{tabular}{lll} + \toprule + & & \\ + i & val0 & val1 \\ + \midrule + \multirow{6}{*}{0.0} & \multirow{2}{*}{3.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{2.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{1.0} & 0 \\ + & & 1 \\ + \cline{1-3} + \cline{2-3} + \multirow{6}{*}{1.0} & \multirow{2}{*}{3.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{2.0} & 0 \\ + & & 1 \\ + \cline{2-3} + & \multirow{2}{*}{1.0} & 0 \\ + & & 
1 \\ + \bottomrule + \end{tabular} + """ + ) + assert result == expected + class TestTableBuilder: @pytest.fixture diff --git a/pandas/tests/tseries/offsets/test_custom_business_hour.py b/pandas/tests/tseries/offsets/test_custom_business_hour.py index 07270008adbd2..c2b4e3c343c11 100644 --- a/pandas/tests/tseries/offsets/test_custom_business_hour.py +++ b/pandas/tests/tseries/offsets/test_custom_business_hour.py @@ -19,6 +19,8 @@ assert_offset_equal, ) +from pandas.tseries.holiday import USFederalHolidayCalendar + class TestCustomBusinessHour(Base): _offset = CustomBusinessHour @@ -298,3 +300,11 @@ def test_apply_nanoseconds(self, nano_case): offset, cases = nano_case for base, expected in cases.items(): assert_offset_equal(offset, base, expected) + + def test_us_federal_holiday_with_datetime(self): + # GH 16867 + bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar()) + t0 = datetime(2014, 1, 17, 15) + result = t0 + bhour_us * 8 + expected = Timestamp("2014-01-21 15:00:00") + assert result == expected
- [x] closes #16710 - [x] closes #16719 - [x] closes #16867 - [x] closes #17091 - [x] closes #17286 - [x] closes #18549 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/41674
2021-05-26T05:22:05Z
2021-05-26T17:12:42Z
2021-05-26T17:12:40Z
2021-05-26T18:33:02Z
BUG: PeriodIndex.get_loc with mismatched freq
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 8a3d6cf63d4f1..acf5202a28409 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -937,6 +937,7 @@ Indexing - Bug in :meth:`Series.__delitem__` with ``ExtensionDtype`` incorrectly casting to ``ndarray`` (:issue:`40386`) - Bug in :meth:`DataFrame.loc` returning :class:`MultiIndex` in wrong order if indexer has duplicates (:issue:`40978`) - Bug in :meth:`DataFrame.__setitem__` raising ``TypeError`` when using a str subclass as the column name with a :class:`DatetimeIndex` (:issue:`37366`) +- Bug in :meth:`PeriodIndex.get_loc` failing to raise ``KeyError`` when given a :class:`Period` with a mismatched ``freq`` (:issue:`41670`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 2600363bc28eb..c1104b80a0a7a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -4,10 +4,7 @@ datetime, timedelta, ) -from typing import ( - Any, - Hashable, -) +from typing import Hashable import warnings import numpy as np @@ -318,24 +315,6 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return False return dtype.freq == self.freq - # ------------------------------------------------------------------------ - # Indexing - - @doc(Index.__contains__) - def __contains__(self, key: Any) -> bool: - if isinstance(key, Period): - if key.freq != self.freq: - return False - else: - return key.ordinal in self._engine - else: - hash(key) - try: - self.get_loc(key) - return True - except KeyError: - return False - # ------------------------------------------------------------------------ # Index Methods @@ -472,6 +451,8 @@ def get_loc(self, key, method=None, tolerance=None): elif is_integer(key): # Period constructor will cast to string, which we dont want raise KeyError(key) + elif isinstance(key, Period) and key.freq != self.freq: + raise KeyError(key) try: key = Period(key, freq=self.freq) diff --git 
a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index e820c2250256e..a41d02cfbd394 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -338,15 +338,21 @@ def test_get_loc_integer(self): pi2.get_loc(46) # TODO: This method came from test_period; de-dup with version above - def test_get_loc2(self): + @pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"]) + def test_get_loc_method(self, method): idx = period_range("2000-01-01", periods=3) - for method in [None, "pad", "backfill", "nearest"]: - assert idx.get_loc(idx[1], method) == 1 - assert idx.get_loc(idx[1].asfreq("H", how="start"), method) == 1 - assert idx.get_loc(idx[1].to_timestamp(), method) == 1 - assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1 - assert idx.get_loc(str(idx[1]), method) == 1 + assert idx.get_loc(idx[1], method) == 1 + assert idx.get_loc(idx[1].to_timestamp(), method) == 1 + assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1 + assert idx.get_loc(str(idx[1]), method) == 1 + + key = idx[1].asfreq("H", how="start") + with pytest.raises(KeyError, match=str(key)): + idx.get_loc(key, method=method) + + # TODO: This method came from test_period; de-dup with version above + def test_get_loc3(self): idx = period_range("2000-01-01", periods=5)[::2] assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1 @@ -401,6 +407,21 @@ def test_get_loc_invalid_string_raises_keyerror(self): assert "A" not in ser assert "A" not in pi + def test_get_loc_mismatched_freq(self): + # see also test_get_indexer_mismatched_dtype testing we get analogous + # behavior for get_loc + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + pi2 = dti.to_period("W") + pi3 = pi.view(pi2.dtype) # i.e. 
matching i8 representations + + with pytest.raises(KeyError, match="W-SUN"): + pi.get_loc(pi2[0]) + + with pytest.raises(KeyError, match="W-SUN"): + # even though we have matching i8 values + pi.get_loc(pi3[0]) + class TestGetIndexer: def test_get_indexer(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41670
2021-05-25T23:22:58Z
2021-05-28T15:58:08Z
2021-05-28T15:58:08Z
2021-06-26T07:47:43Z
TST: Check float format in object column (#35603)
diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 96a69476ccbef..0d5c3bc21c609 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -240,6 +240,13 @@ def test_series_repr_nat(self): ) assert result == expected + def test_float_repr(self): + # GH#35603 + # check float format when cast to object + ser = Series([1.0]).astype(object) + expected = "0 1.0\ndtype: object" + assert repr(ser) == expected + class TestCategoricalRepr: def test_categorical_repr_unicode(self):
- [ ] closes #35603 - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry This test checks if a Series cast to object still shows data in float format. Ran `pytest pandas/tests/series/test_constructors.py` Output: ``` ================================================= test session starts ================================================= platform win32 -- Python 3.8.10, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 rootdir: C:\Users\mdhsi\pandas-michael, configfile: pyproject.toml plugins: hypothesis-6.12.0, asyncio-0.14.0, cov-2.11.1, forked-1.3.0, instafail-0.4.1, xdist-2.2.1 collected 301 items pandas\tests\series\test_constructors.py .......................................................................................................................................................................................................x..................................................................................................... 
--------------------------- generated xml file: C:\Users\mdhsi\pandas-michael\test-data.xml --------------------------- ================================================ slowest 30 durations ================================================= 0.25s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_empty_constructor[<lambda>-True0] 0.13s call pandas/tests/series/test_constructors.py::TestSeriesConstructorIndexCoercion::test_series_constructor_datetimelike_index_coercion 0.09s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/US/Pacific'-True] 0.04s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_dtype_datetime64_10 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_empty[OrderedDict] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_empty[dict] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[PeriodIndex] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[DatetimeIndex] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_cant_cast_datetimelike[TimedeltaIndex] 0.02s setup pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_datetimelike_scalar_to_string_dtype[string] 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_pass_none 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_categorical_with_coercion 0.02s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_dtype_timedelta64 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(300)-False] 0.01s 
call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['Asia/Tokyo'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(300)-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzlocal()-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/Asia/Singapore'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[datetime.timezone(datetime.timedelta(seconds=3600))-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzutc()-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['+01:15'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC-02:15'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[tzlocal()-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(-300)-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['dateutil/Asia/Singapore'-True] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['UTC'-True] 0.01s call 
pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive['-02:15'-False] 0.01s call pandas/tests/series/test_constructors.py::TestSeriesConstructors::test_constructor_data_aware_dtype_naive[pytz.FixedOffset(-300)-False] =========================================== 300 passed, 1 xfailed in 2.22s ============================================ ```
https://api.github.com/repos/pandas-dev/pandas/pulls/41668
2021-05-25T22:11:50Z
2021-05-27T18:41:06Z
2021-05-27T18:41:06Z
2021-05-27T18:41:10Z
ENH: add abiility for json.dumps to parse and add timezones
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 1eb22436204a8..7f6748787b963 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -218,6 +218,7 @@ Other enhancements - :meth:`Series.loc.__getitem__` and :meth:`Series.loc.__setitem__` with :class:`MultiIndex` now raising helpful error message when indexer has too many dimensions (:issue:`35349`) - :meth:`pandas.read_stata` and :class:`StataReader` support reading data from compressed files. - Add support for parsing ``ISO 8601``-like timestamps with negative signs to :meth:`pandas.Timedelta` (:issue:`37172`) +- Add support for making ``ISO 8601``-like timestamps with timezone information in pd.io.json.dumps (:issue:`12997`) - Add support for unary operators in :class:`FloatingArray` (:issue:`38749`) - :class:`RangeIndex` can now be constructed by passing a ``range`` object directly e.g. ``pd.RangeIndex(range(3))`` (:issue:`12067`) - :meth:`round` being enabled for the nullable integer and floating dtypes (:issue:`38844`) diff --git a/pandas/_libs/src/ujson/python/date_conversions.c b/pandas/_libs/src/ujson/python/date_conversions.c index 0744c6af74480..b8ad021a17dcc 100644 --- a/pandas/_libs/src/ujson/python/date_conversions.c +++ b/pandas/_libs/src/ujson/python/date_conversions.c @@ -8,6 +8,9 @@ The full license is in the LICENSE file, distributed with this software. 
// Conversion routines that are useful for serialization, // but which don't interact with JSON objects directly +#include <Python.h> +#include <datetime.h> + #include "date_conversions.h" #include <../../../tslibs/src/datetime/np_datetime.h> #include <../../../tslibs/src/datetime/np_datetime_strings.h> @@ -55,7 +58,7 @@ char *int64ToIso(int64_t value, NPY_DATETIMEUNIT base, size_t *len) { return NULL; } - ret_code = make_iso_8601_datetime(&dts, result, *len, base); + ret_code = make_iso_8601_datetime(&dts, result, *len, base, -1); if (ret_code != 0) { PyErr_SetString(PyExc_ValueError, "Could not convert datetime value to string"); @@ -77,8 +80,8 @@ npy_datetime NpyDateTimeToEpoch(npy_datetime dt, NPY_DATETIMEUNIT base) { char *PyDateTimeToIso(PyObject *obj, NPY_DATETIMEUNIT base, size_t *len) { npy_datetimestruct dts; - int ret; - + int ret, local; + int tzoffset = -1; ret = convert_pydatetime_to_datetimestruct(obj, &dts); if (ret != 0) { if (!PyErr_Occurred()) { @@ -87,11 +90,26 @@ char *PyDateTimeToIso(PyObject *obj, NPY_DATETIMEUNIT base, } return NULL; } + if (PyObject_HasAttrString(obj, "tzinfo")){ + PyObject *tzinfo = PyObject_GetAttrString(obj, "tzinfo"); + Py_DECREF(tzinfo); + + if ((tzinfo != NULL) && (tzinfo != Py_None)){ + tzoffset = get_tzoffset_from_pytzinfo(tzinfo, &dts); + if (tzoffset == 0){ + tzoffset = -1; + } + } + } - *len = (size_t)get_datetime_iso_8601_strlen(0, base); + if (tzoffset == -1){ + local = 0; + } else { + local = 1; + } + *len = (size_t)get_datetime_iso_8601_strlen(local, base); char *result = PyObject_Malloc(*len); - ret = make_iso_8601_datetime(&dts, result, *len, base); - + ret = make_iso_8601_datetime(&dts, result, *len, base, tzoffset); if (ret != 0) { PyErr_SetString(PyExc_ValueError, "Could not convert datetime value to string"); diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 31b43cdb28d9d..0099ae1196473 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ 
b/pandas/_libs/src/ujson/python/objToJSON.c @@ -47,6 +47,7 @@ Numeric decoder derived from from TCL library #include <ultrajson.h> #include "date_conversions.h" #include "datetime.h" +#include "../../../tslibs/src/datetime/np_datetime.h" static PyTypeObject *type_decimal; static PyTypeObject *cls_dataframe; @@ -180,6 +181,8 @@ void *initObjToJSON(void) { /* Initialise numpy API */ import_array(); + /* Initialize pandas datetime API */ + pandas_pydatetime_import(); // GH 31463 return NULL; } @@ -213,6 +216,14 @@ static TypeContext *createTypeContext(void) { return pc; } +// static PyObject *get_tzinfo(PyObject *obj){ +// if (PyObject_HasAttrString(obj, "tzinfo")){ +// PyObject *tzinfo = PyObject_GetAttrString(obj, "tzinfo"); +// return tzinfo; +// } +// return Py_None; +// } + static PyObject *get_values(PyObject *obj) { PyObject *values = NULL; @@ -1600,7 +1611,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } ISITERABLE: - + // tzinfo = get_tzinfo(obj); + if (PyObject_TypeCheck(obj, cls_index)) { if (enc->outputFormat == SPLIT) { tc->type = JT_OBJECT; diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c index 9ad2ead5f919f..8e0d7c5538611 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c @@ -21,12 +21,15 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #endif // NPY_NO_DEPRECATED_API #include <Python.h> +#include <datetime.h> #include <numpy/arrayobject.h> #include <numpy/arrayscalars.h> #include <numpy/ndarraytypes.h> #include "np_datetime.h" - +#define PyDateTime_FromDateAndTimeAndZone(year, month, day, hour, min, sec, usec, tz) \ + PyDateTimeAPI->DateTime_FromDateAndTime(year, month, day, hour, \ + min, sec, usec, tz, PyDateTimeAPI->DateTimeType) #if PY_MAJOR_VERSION >= 3 #define PyInt_AsLong PyLong_AsLong #endif // PyInt_AsLong @@ -41,6 +44,13 @@ const int days_per_month_table[2][12] = { {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; + + +void pandas_pydatetime_import(void) +{ + PyDateTime_IMPORT; +} + /* * Returns 1 if the given year is a leap year, 0 otherwise. */ @@ -764,3 +774,33 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta td, "invalid base unit"); } } + +/* + * Gets a tzoffset in minutes by calling the fromutc() function on + * the Python datetime.tzinfo object. 
+ */ +int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts) +{ + PyDateTime_Date *dt; + PyDateTime_Delta *tzoffset; + + /* Create a Python datetime to give to the timezone object */ + dt = (PyDateTime_Date *) PyDateTime_FromDateAndTimeAndZone((int)dts->year, dts->month, dts->day, + dts->hour, dts->min, 0, 0, timezone_obj); + if (!(PyDateTime_Check(dt))) { + Py_DECREF(dt); + return -1; + } + tzoffset = (PyDateTime_Delta *) PyObject_CallMethod(timezone_obj, "utcoffset", "O", dt); + + Py_DECREF(dt); + if (!(PyDelta_Check(tzoffset))){ + Py_DECREF(tzoffset); + return -1; + } + + long offset_minutes = (tzoffset->days * 24 * 60) + ((long) tzoffset->seconds / 60); + Py_DECREF(tzoffset); + return (int) offset_minutes; + +} diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.h b/pandas/_libs/tslibs/src/datetime/np_datetime.h index 0bbc24ed822c5..0b3c8e8795a14 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime.h +++ b/pandas/_libs/tslibs/src/datetime/np_datetime.h @@ -33,6 +33,7 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- +void pandas_pydatetime_import(void); int convert_pydatetime_to_datetimestruct(PyObject *dtobj, npy_datetimestruct *out); @@ -75,5 +76,10 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, void add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes); +/* + * Gets a tzoffset in minutes by calling the fromutc() function on + * the Python datetime.tzinfo object. 
+ */ +int get_tzoffset_from_pytzinfo(PyObject *timezone_obj, npy_datetimestruct *dts); #endif // PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_H_ diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c index b245ae5880ecb..70046b23174c8 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c @@ -578,7 +578,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { if (base >= NPY_FR_h) { if (local) { - len += 5; /* "+####" or "-####" */ + len += 6; /* "+##:##" or "-##:##" */ } else { len += 1; /* "Z" */ } @@ -601,11 +601,15 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { * 'base' restricts the output to that unit. Set 'base' to * -1 to auto-detect a base after which all the values are zero. * + * 'tzoffset' are the minutes of the offset from UTC created by timezones + * e.g. 'tzoffset` of -750 with a dts of (2021, 4, 2, 0, 0, 0, 0) would + * produce '2021-04-01T11:30:00-12:30 + * * Returns 0 on success, -1 on failure (for example if the output * string was too short). 
*/ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - NPY_DATETIMEUNIT base) { + NPY_DATETIMEUNIT base, int tzoffset) { char *substr = outstr; int sublen = outlen; int tmplen; @@ -638,6 +642,12 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, substr += tmplen; sublen -= tmplen; + if (tzoffset != -1){ + npy_datetimestruct dts_local; + dts_local = *dts; + dts = &dts_local; + add_minutes_to_datetimestruct(dts, tzoffset); + } /* Stop if the unit is years */ if (base == NPY_FR_Y) { if (sublen > 0) { @@ -883,13 +893,54 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; add_time_zone: - /* UTC "Zulu" time */ - if (sublen < 1) { - goto string_too_short; + if (tzoffset != -1) { + /* Add the +/- sign */ + if (sublen < 1) { + goto string_too_short; + } + if (tzoffset < 0) { + substr[0] = '-'; + tzoffset = -tzoffset; + } + else { + substr[0] = '+'; + } + substr += 1; + sublen -= 1; + + /* Add the timezone offset */ + if (sublen < 1 ) { + goto string_too_short; + } + substr[0] = (char)((tzoffset / (10*60)) % 10 + '0'); + if (sublen < 2 ) { + goto string_too_short; + } + substr[1] = (char)((tzoffset / 60) % 10 + '0'); + if (sublen < 3 ) { + goto string_too_short; + } + substr[2] = ':'; + if (sublen < 4) { + goto string_too_short; + } + substr[3] = (char)(((tzoffset % 60) / 10) % 10 + '0'); + if (sublen < 5 ) { + goto string_too_short; + } + substr[4] = (char)((tzoffset % 60) % 10 + '0'); + substr += 5; + sublen -= 5; + } + /* UTC "Zulu" time */ + else { + if (sublen < 1) { + goto string_too_short; + } + substr[0] = 'Z'; + substr += 1; + sublen -= 1; } - substr[0] = 'Z'; - substr += 1; - sublen -= 1; /* Add a NULL terminator, and return */ if (sublen > 0) { diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h index 200a71ff0c2b7..13f727bba5e9a 100644 --- 
a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h @@ -78,7 +78,7 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); */ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - NPY_DATETIMEUNIT base); + NPY_DATETIMEUNIT base, int tzoffset); /* * Converts an pandas_timedeltastruct to an ISO 8601 string. diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 0ffc6044a5897..86526965e1b1f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1185,8 +1185,6 @@ def test_sparse(self): "ts", [ Timestamp("2013-01-10 05:00:00Z"), - Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), - Timestamp("2013-01-10 00:00:00-0500"), ], ) def test_tz_is_utc(self, ts): @@ -1198,12 +1196,26 @@ def test_tz_is_utc(self, ts): dt = ts.to_pydatetime() assert dumps(dt, iso_dates=True) == exp + @pytest.mark.parametrize( + "ts", + [ + Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), + Timestamp("2013-01-10 00:00:00-0500"), + ], + ) + def test_tz_is_localized(self, ts): + from pandas.io.json import dumps + + exp = '"2013-01-10T00:00:00.000-05:00"' + + assert dumps(ts, iso_dates=True) == exp + dt = ts.to_pydatetime() + assert dumps(dt, iso_dates=True) == exp + @pytest.mark.parametrize( "tz_range", [ - pd.date_range("2013-01-01 05:00:00Z", periods=2), - pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), - pd.date_range("2013-01-01 00:00:00-0500", periods=2), + pd.date_range("2013-01-01 05:00:00Z", periods=2) ], ) def test_tz_range_is_utc(self, tz_range): @@ -1223,6 +1235,30 @@ def test_tz_range_is_utc(self, tz_range): result = dumps(df, iso_dates=True) assert result == dfexp + @pytest.mark.parametrize( + "tz_range", + [ + pd.date_range("2013-01-01 00:00:00", periods=2, tz='US/Eastern'), + pd.date_range("2013-01-01 00:00:00-0500", periods=2) + ], + ) + def test_tz_range_is_local(self, tz_range): + from 
pandas.io.json import dumps + + exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]' + dfexp = ( + '{"DT":{' + '"0":"2013-01-01T00:00:00.000-05:00",' + '"1":"2013-01-02T00:00:00.000-05:00"}}' + ) + + assert dumps(tz_range, iso_dates=True) == exp + dti = DatetimeIndex(tz_range) + assert dumps(dti, iso_dates=True) == exp + df = DataFrame({"DT": dti}) + result = dumps(df, iso_dates=True) + assert result == dfexp + def test_read_inline_jsonl(self): # GH9180 result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 805f6b8dbe461..9e21c1a259f93 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -391,8 +391,9 @@ def test_encode_time_conversion_basic(self, test): def test_encode_time_conversion_pytz(self): # see gh-11473: to_json segfaults with timezone-aware datetimes - test = datetime.time(10, 12, 15, 343243, pytz.utc) - output = ujson.encode(test) + test = datetime.datetime(2021, 5, 25, 10, 12, 15, 343243, + pytz.timezone('US/Eastern')) + output = ujson.encode(test, iso_dates=True, date_unit='us') expected = f'"{test.isoformat()}"' assert expected == output
- [x] closes #12997 - [x] tests modofied - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Ok, so this is a doozy AND it's not done, but it can be merged as is. What do we do: We add timezones to json output! Before: ``` ts = pd.Timestamp('2020-10-5 4:00:05', tz='US/Pacific') print(pd.io.json.dumps(ts, iso_dates=True)) print(ts.isoformat()) ``` would provide `"2020-10-05T11:00:05.000Z"` and `"2020-10-05T04:00:05.000-07:00"` respectively. Now they both produce ``"2020-10-05T04:00:05.000-07:00"`` So what do I mean by not complete? This still doesn't work for arrays that have datetime values. This only works for single datetime values. So: `pd.DataFrame({'foo': range(5)}, index=pd.date_range('2020-10-12', freq='H', periods=5, tz='US/Eastern')).to_json(date_format='iso')` will still produce: `'{"foo":{"2020-10-12T04:00:00.000Z":0,"2020-10-12T05:00:00.000Z":1,"2020-10-12T06:00:00.000Z":2,"2020-10-12T07:00:00.000Z":3,"2020-10-12T08:00:00.000Z":4}}'` and not `'{"foo":{"2020-10-12T00:00:00.000-04:00":0,"2020-10-12T01:00:00.000-04:00":1,"2020-10-12T02:00:00.000-04:00":2,"2020-10-12T03:00:00.000-04:00":3,"2020-10-12T04:00:00.000-04:00":4}}'` Working on it currently. Open to recommendations!
https://api.github.com/repos/pandas-dev/pandas/pulls/41667
2021-05-25T21:42:40Z
2021-05-26T15:42:27Z
null
2021-05-26T15:42:30Z
CI: fix npdev build
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f7cec262ca302..8544671ab3702 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -116,12 +116,14 @@ cdef class IndexEngine: if self.is_monotonic_increasing: values = self._get_index_values() - try: - left = values.searchsorted(val, side='left') - right = values.searchsorted(val, side='right') - except TypeError: - # e.g. GH#29189 get_loc(None) with a Float64Index - raise KeyError(val) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FutureWarning) + try: + left = values.searchsorted(val, side='left') + right = values.searchsorted(val, side='right') + except TypeError: + # e.g. GH#29189 get_loc(None) with a Float64Index + raise KeyError(val) diff = right - left if diff == 0: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f8f5e5e05bc35..ae6e777d604dc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -13,6 +13,7 @@ ) from warnings import ( catch_warnings, + filterwarnings, simplefilter, warn, ) @@ -1583,7 +1584,9 @@ def searchsorted(arr, value, side="left", sorter=None) -> np.ndarray: # and `value` is a pd.Timestamp, we may need to convert value arr = ensure_wrapped_if_datetimelike(arr) - return arr.searchsorted(value, side=side, sorter=sorter) + with catch_warnings(): + filterwarnings("ignore", category=FutureWarning) + return arr.searchsorted(value, side=side, sorter=sorter) # ---- # diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ec69d9ccbdd90..539f7a784da5d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,6 @@ from __future__ import annotations +from collections import abc from datetime import ( datetime, time, @@ -71,7 +72,10 @@ from pandas.core.arrays._ranges import generate_regular_range from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com -from pandas.core.construction import extract_array 
+from pandas.core.construction import ( + create_ndarray, + extract_array, +) from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import ( @@ -2012,10 +2016,10 @@ def sequence_to_dt64ns( if not hasattr(data, "dtype"): # e.g. list, tuple - if np.ndim(data) == 0: + if lib.is_iterator(data) or isinstance(data, (abc.KeysView, abc.ValuesView)): # i.e. generator data = list(data) - data = np.asarray(data) + data = create_ndarray(data, copy=False) copy = False elif isinstance(data, ABCMultiIndex): raise TypeError("Cannot create a DatetimeArray from a MultiIndex.") @@ -2026,7 +2030,7 @@ def sequence_to_dt64ns( data = data.to_numpy("int64", na_value=iNaT) elif not isinstance(data, (np.ndarray, ExtensionArray)): # GH#24539 e.g. xarray, dask object - data = np.asarray(data) + data = create_ndarray(data) if isinstance(data, DatetimeArray): inferred_freq = data.freq diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index e9d554200805e..959d7a3784bb0 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -23,7 +23,10 @@ ) from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray -from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.construction import ( + create_ndarray, + ensure_wrapped_if_datetimelike, +) from pandas.core.strings.object_array import ObjectStringArrayMixin @@ -94,12 +97,11 @@ def _from_sequence( if isinstance(dtype, PandasDtype): dtype = dtype._dtype - # error: Argument "dtype" to "asarray" has incompatible type - # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object], - # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, - # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], - # _DTypeDict, Tuple[Any, Any]]]" - result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type] + result = create_ndarray( + scalars, + dtype=dtype, # 
type: ignore[arg-type] + copy=False, + ) if ( result.ndim > 1 and not hasattr(scalars, "dtype") diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index ea87ac64cfe22..148ee79ae47b8 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -1,5 +1,6 @@ from __future__ import annotations +from collections import abc from datetime import timedelta from typing import TYPE_CHECKING @@ -66,7 +67,10 @@ ) from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com -from pandas.core.construction import extract_array +from pandas.core.construction import ( + create_ndarray, + extract_array, +) from pandas.core.ops.common import unpack_zerodim_and_defer if TYPE_CHECKING: @@ -965,10 +969,10 @@ def sequence_to_td64ns( # Unwrap whatever we have into a np.ndarray if not hasattr(data, "dtype"): # e.g. list, tuple - if np.ndim(data) == 0: + if lib.is_iterator(data) or isinstance(data, (abc.KeysView, abc.ValuesView)): # i.e. generator data = list(data) - data = np.array(data, copy=False) + data = create_ndarray(data, copy=False) elif isinstance(data, ABCMultiIndex): raise TypeError("Cannot create a DatetimeArray from a MultiIndex.") else: @@ -978,7 +982,7 @@ def sequence_to_td64ns( data = data.to_numpy("int64", na_value=iNaT) elif not isinstance(data, (np.ndarray, ExtensionArray)): # GH#24539 e.g. 
xarray, dask object - data = np.asarray(data) + data = create_ndarray(data, copy=False) elif isinstance(data, ABCCategorical): data = data.categories.take(data.codes, fill_value=NaT)._values copy = False diff --git a/pandas/core/common.py b/pandas/core/common.py index 04ff2d2c4618f..c6f31e9fe67d5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -129,7 +129,7 @@ def is_bool_indexer(key: Any) -> bool: is_array_like(key) and is_extension_array_dtype(key.dtype) ): if key.dtype == np.object_: - key = np.asarray(key) + key = np.asarray(key, dtype=object) if not lib.is_bool_array(key): na_msg = "Cannot mask with non-boolean array containing NA / NaN values" @@ -142,8 +142,10 @@ def is_bool_indexer(key: Any) -> bool: elif is_bool_dtype(key.dtype): return True elif isinstance(key, list): + from pandas.core.construction import create_ndarray + try: - arr = np.asarray(key) + arr = create_ndarray(key, copy=False) return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False @@ -221,6 +223,8 @@ def count_not_none(*args) -> int: def asarray_tuplesafe(values, dtype: NpDtype | None = None) -> np.ndarray: + if dtype is not None: + dtype = np.dtype(dtype) if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")): values = list(values) @@ -229,15 +233,12 @@ def asarray_tuplesafe(values, dtype: NpDtype | None = None) -> np.ndarray: # expected "ndarray") return values._values # type: ignore[return-value] - # error: Non-overlapping container check (element type: "Union[str, dtype[Any], - # None]", container item type: "type") - if isinstance(values, list) and dtype in [ # type: ignore[comparison-overlap] - np.object_, - object, - ]: + if isinstance(values, list) and dtype == np.dtype("object"): return construct_1d_object_array_from_listlike(values) - result = np.asarray(values, dtype=dtype) + from pandas.core.construction import create_ndarray + + result = create_ndarray(values, dtype=dtype, copy=False) if 
issubclass(result.dtype.type, str): result = np.asarray(values, dtype=object) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 188bb64932de0..aa75d6540edd2 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -12,6 +12,7 @@ Sequence, cast, ) +import warnings import numpy as np import numpy.ma as ma @@ -815,3 +816,20 @@ def create_series_with_explicit_dtype( return Series( data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath ) + + +def create_ndarray( + obj, *, dtype: np.dtype | None = None, copy: bool = True +) -> np.ndarray: + """ + Call np.ndarray if we do not know the outcome dtype. + """ + if dtype is not None: + return np.array(obj, dtype=dtype, copy=copy) + try: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + out = np.array(obj, copy=copy) + except (TypeError, ValueError): + out = np.array(obj, dtype=object, copy=copy) + return out diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..16f15234d7167 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -882,7 +882,9 @@ def maybe_infer_dtype_type(element): if hasattr(element, "dtype"): tipo = element.dtype elif is_list_like(element): - element = np.asarray(element) + from pandas.core.construction import create_ndarray + + element = create_ndarray(element, copy=False) tipo = element.dtype return tipo @@ -1608,8 +1610,9 @@ def maybe_cast_to_datetime( if is_datetime64 or is_datetime64tz: dtype = ensure_nanosecond_dtype(dtype) + from pandas.core.construction import create_ndarray - value = np.array(value, copy=False) + value = create_ndarray(value, copy=False) # we have an array of datetime or timedeltas & nulls if value.size or not is_dtype_equal(value.dtype, dtype): @@ -2009,11 +2012,17 @@ def construct_1d_ndarray_preserving_na( values, dtype, copy=copy # type: ignore[arg-type] ) else: + from pandas.core.construction import 
create_ndarray + # error: Argument "dtype" to "array" has incompatible type # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[dtype[Any], # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" - subarr = np.array(values, dtype=dtype, copy=copy) # type: ignore[arg-type] + subarr = create_ndarray( + values, + dtype=dtype, # type: ignore[arg-type] + copy=copy, + ) return subarr diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index de7c522b4fbec..1f6b89b51d003 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -38,6 +38,7 @@ from pandas.core.dtypes.generic import ABCSeries import pandas.core.common as com +from pandas.core.construction import create_ndarray from pandas.core.indexes.base import ( Index, maybe_extract_name, @@ -152,7 +153,7 @@ def _ensure_array(cls, data, dtype, copy: bool): if not isinstance(data, (ABCSeries, list, tuple)): data = list(data) - data = np.asarray(data, dtype=dtype) + data = create_ndarray(data, dtype=dtype, copy=False) if issubclass(data.dtype.type, str): cls._string_data_error(data) @@ -160,7 +161,7 @@ def _ensure_array(cls, data, dtype, copy: bool): dtype = cls._ensure_dtype(dtype) if copy or not is_dtype_equal(data.dtype, dtype): - subarr = np.array(data, dtype=dtype, copy=copy) + subarr = create_ndarray(data, dtype=dtype, copy=copy) cls._assert_safe_casting(data, subarr) else: subarr = data @@ -169,7 +170,7 @@ def _ensure_array(cls, data, dtype, copy: bool): # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") - subarr = np.asarray(subarr) + subarr = create_ndarray(subarr, copy=False) return subarr @classmethod diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index be5b89f08b5ca..152eb16d36f20 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -42,7 +42,10 @@ ) import pandas.core.common as com -from 
pandas.core.construction import array as pd_array +from pandas.core.construction import ( + array as pd_array, + create_ndarray, +) from pandas.core.indexers import ( check_array_indexer, is_empty_indexer, @@ -1712,7 +1715,10 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value, name) - elif np.ndim(value) == 2: + elif (hasattr(value, "ndim") and value.ndim == 2) or ( + not hasattr(value, "ndim") + and create_ndarray(value, copy=False).ndim == 2 + ): self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and not is_scalar(pi): @@ -1763,7 +1769,7 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): for loc in ilocs: self._setitem_single_column(loc, value, pi) - def _setitem_with_indexer_2d_value(self, indexer, value): + def _setitem_with_indexer_2d_value(self, indexer, value: np.ndarray) -> None: # We get here with np.ndim(value) == 2, excluding DataFrame, # which goes through _setitem_with_indexer_frame_value pi = indexer[0] diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 31e32b053367b..ff0902710fadc 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -421,7 +421,7 @@ def _convert(arr): return self.apply(_convert) def replace(self: T, value, **kwargs) -> T: - assert np.ndim(value) == 0, value + assert not lib.is_list_like(value) # TODO "replace" is right now implemented on the blocks, we should move # it to general array algos so it can be reused here return self.apply_with_block("replace", value=value, **kwargs) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 4f1b16e747394..487cbdacde0a9 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -102,6 +102,7 @@ import pandas.core.common as com import 
pandas.core.computation.expressions as expressions from pandas.core.construction import ( + create_ndarray, ensure_wrapped_if_datetimelike, extract_array, ) @@ -933,7 +934,7 @@ def setitem(self, indexer, value): arr_value = value else: is_ea_value = False - arr_value = np.asarray(value) + arr_value = create_ndarray(value, copy=False) if transpose: values = values.T diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 323aa45874d96..7604ed23813a2 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -410,7 +410,7 @@ def convert( ) def replace(self: T, to_replace, value, inplace: bool, regex: bool) -> T: - assert np.ndim(value) == 0, value + assert not is_list_like(value) return self.apply( "replace", to_replace=to_replace, value=value, inplace=inplace, regex=regex ) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b32eb9e308780..004f73ef97324 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -84,7 +84,10 @@ PyTablesExpr, maybe_expression, ) -from pandas.core.construction import extract_array +from pandas.core.construction import ( + create_ndarray, + extract_array, +) from pandas.core.indexes.api import ensure_index from pandas.core.internals import BlockManager @@ -3854,11 +3857,16 @@ def _create_axes( if table_exists: indexer = len(new_non_index_axes) # i.e. 0 exist_axis = self.non_index_axes[indexer][1] - if not array_equivalent(np.array(append_axis), np.array(exist_axis)): + + if not array_equivalent( + create_ndarray(append_axis, copy=True), + create_ndarray(exist_axis, copy=True), + ): # ahah! 
-> reindex if array_equivalent( - np.array(sorted(append_axis)), np.array(sorted(exist_axis)) + create_ndarray(sorted(append_axis), copy=True), + create_ndarray(sorted(exist_axis), copy=True), ): append_axis = exist_axis diff --git a/pandas/tests/frame/methods/test_to_records.py b/pandas/tests/frame/methods/test_to_records.py index 2c96cf291c154..ce356fbe59d12 100644 --- a/pandas/tests/frame/methods/test_to_records.py +++ b/pandas/tests/frame/methods/test_to_records.py @@ -3,6 +3,8 @@ import numpy as np import pytest +from pandas.compat import is_numpy_dev + from pandas import ( CategoricalDtype, DataFrame, @@ -171,20 +173,26 @@ def test_to_records_with_categorical(self): ), ), # Pass in a type instance. - ( + pytest.param( {"column_dtypes": str}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, reason="https://github.com/numpy/numpy/issues/19078" + ), ), # Pass in a dtype instance. - ( + pytest.param( {"column_dtypes": np.dtype("unicode")}, np.rec.array( [("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")], ), + marks=pytest.mark.xfail( + is_numpy_dev, reason="https://github.com/numpy/numpy/issues/19078" + ), ), # Pass in a dictionary (name-only). ( diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index c6155cac101e6..78b5547f6e58f 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -3242,6 +3242,7 @@ def test_nat_representations(self): assert f(NaT) == "NaT" +@pytest.mark.filterwarnings("ignore:Promotion of numbers and bools:FutureWarning") def test_format_percentiles(): result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) expected = ["1.999%", "2.001%", "50%", "66.667%", "99.99%"]
- [ ] xref #41632 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry hopefully numpy will provide an alternative that makes this unnecessary
https://api.github.com/repos/pandas-dev/pandas/pulls/41665
2021-05-25T18:21:32Z
2021-05-31T16:23:05Z
null
2021-06-04T20:37:44Z
ENH: Add support for defaultdict as "dtype" in parser
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 1a5a9980e5e96..c927fe1a77ead 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -274,6 +274,7 @@ Other enhancements - Add keyword ``dropna`` to :meth:`DataFrame.value_counts` to allow counting rows that include ``NA`` values (:issue:`41325`) - :meth:`Series.replace` will now cast results to ``PeriodDtype`` where possible instead of ``object`` dtype (:issue:`41526`) - Improved error message in ``corr`` and ``cov`` methods on :class:`.Rolling`, :class:`.Expanding`, and :class:`.ExponentialMovingWindow` when ``other`` is not a :class:`DataFrame` or :class:`Series` (:issue:`41741`) +- :class:`TextReader` and everything that uses the class supports defaultdict as dtype .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index e5e61e409c320..16bfb8764e17f 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -985,10 +985,17 @@ cdef class TextReader: col_dtype = None if self.dtype is not None: if isinstance(self.dtype, dict): + # gh-41574 + # Designed to support defaultdict if name in self.dtype: col_dtype = self.dtype[name] - elif i in self.dtype: - col_dtype = self.dtype[i] + else: + # the defaultdict must return a default value only if + # both the column name and the index are not presented + try: + col_dtype = self.dtype[i] + except KeyError: + pass else: if self.dtype.names: # structured array diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index f914e0601fb89..0f582356ce7fc 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -215,6 +215,33 @@ def __init__(self, kwds): # Normally, this arg would get pre-processed earlier on self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR) + # gh-41996 + # Moved from c_parser_wrapper and adapted + kwds["dtype"] = 
ensure_dtype_objs(kwds.get("dtype", None)) + + # Moved from python_parser (begin) + # The function is renamed from `_clean_mapping` + def _translate_indices_into_colnames(mapping): + """converts dictkeys as col numbers to col names""" + if (mapping is None) or not(isinstance(mapping, dict)): + return mapping + clean = {} + for col, v in mapping.items(): + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + clean[col] = v + # gh-41574 + # Designed to support defaultdict + if isinstance(mapping, defaultdict): + clean = defaultdict(mapping.default_factory, clean) + return clean + + if "converters" in kwds: + kwds["converters"] = _translate_indices_into_colnames(kwds["converters"]) + if "dtype" in kwds: + kwds["dtype"] = _translate_indices_into_colnames(kwds["dtype"]) + # Moved from python_parser (end) + def _open_handles(self, src: FilePathOrBuffer, kwds: dict[str, Any]) -> None: """ Let the readers open IOHandles after they are done with their potential raises. @@ -1232,3 +1259,24 @@ def _validate_parse_dates_arg(parse_dates): def is_index_col(col) -> bool: return col is not None and col is not False + + +# gh-41996 +# Moved from c_parser_wrapper +def ensure_dtype_objs(dtype): + """ + Ensure we have either None, a dtype object, or a dictionary mapping to + dtype objects. 
+ """ + if isinstance(dtype, dict): + # gh-41574 + # Designed to support defaultdict + prepared_dtype = {k: pandas_dtype(dtype[k]) for k in dtype} + if isinstance(dtype, defaultdict): + type_for_default_factory = pandas_dtype(dtype.default_factory()) + prepared_default_factory = lambda: type_for_default_factory + prepared_dtype = defaultdict(prepared_default_factory, prepared_dtype) + return prepared_dtype + elif dtype is not None: + dtype = pandas_dtype(dtype) + return dtype diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 110211125514e..d9eee0c1ad9cb 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -1,5 +1,6 @@ from __future__ import annotations +from collections import defaultdict import warnings import numpy as np @@ -64,7 +65,6 @@ def __init__(self, src: FilePathOrBuffer, **kwds): ): kwds.pop(key, None) - kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None)) try: self._reader = parsers.TextReader(self.handles.handle, **kwds) except Exception: @@ -394,15 +394,3 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: ) warnings.warn(warning_message, DtypeWarning, stacklevel=8) return result - - -def ensure_dtype_objs(dtype): - """ - Ensure we have either None, a dtype object, or a dictionary mapping to - dtype objects. 
- """ - if isinstance(dtype, dict): - dtype = {k: pandas_dtype(dtype[k]) for k in dtype} - elif dtype is not None: - dtype = pandas_dtype(dtype) - return dtype diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 13f2d62399418..691783651dd8b 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -307,23 +307,6 @@ def get_chunk(self, size=None): return self.read(rows=size) def _convert_data(self, data): - # apply converters - def _clean_mapping(mapping): - """converts col numbers to names""" - clean = {} - for col, v in mapping.items(): - if isinstance(col, int) and col not in self.orig_names: - col = self.orig_names[col] - clean[col] = v - return clean - - clean_conv = _clean_mapping(self.converters) - if not isinstance(self.dtype, dict): - # handles single dtype applied to all columns - clean_dtypes = self.dtype - else: - clean_dtypes = _clean_mapping(self.dtype) - # Apply NA values. clean_na_values = {} clean_na_fvalues = {} @@ -347,8 +330,8 @@ def _clean_mapping(mapping): clean_na_values, clean_na_fvalues, self.verbose, - clean_conv, - clean_dtypes, + self.converters, + self.dtype, ) def _infer_columns(self): diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index d594bf8a75d49..b70bc71929d7a 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -2,6 +2,7 @@ Tests the TextReader class in parsers.pyx, which is integral to the C engine in parsers.py """ +from collections import defaultdict from io import ( BytesIO, StringIO, @@ -21,7 +22,7 @@ TextFileReader, read_csv, ) -from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs +from pandas.io.parsers.base_parser import ensure_dtype_objs class TestTextReader: @@ -247,6 +248,12 @@ def _make_reader(**kwds): assert result[0].dtype == "u1" assert result[1].dtype == "O" + # see gh-41574 + reader = _make_reader(dtype=defaultdict(lambda: 
"u1", {1: "S1"})) + result = reader.read() + assert result[0].dtype == "u1" + assert result[1].dtype == "S1" + def test_usecols(self): data = """\ a,b,c
- [x] closes #41574 - [x] tests added / passed (1 test is modified) - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry # What is new: Now defaultdict can be provides as dtype. The implementation allows moving to supporting Iterable as dtype
https://api.github.com/repos/pandas-dev/pandas/pulls/41664
2021-05-25T17:31:30Z
2021-11-28T21:05:22Z
null
2021-11-28T21:05:22Z
REF: move _str_extract function in accessor.py to array method
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index cb8a08f5668ac..95d9409b265ce 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2453,7 +2453,9 @@ def replace(self, to_replace, value, inplace: bool = False): # ------------------------------------------------------------------------ # String methods interface - def _str_map(self, f, na_value=np.nan, dtype=np.dtype("object")): + def _str_map( + self, f, na_value=np.nan, dtype=np.dtype("object"), convert: bool = True + ): # Optimization to apply the callable `f` to the categories once # and rebuild the result by `take`ing from the result with the codes. # Returns the same type as the object-dtype implementation though. diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 74ca5130ca322..ab1dadf4d2dfa 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -410,7 +410,9 @@ def _cmp_method(self, other, op): # String methods interface _str_na_value = StringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): from pandas.arrays import BooleanArray if dtype is None: diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index 4370f3a4e15cf..454d8ebde989b 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -741,7 +741,9 @@ def value_counts(self, dropna: bool = True) -> Series: _str_na_value = ArrowStringDtype.na_value - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): # TODO: de-duplicate with StringArray method. This method is moreless copy and # paste. 
diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index e1399968cb1c4..7643019ff8c55 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -13,10 +13,7 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import ( - ArrayLike, - FrameOrSeriesUnion, -) +from pandas._typing import FrameOrSeriesUnion from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -160,7 +157,6 @@ class StringMethods(NoNewAttributesMixin): # TODO: Dispatch all the methods # Currently the following are not dispatched to the array # * cat - # * extract # * extractall def __init__(self, data): @@ -243,7 +239,7 @@ def _wrap_result( self, result, name=None, - expand=None, + expand: bool | None = None, fill_value=np.nan, returns_string=True, ): @@ -2385,10 +2381,7 @@ def extract( 2 NaN dtype: object """ - from pandas import ( - DataFrame, - array as pd_array, - ) + from pandas import DataFrame if not isinstance(expand, bool): raise ValueError("expand must be True or False") @@ -2400,8 +2393,6 @@ def extract( if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex): raise ValueError("only one regex group is supported with Index") - # TODO: dispatch - obj = self._data result_dtype = _result_dtype(obj) @@ -2415,8 +2406,8 @@ def extract( result = DataFrame(columns=columns, dtype=result_dtype) else: - result_list = _str_extract( - obj.array, pat, flags=flags, expand=returns_df + result_list = self._data.array._str_extract( + pat, flags=flags, expand=returns_df ) result_index: Index | None @@ -2431,9 +2422,7 @@ def extract( else: name = _get_single_group_name(regex) - result_arr = _str_extract(obj.array, pat, flags=flags, expand=returns_df) - # not dispatching, so we have to reconstruct here. 
- result = pd_array(result_arr, dtype=result_dtype) + result = self._data.array._str_extract(pat, flags=flags, expand=returns_df) return self._wrap_result(result, name=name) @forbid_nonstring_types(["bytes"]) @@ -3121,33 +3110,6 @@ def _get_group_names(regex: re.Pattern) -> list[Hashable]: return [names.get(1 + i, i) for i in range(regex.groups)] -def _str_extract(arr: ArrayLike, pat: str, flags=0, expand: bool = True): - """ - Find groups in each string in the array using passed regular expression. - - Returns - ------- - np.ndarray or list of lists is expand is True - """ - regex = re.compile(pat, flags=flags) - - empty_row = [np.nan] * regex.groups - - def f(x): - if not isinstance(x, str): - return empty_row - m = regex.search(x) - if m: - return [np.nan if item is None else item for item in m.groups()] - else: - return empty_row - - if expand: - return [f(val) for val in np.asarray(arr)] - - return np.array([f(val)[0] for val in np.asarray(arr)], dtype=object) - - def str_extractall(arr, pat, flags=0): regex = re.compile(pat, flags=flags) # the regex must contain capture groups. diff --git a/pandas/core/strings/base.py b/pandas/core/strings/base.py index 730870b448cb2..cd71844d3b527 100644 --- a/pandas/core/strings/base.py +++ b/pandas/core/strings/base.py @@ -230,3 +230,7 @@ def _str_split(self, pat=None, n=-1, expand=False): @abc.abstractmethod def _str_rsplit(self, pat=None, n=-1): pass + + @abc.abstractmethod + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + pass diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index c214ada9c1ada..7ce4abe904f3b 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -32,7 +32,9 @@ def __len__(self): # For typing, _str_map relies on the object being sized. 
raise NotImplementedError - def _str_map(self, f, na_value=None, dtype: Dtype | None = None): + def _str_map( + self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + ): """ Map a callable over valid element of the array. @@ -47,6 +49,8 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None): for object-dtype and Categorical and ``pd.NA`` for StringArray. dtype : Dtype, optional The dtype of the result array. + convert : bool, default True + Whether to call `maybe_convert_objects` on the resulting ndarray """ if dtype is None: dtype = np.dtype("object") @@ -60,9 +64,9 @@ def _str_map(self, f, na_value=None, dtype: Dtype | None = None): arr = np.asarray(self, dtype=object) mask = isna(arr) - convert = not np.all(mask) + map_convert = convert and not np.all(mask) try: - result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert) + result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert) except (TypeError, AttributeError) as e: # Reraise the exception if callable `f` got wrong number of args. 
# The user may want to be warned by this, instead of getting NaN @@ -88,7 +92,7 @@ def g(x): return result if na_value is not np.nan: np.putmask(result, mask, na_value) - if result.dtype == object: + if convert and result.dtype == object: result = lib.maybe_convert_objects(result) return result @@ -410,3 +414,28 @@ def _str_lstrip(self, to_strip=None): def _str_rstrip(self, to_strip=None): return self._str_map(lambda x: x.rstrip(to_strip)) + + def _str_extract(self, pat: str, flags: int = 0, expand: bool = True): + regex = re.compile(pat, flags=flags) + na_value = self._str_na_value + + if not expand: + + def g(x): + m = regex.search(x) + return m.groups()[0] if m else na_value + + return self._str_map(g, convert=False) + + empty_row = [na_value] * regex.groups + + def f(x): + if not isinstance(x, str): + return empty_row + m = regex.search(x) + if m: + return [na_value if item is None else item for item in m.groups()] + else: + return empty_row + + return [f(val) for val in np.asarray(self)]
perf neutral refactor, another step towards #41372 after this... can use pyarrow native functions for expand case use _wrap_result and _str_map (without a perf impact)
https://api.github.com/repos/pandas-dev/pandas/pulls/41663
2021-05-25T16:06:25Z
2021-05-27T01:52:24Z
2021-05-27T01:52:24Z
2021-05-27T10:18:44Z
DEPR: Series(dt64naive, dtype=dt64tz) -> will match DatetimeIndex
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ea9017da8a2f9..e0f77d8cffbb8 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -696,6 +696,7 @@ Deprecations - Deprecated passing arguments (apart from ``value``) as positional in :meth:`DataFrame.fillna` and :meth:`Series.fillna` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.reset_index` (other than ``"level"``) and :meth:`Series.reset_index` (:issue:`41485`) - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) +- In a future version, constructing :class:`Series` or :class:`DataFrame` with ``datetime64[ns]`` data and ``DatetimeTZDtype`` will treat the data as wall-times instead of as UTC times (matching DatetimeIndex behavior). To treat the data as UTC times, use ``pd.Series(data).dt.tz_localize("UTC").dt.tz_convert(dtype.tz)`` or ``pd.Series(data.view("int64"), dtype=dtype)`` (:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) - Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 40883dd8f747b..df79276f67386 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1657,6 +1657,22 @@ def maybe_cast_to_datetime( # Numeric values are UTC at this point, # so localize and convert # equiv: Series(dta).astype(dtype) # though deprecated + if getattr(vdtype, "kind", None) == "M": + # GH#24559, GH#33401 deprecate behavior inconsistent + # with 
DatetimeArray/DatetimeIndex + warnings.warn( + "In a future version, constructing a Series " + "from datetime64[ns] data and a " + "DatetimeTZDtype will interpret the data " + "as wall-times instead of " + "UTC times, matching the behavior of " + "DatetimeIndex. To treat the data as UTC " + "times, use pd.Series(data).dt" + ".tz_localize('UTC').tz_convert(dtype.tz) " + "or pd.Series(data.view('int64'), dtype=dtype)", + FutureWarning, + stacklevel=5, + ) value = dta.tz_localize("UTC").tz_convert(dtype.tz) except OutOfBoundsDatetime: diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index af730bf299336..646d1f0ab1508 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1094,7 +1094,21 @@ def test_construction_consistency(self): result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype) tm.assert_series_equal(result, ser) - result = Series(ser.values, dtype=ser.dtype) + msg = "will interpret the data as wall-times" + with tm.assert_produces_warning(FutureWarning, match=msg): + # deprecate behavior inconsistent with DatetimeIndex GH#33401 + result = Series(ser.values, dtype=ser.dtype) + tm.assert_series_equal(result, ser) + + with tm.assert_produces_warning(None): + # one suggested alternative to the deprecated usage + middle = Series(ser.values).dt.tz_localize("UTC") + result = middle.dt.tz_convert(ser.dtype.tz) + tm.assert_series_equal(result, ser) + + with tm.assert_produces_warning(None): + # the other suggested alternative to the deprecated usage + result = Series(ser.values.view("int64"), dtype=ser.dtype) tm.assert_series_equal(result, ser) @pytest.mark.parametrize(
- [ ] xref #33401 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry I think this is the last deprecation needed for the constructor/astype consistency.
https://api.github.com/repos/pandas-dev/pandas/pulls/41662
2021-05-25T14:55:41Z
2021-05-31T14:50:51Z
2021-05-31T14:50:51Z
2021-05-31T15:12:52Z
DOC: update styler user guide for new `text_gradient`
diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index 86696cc909764..219b74407fae4 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -1012,7 +1012,8 @@ " - [.highlight_min][minfunc] and [.highlight_max][maxfunc]: for use with identifying extremeties in data.\n", " - [.highlight_between][betweenfunc] and [.highlight_quantile][quantilefunc]: for use with identifying classes within data.\n", " - [.background_gradient][bgfunc]: a flexible method for highlighting cells based or their, or other, values on a numeric scale.\n", - " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n", + " - [.text_gradient][textfunc]: similar method for highlighting text based on their, or other, values on a numeric scale.\n", + " - [.bar][barfunc]: to display mini-charts within cell backgrounds.\n", " \n", "The individual documentation on each function often gives more examples of their arguments.\n", "\n", @@ -1022,6 +1023,7 @@ "[betweenfunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_between.rst\n", "[quantilefunc]: ../reference/api/pandas.io.formats.style.Styler.highlight_quantile.rst\n", "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n", + "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst\n", "[barfunc]: ../reference/api/pandas.io.formats.style.Styler.bar.rst" ] }, @@ -1098,14 +1100,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Background Gradient" + "### Background Gradient and Text Gradient" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "You can create \"heatmaps\" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." + "You can create \"heatmaps\" with the `background_gradient` and `text_gradient` methods. 
These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap." ] }, { @@ -1120,19 +1122,31 @@ "df2.style.background_gradient(cmap=cm)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df2.style.text_gradient(cmap=cm)" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "[.background_gradient][bgfunc] has a number of keyword arguments to customise the gradients and colors. See its documentation.\n", + "[.background_gradient][bgfunc] and [.text_gradient][textfunc] have a number of keyword arguments to customise the gradients and colors. See the documentation.\n", "\n", - "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst" + "[bgfunc]: ../reference/api/pandas.io.formats.style.Styler.background_gradient.rst\n", + "[textfunc]: ../reference/api/pandas.io.formats.style.Styler.text_gradient.rst" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "### Set properties\n", + "\n", "Use `Styler.set_properties` when the style doesn't actually depend on the values. This is just a simple wrapper for `.applymap` where the function returns the same properties for all cells." ] },
Add `text_gradient` to Styler user guide after merge.
https://api.github.com/repos/pandas-dev/pandas/pulls/41661
2021-05-25T10:22:55Z
2021-05-25T12:43:05Z
2021-05-25T12:43:05Z
2021-05-25T13:33:18Z
ENH: add long and short captions to `Styler.to_latex`
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index ba6bfb9da11cc..2f024af34b19d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -143,7 +143,7 @@ One also has greater control of the display through separate sparsification of t Render trimming has also been added for large numbers of data elements to avoid browser overload (:issue:`40712`). We have added an extension to allow LaTeX styling as an alternative to CSS styling and a method :meth:`.Styler.to_latex` -which renders the necessary LaTeX format including built-up styles. An additional file io function :meth:`.Styler.to_html` has been added for convenience (:issue:`40312`). +which renders the necessary LaTeX format including built-up styles (:issue:`21673`, :issue:`41659`). An additional file io function :meth:`Styler.to_html` has been added for convenience (:issue:`40312`). Documentation has also seen major revisions in light of new features (:issue:`39720` :issue:`39317` :issue:`40493`) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d6c151c3ed740..7b88d53dd7f4e 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -87,8 +87,8 @@ class Styler(StylerRenderer): List of {selector: (attr, value)} dicts; see Notes. uuid : str, default None A unique identifier to avoid CSS collisions; generated automatically. - caption : str, default None - Caption to attach to the table. + caption : str, tuple, default None + String caption to attach to the table. Tuple only used for LaTeX dual captions. table_attributes : str, default None Items that show up in the opening ``<table>`` tag in addition to automatic (by default) id. 
@@ -175,7 +175,7 @@ def __init__( precision: int | None = None, table_styles: CSSStyles | None = None, uuid: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, table_attributes: str | None = None, cell_ids: bool = True, na_rep: str | None = None, @@ -419,7 +419,7 @@ def to_latex( position_float: str | None = None, hrules: bool = False, label: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, sparse_index: bool | None = None, sparse_columns: bool | None = None, multirow_align: str = "c", @@ -460,8 +460,10 @@ def to_latex( label : str, optional The LaTeX label included as: \\label{<label>}. This is used with \\ref{<label>} in the main .tex file. - caption : str, optional - The LaTeX table caption included as: \\caption{<caption>}. + caption : str, tuple, optional + If string, the LaTeX table caption included as: \\caption{<caption>}. + If tuple, i.e ("full caption", "short caption"), the caption included + as: \\caption[<caption[1]>]{<caption[0]>}. sparse_index : bool, optional Whether to sparsify the display of a hierarchical index. Setting to False will display each explicit level element in a hierarchical key for each row. @@ -1344,13 +1346,16 @@ def set_uuid(self, uuid: str) -> Styler: self.uuid = uuid return self - def set_caption(self, caption: str) -> Styler: + def set_caption(self, caption: str | tuple) -> Styler: """ Set the text added to a ``<caption>`` HTML element. Parameters ---------- - caption : str + caption : str, tuple + For HTML output either the string input is used or the first element of the + tuple. For LaTeX the string input provides a caption and the additional + tuple input allows for full captions and short captions, in that order. 
Returns ------- diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 7af8802673f80..7686d8a340c37 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -75,7 +75,7 @@ def __init__( uuid_len: int = 5, table_styles: CSSStyles | None = None, table_attributes: str | None = None, - caption: str | None = None, + caption: str | tuple | None = None, cell_ids: bool = True, ): diff --git a/pandas/io/formats/templates/html_table.tpl b/pandas/io/formats/templates/html_table.tpl index dadefa4bd8365..33153af6f0882 100644 --- a/pandas/io/formats/templates/html_table.tpl +++ b/pandas/io/formats/templates/html_table.tpl @@ -6,8 +6,10 @@ <table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}> {% endif %} {% block caption %} -{% if caption %} +{% if caption and caption is string %} <caption>{{caption}}</caption> +{% elif caption and caption is sequence %} + <caption>{{caption[0]}}</caption> {% endif %} {% endblock caption %} {% block thead %} diff --git a/pandas/io/formats/templates/latex.tpl b/pandas/io/formats/templates/latex.tpl index e5db6ad8ca7f8..66fe99642850f 100644 --- a/pandas/io/formats/templates/latex.tpl +++ b/pandas/io/formats/templates/latex.tpl @@ -9,9 +9,12 @@ {% if position_float is not none%} \{{position_float}} {% endif %} -{% if caption %} +{% if caption and caption is string %} \caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} + {% endif %} {% for style in table_styles %} {% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %} diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py index 6c3abe04db926..74b4c7ea3977c 100644 --- a/pandas/tests/io/formats/style/test_html.py +++ 
b/pandas/tests/io/formats/style/test_html.py @@ -231,3 +231,8 @@ def test_from_custom_template(tmpdir): assert result.template_html is not Styler.template_html styler = result(DataFrame({"A": [1, 2]})) assert styler.render() + + +def test_caption_as_sequence(styler): + styler.set_caption(("full cap", "short cap")) + assert "<caption>full cap</caption>" in styler.render() diff --git a/pandas/tests/io/formats/style/test_to_latex.py b/pandas/tests/io/formats/style/test_to_latex.py index 5945502a4c90c..97347bddaa187 100644 --- a/pandas/tests/io/formats/style/test_to_latex.py +++ b/pandas/tests/io/formats/style/test_to_latex.py @@ -438,3 +438,8 @@ def test_parse_latex_table_wrapping(styler): overwrite=False, ) assert _parse_latex_table_wrapping(styler.table_styles, None) is True + + +def test_short_caption(styler): + result = styler.to_latex(caption=("full cap", "short cap")) + assert "\\caption[short cap]{full cap}" in result
completes an item on the list for `DataFrame.to_latex` deprecation #41649
https://api.github.com/repos/pandas-dev/pandas/pulls/41659
2021-05-25T07:58:00Z
2021-06-04T14:40:12Z
2021-06-04T14:40:12Z
2021-06-04T18:10:35Z
DOC: add `Styler.to_latex` info in `io.rst` doc page
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 7f0cd613726dc..d26e511202f9c 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -22,6 +22,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like text;Fixed-Width Text File;:ref:`read_fwf<io.fwf_reader>` text;`JSON <https://www.json.org/>`__;:ref:`read_json<io.json_reader>`;:ref:`to_json<io.json_writer>` text;`HTML <https://en.wikipedia.org/wiki/HTML>`__;:ref:`read_html<io.read_html>`;:ref:`to_html<io.html>` + text;`LaTeX <https://en.wikipedia.org/wiki/LaTeX>`__;;:ref:`Styler.to_latex<io.latex>` text;`XML <https://www.w3.org/standards/xml/core>`__;:ref:`read_xml<io.read_xml>`;:ref:`to_xml<io.xml>` text; Local clipboard;:ref:`read_clipboard<io.clipboard>`;:ref:`to_clipboard<io.clipboard>` binary;`MS Excel <https://en.wikipedia.org/wiki/Microsoft_Excel>`__;:ref:`read_excel<io.excel_reader>`;:ref:`to_excel<io.excel_writer>` @@ -2830,7 +2831,42 @@ parse HTML tables in the top-level pandas io function ``read_html``. .. |lxml| replace:: **lxml** .. _lxml: https://lxml.de +.. _io.latex: +LaTeX +----- + +.. versionadded:: 1.3.0 + +Currently there are no methods to read from LaTeX, only output methods. + +Writing to LaTeX files +'''''''''''''''''''''' + +.. note:: + + DataFrame *and* Styler objects currently have a ``to_latex`` method. We recommend + using the `Styler.to_latex() <../reference/api/pandas.io.formats.style.Styler.to_latex.rst>`__ method + over `DataFrame.to_latex() <../reference/api/pandas.DataFrame.to_latex.rst>`__ due to the former's greater flexibility with + conditional styling, and the latter's possible future deprecation. + +Review the documentation for `Styler.to_latex <../reference/api/pandas.io.formats.style.Styler.to_latex.rst>`__, +which gives examples of conditional styling and explains the operation of its keyword +arguments. + +For simple application the following pattern is sufficient. + +.. 
ipython:: python + + df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["c", "d"]) + print(df.style.to_latex()) + +To format values before output, chain the `Styler.format <../reference/api/pandas.io.formats.style.Styler.format.rst>`__ +method. + +.. ipython:: python + + print(df.style.format("€ {}").to_latex()) XML ---
follow-on from adding `Styler.to_latex`. This is basic addition that primarily points to the core method docs (which have detailed descriptions and examples)
https://api.github.com/repos/pandas-dev/pandas/pulls/41658
2021-05-25T06:47:54Z
2021-05-25T14:46:23Z
2021-05-25T14:46:23Z
2021-05-25T15:00:11Z
ENH: Deprecate arguments #41485
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index c87cd9b116f2b..655ee09a02ca8 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -694,6 +694,7 @@ Deprecations - Deprecated construction of :class:`Series` or :class:`DataFrame` with ``DatetimeTZDtype`` data and ``datetime64[ns]`` dtype. Use ``Series(data).dt.tz_localize(None)`` instead (:issue:`41555`,:issue:`33401`) - Deprecated passing arguments as positional in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis` (other than ``"labels"``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.where` and :meth:`Series.where` (other than ``"cond"`` and ``"other"``) (:issue:`41485`) +- Deprecated passing arguments as positional (other than ``filepath_or_buffer``) in :func:`read_csv` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.drop` (other than ``"labels"``) and :meth:`Series.drop` (:issue:`41485`) - diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index ad08b8d4b7097..d957a669351c1 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -30,7 +30,10 @@ AbstractMethodError, ParserWarning, ) -from pandas.util._decorators import Appender +from pandas.util._decorators import ( + Appender, + deprecate_nonkeyword_arguments, +) from pandas.core.dtypes.common import ( is_file_like, @@ -472,6 +475,9 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): return parser.read(nrows) +@deprecate_nonkeyword_arguments( + version=None, allowed_args=["filepath_or_buffer"], stacklevel=3 +) @Appender( _doc_read_csv_and_table.format( func_name="read_csv", diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index adafbf38439d5..eba5e52516b4c 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -733,6 +733,18 @@ def 
test_read_csv_delimiter_and_sep_no_default(all_parsers): parser.read_csv(f, sep=" ", delimiter=".") +def test_read_csv_posargs_deprecation(all_parsers): + # GH 41485 + f = StringIO("a,b\n1,2") + parser = all_parsers + msg = ( + "In a future version of pandas all arguments of read_csv " + "except for the argument 'filepath_or_buffer' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + parser.read_csv(f, " ") + + @pytest.mark.parametrize("delimiter", [",", "\t"]) def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter): # GH: 35958
- [ ] xref #41485 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41657
2021-05-25T05:18:11Z
2021-05-27T18:18:15Z
2021-05-27T18:18:15Z
2021-05-29T08:21:48Z
BUG: groupby.transform/agg caching *args with numba engine
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 258e391b9220c..6d1a6a4e96b33 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -988,6 +988,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.__getitem__` with non-unique columns incorrectly returning a malformed :class:`SeriesGroupBy` instead of :class:`DataFrameGroupBy` (:issue:`41427`) - Bug in :meth:`DataFrameGroupBy.transform` with non-unique columns incorrectly raising ``AttributeError`` (:issue:`41427`) - Bug in :meth:`Resampler.apply` with non-unique columns incorrectly dropping duplicated columns (:issue:`41445`) +- Bug in :meth:`DataFrameGroupBy.transform` and :meth:`DataFrameGroupBy.agg` with ``engine="numba"`` where ``*args`` were being cached with the user passed function (:issue:`41647`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b27eb4bb8f325..1c0a3dcc1e1db 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1131,10 +1131,16 @@ def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) group_keys = self.grouper._get_group_keys() numba_transform_func = numba_.generate_numba_transform_func( - tuple(args), kwargs, func, engine_kwargs + kwargs, func, engine_kwargs ) result = numba_transform_func( - sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) + sorted_data, + sorted_index, + starts, + ends, + len(group_keys), + len(data.columns), + *args, ) cache_key = (func, "groupby_transform") @@ -1157,11 +1163,15 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) starts, ends, sorted_index, sorted_data = self._numba_prep(func, data) group_keys = self.grouper._get_group_keys() - numba_agg_func = numba_.generate_numba_agg_func( - tuple(args), kwargs, func, engine_kwargs - ) + numba_agg_func = numba_.generate_numba_agg_func(kwargs, func, engine_kwargs) result = 
numba_agg_func( - sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) + sorted_data, + sorted_index, + starts, + ends, + len(group_keys), + len(data.columns), + *args, ) cache_key = (func, "groupby_agg") diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index 26070fcb5e89c..ad78280c5d835 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -56,11 +56,12 @@ def f(values, index, ...): def generate_numba_agg_func( - args: tuple, kwargs: dict[str, Any], func: Callable[..., Scalar], engine_kwargs: dict[str, bool] | None, -) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: +) -> Callable[ + [np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, Any], np.ndarray +]: """ Generate a numba jitted agg function specified by values from engine_kwargs. @@ -72,8 +73,6 @@ def generate_numba_agg_func( Parameters ---------- - args : tuple - *args to be passed into the function kwargs : dict **kwargs to be passed into the function func : function @@ -103,6 +102,7 @@ def group_agg( end: np.ndarray, num_groups: int, num_columns: int, + *args: Any, ) -> np.ndarray: result = np.empty((num_groups, num_columns)) for i in numba.prange(num_groups): @@ -116,11 +116,12 @@ def group_agg( def generate_numba_transform_func( - args: tuple, kwargs: dict[str, Any], func: Callable[..., np.ndarray], engine_kwargs: dict[str, bool] | None, -) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: +) -> Callable[ + [np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, Any], np.ndarray +]: """ Generate a numba jitted transform function specified by values from engine_kwargs. 
@@ -132,8 +133,6 @@ def generate_numba_transform_func( Parameters ---------- - args : tuple - *args to be passed into the function kwargs : dict **kwargs to be passed into the function func : function @@ -163,6 +162,7 @@ def group_transform( end: np.ndarray, num_groups: int, num_columns: int, + *args: Any, ) -> np.ndarray: result = np.empty((len(values), num_columns)) for i in numba.prange(num_groups): diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 6de81d03ca418..ba2d6eeb287c0 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -6,7 +6,9 @@ from pandas import ( DataFrame, + Index, NamedAgg, + Series, option_context, ) import pandas._testing as tm @@ -154,3 +156,20 @@ def test_multifunc_notimplimented(agg_func): with pytest.raises(NotImplementedError, match="Numba engine can"): grouped[1].agg(agg_func, engine="numba") + + +@td.skip_if_no("numba", "0.46.0") +def test_args_not_cached(): + # GH 41647 + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.agg(sum_last, 1, engine="numba") + expected = Series([1.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) + + result = grouped_x.agg(sum_last, 2, engine="numba") + expected = Series([2.0] * 2, name="x", index=Index([0, 1], name="id")) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index fbee2361b9b45..8019071be72f3 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -5,6 +5,7 @@ from pandas import ( DataFrame, + Series, option_context, ) import pandas._testing as tm @@ -146,3 +147,20 @@ def test_multifunc_notimplimented(agg_func): with pytest.raises(NotImplementedError, 
match="Numba engine can"): grouped[1].transform(agg_func, engine="numba") + + +@td.skip_if_no("numba", "0.46.0") +def test_args_not_cached(): + # GH 41647 + def sum_last(values, index, n): + return values[-n:].sum() + + df = DataFrame({"id": [0, 0, 1, 1], "x": [1, 1, 1, 1]}) + grouped_x = df.groupby("id")["x"] + result = grouped_x.transform(sum_last, 1, engine="numba") + expected = Series([1.0] * 4, name="x") + tm.assert_series_equal(result, expected) + + result = grouped_x.transform(sum_last, 2, engine="numba") + expected = Series([2.0] * 4, name="x") + tm.assert_series_equal(result, expected)
- [x] closes #41647 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41656
2021-05-25T04:57:22Z
2021-05-26T01:46:37Z
2021-05-26T01:46:36Z
2021-05-26T03:26:26Z
REF: de-duplicate _format_attrs
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8fb88e625d948..74659c98dbb7e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1158,14 +1158,18 @@ def _format_data(self, name=None) -> str_t: is_justify = False return format_object_summary( - self, self._formatter_func, is_justify=is_justify, name=name + self, + self._formatter_func, + is_justify=is_justify, + name=name, + line_break_each_value=self._is_multi, ) - def _format_attrs(self): + def _format_attrs(self) -> list[tuple[str_t, str_t | int]]: """ Return a list of tuples of the (attr,formatted_value). """ - return format_object_attrs(self) + return format_object_attrs(self, include_dtype=not self._is_multi) def _mpl_repr(self): # how to represent ourselves to matplotlib @@ -2407,6 +2411,13 @@ def is_all_dates(self) -> bool: ) return self._is_all_dates + @cache_readonly + def _is_multi(self) -> bool: + """ + Cached check equivalent to isinstance(self, MultiIndex) + """ + return isinstance(self, ABCMultiIndex) + # -------------------------------------------------------------------- # Pickle Methods diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e835990eb8d89..9b4ddb9d5c222 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -324,13 +324,8 @@ def _format_attrs(self): # error: "CategoricalIndex" has no attribute "ordered" ("ordered", self.ordered), # type: ignore[attr-defined] ] - if self.name is not None: - attrs.append(("name", ibase.default_pprint(self.name))) - attrs.append(("dtype", f"'{self.dtype.name}'")) - max_seq_items = get_option("display.max_seq_items") or len(self) - if len(self) > max_seq_items: - attrs.append(("length", len(self))) - return attrs + extra = super()._format_attrs() + return attrs + extra def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]: from pandas.io.formats.printing import pprint_thing diff --git 
a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b2377f5b27966..857353e0f56f7 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -361,7 +361,9 @@ def _format_attrs(self): freq = self.freqstr if freq is not None: freq = repr(freq) - attrs.append(("freq", freq)) + # Argument 1 to "append" of "list" has incompatible type + # "Tuple[str, Optional[str]]"; expected "Tuple[str, Union[str, int]]" + attrs.append(("freq", freq)) # type: ignore[arg-type] return attrs def _summary(self, name=None) -> str: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index fc92a1b3afe53..e4618007cc4dc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -16,8 +16,6 @@ import numpy as np -from pandas._config import get_option - from pandas._libs import lib from pandas._libs.interval import ( Interval, @@ -80,7 +78,6 @@ from pandas.core.indexes.base import ( Index, _index_shared_docs, - default_pprint, ensure_index, maybe_extract_name, ) @@ -919,49 +916,9 @@ def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) def _format_data(self, name=None) -> str: - # TODO: integrate with categorical and make generic # name argument is unused here; just for compat with base / categorical - n = len(self) - max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10) - - formatter = str - - if n == 0: - summary = "[]" - elif n == 1: - first = formatter(self[0]) - summary = f"[{first}]" - elif n == 2: - first = formatter(self[0]) - last = formatter(self[-1]) - summary = f"[{first}, {last}]" - else: - - if n > max_seq_items: - n = min(max_seq_items // 2, 10) - head = [formatter(x) for x in self[:n]] - tail = [formatter(x) for x in self[-n:]] - head_joined = ", ".join(head) - tail_joined = ", ".join(tail) - summary = f"[{head_joined} ... 
{tail_joined}]" - else: - tail = [formatter(x) for x in self] - joined = ", ".join(tail) - summary = f"[{joined}]" - - return summary + "," + self._format_space() - - def _format_attrs(self): - attrs = [] - if self.name is not None: - attrs.append(("name", default_pprint(self.name))) - attrs.append(("dtype", f"'{self.dtype}'")) - return attrs - - def _format_space(self) -> str: - space = " " * (len(type(self).__name__) + 1) - return f"\n{space}" + return self._data._format_data() + "," + self._format_space() # -------------------------------------------------------------------- # Set Operations diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1a3719233a1da..b50c741b123e2 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -89,11 +89,7 @@ lexsort_indexer, ) -from pandas.io.formats.printing import ( - format_object_attrs, - format_object_summary, - pprint_thing, -) +from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas import ( @@ -1287,20 +1283,6 @@ def _formatter_func(self, tup): formatter_funcs = [level._formatter_func for level in self.levels] return tuple(func(val) for func, val in zip(formatter_funcs, tup)) - def _format_data(self, name=None) -> str: - """ - Return the formatted data as a unicode string - """ - return format_object_summary( - self, self._formatter_func, name=name, line_break_each_value=True - ) - - def _format_attrs(self): - """ - Return a list of tuples of the (attr,formatted_value). - """ - return format_object_attrs(self, include_dtype=False) - def _format_native_types(self, na_rep="nan", **kwargs): new_levels = [] new_codes = []
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41655
2021-05-24T23:37:24Z
2021-05-25T12:37:52Z
2021-05-25T12:37:52Z
2021-05-25T14:01:37Z
API: EA._can_hold_na -> EDtype.can_hold_na
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 7dddb9f3d6f25..4f2d80e73fedf 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -35,6 +35,7 @@ from pandas.util._decorators import ( Appender, Substitution, + cache_readonly, ) from pandas.util._validators import ( validate_bool_kwarg, @@ -1273,7 +1274,9 @@ def _concat_same_type( # such as take(), reindex(), shift(), etc. In addition, those results # will then be of the ExtensionArray subclass rather than an array # of objects - _can_hold_na = True + @cache_readonly + def _can_hold_na(self) -> bool: + return self.dtype._can_hold_na def _reduce(self, name: str, *, skipna: bool = True, **kwargs): """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 068f5703649fa..ae9d7dcd648e3 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -353,7 +353,6 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMi # tolist is not actually deprecated, just suppressed in the __dir__ _hidden_attrs = PandasObject._hidden_attrs | frozenset(["tolist"]) _typ = "categorical" - _can_hold_na = True _dtype: CategoricalDtype diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 4b264eef4bada..17f12536b4663 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -159,6 +159,10 @@ class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray): _recognized_scalars: tuple[type, ...] 
_ndarray: np.ndarray + @cache_readonly + def _can_hold_na(self) -> bool: + return True + def __init__(self, data, dtype: Dtype | None = None, freq=None, copy=False): raise AbstractMethodError(self) diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 9671c340a0a92..414c60603b9fe 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -367,6 +367,13 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: else: return None + @property + def _can_hold_na(self) -> bool: + """ + Can arrays of this dtype hold NA values? + """ + return True + def register_extension_dtype(cls: type[E]) -> type[E]: """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c7769046c70b2..9a1be4d010196 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -175,15 +175,15 @@ def is_view(self) -> bool: return values.base is not None @final - @property + @cache_readonly def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ - values = self.values - if isinstance(values, np.ndarray): - return values.dtype.kind not in ["b", "i", "u"] - return values._can_hold_na + dtype = self.dtype + if isinstance(dtype, np.dtype): + return dtype.kind not in ["b", "i", "u"] + return dtype._can_hold_na @final @cache_readonly diff --git a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 2402c70a166b7..13dec96b144ff 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -14,9 +14,11 @@ class CustomBlock(ExtensionBlock): _holder = np.ndarray - # error: Cannot override final attribute "_can_hold_na" - # (previously declared in base class "Block") - _can_hold_na = False # type: ignore[misc] + + # Cannot override final attribute "_can_hold_na" + @property # type: ignore[misc] + def _can_hold_na(self) -> bool: + return False @pytest.fixture
- [x] closes #40574 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41654
2021-05-24T23:34:41Z
2021-06-08T12:55:41Z
2021-06-08T12:55:41Z
2021-06-08T14:43:16Z
FMT: trim redundant freqstr from PeriodIndex __repr__
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index aee0d4fecd6ae..ec69d9ccbdd90 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1119,14 +1119,14 @@ def to_period(self, freq=None) -> PeriodArray: ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], - dtype='period[M]', freq='M') + dtype='period[M]') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], - dtype='period[D]', freq='D') + dtype='period[D]') """ from pandas.core.arrays import PeriodArray diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 101209be30b40..c2323c8697eee 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -564,15 +564,15 @@ def asfreq(self, freq=None, how: str = "E") -> PeriodArray: >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'], - dtype='period[A-DEC]', freq='A-DEC') + dtype='period[A-DEC]') >>> pidx.asfreq('M') PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12', - '2015-12'], dtype='period[M]', freq='M') + '2015-12'], dtype='period[M]') >>> pidx.asfreq('M', how='S') PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01', - '2015-01'], dtype='period[M]', freq='M') + '2015-01'], dtype='period[M]') """ how = libperiod.validate_end_alias(how) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 136843938b683..fb51f4ba08bfe 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -153,11 +153,11 @@ class PeriodIndex(DatetimeIndexOpsMixin): -------- >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) >>> idx - PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]', freq='Q-DEC') + PeriodIndex(['2000Q1', '2002Q3'], dtype='period[Q-DEC]') """ _typ = "periodindex" - 
_attributes = ["name", "freq"] + _attributes = ["name"] # define my properties & methods for delegation _is_numeric_dtype = False @@ -636,7 +636,7 @@ def period_range( PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06', '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12', '2018-01'], - dtype='period[M]', freq='M') + dtype='period[M]') If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the @@ -645,7 +645,7 @@ def period_range( >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'), ... end=pd.Period('2017Q2', freq='Q'), freq='M') PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], - dtype='period[M]', freq='M') + dtype='period[M]') """ if com.count_not_none(start, end, periods) != 2: raise ValueError( diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index a8f8406e24fef..70156092eeabe 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -44,7 +44,9 @@ def test_str(self, simple_index): if hasattr(idx, "tz"): if idx.tz is not None: assert idx.tz in str(idx) - if hasattr(idx, "freq"): + if isinstance(idx, pd.PeriodIndex): + assert f"dtype='period[{idx.freqstr}]'" in str(idx) + else: assert f"freq='{idx.freqstr}'" in str(idx) def test_view(self, simple_index): diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 7d054a7af4a4d..bfd83f1360671 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -62,40 +62,31 @@ def test_representation(self, method): idx9 = pd.period_range("2013Q1", periods=3, freq="Q") idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D") - exp1 = "PeriodIndex([], dtype='period[D]', freq='D')" + exp1 = "PeriodIndex([], dtype='period[D]')" - exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')" + exp2 = 
"PeriodIndex(['2011-01-01'], dtype='period[D]')" - exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', freq='D')" + exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]')" exp4 = ( "PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " - "dtype='period[D]', freq='D')" + "dtype='period[D]')" ) - exp5 = ( - "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', " - "freq='A-DEC')" - ) + exp5 = "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]')" exp6 = ( "PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], " - "dtype='period[H]', freq='H')" + "dtype='period[H]')" ) - exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', freq='Q-DEC')" + exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]')" - exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', freq='Q-DEC')" + exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]')" - exp9 = ( - "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " - "dtype='period[Q-DEC]', freq='Q-DEC')" - ) + exp9 = "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='period[Q-DEC]')" - exp10 = ( - "PeriodIndex(['2011-01-01', '2011-02-01'], " - "dtype='period[3D]', freq='3D')" - ) + exp10 = "PeriodIndex(['2011-01-01', '2011-02-01'], dtype='period[3D]')" for idx, expected in zip( [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10],
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41653
2021-05-24T23:28:04Z
2021-05-25T12:39:44Z
2021-05-25T12:39:44Z
2021-05-25T14:16:58Z
REF: standardize astype in EA subclasses
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index a6d1986937d2b..d904cc6e1b3de 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -529,10 +529,8 @@ def astype(self, dtype, copy=True): Returns ------- - array : ndarray - NumPy ndarray with 'dtype' for its dtype. + np.ndarray or ExtensionArray """ - from pandas.core.arrays.string_ import StringDtype dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, self.dtype): @@ -541,10 +539,10 @@ def astype(self, dtype, copy=True): else: return self.copy() - # FIXME: Really hard-code here? - if isinstance(dtype, StringDtype): - # allow conversion to StringArrays - return dtype.construct_array_type()._from_sequence(self, copy=False) + if isinstance(dtype, ExtensionDtype): + # allow conversion to e.g. StringArrays + cls = dtype.construct_array_type() + return cls._from_sequence(self, dtype=dtype, copy=copy) return np.array(self, dtype=dtype, copy=copy) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ecc45357db8c1..71a187cf5f4e0 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -101,7 +101,6 @@ ) import pandas.core.common as com from pandas.core.construction import ( - array as pd_array, extract_array, sanitize_array, ) @@ -494,19 +493,18 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: """ dtype = pandas_dtype(dtype) if self.dtype is dtype: - result = self.copy() if copy else self + return self.copy() if copy else self elif is_categorical_dtype(dtype): dtype = cast(Union[str, CategoricalDtype], dtype) # GH 10696/18593/18630 dtype = self.dtype.update_dtype(dtype) - self = self.copy() if copy else self - result = self._set_dtype(dtype) + obj = self.copy() if copy else self + return obj._set_dtype(dtype) - # TODO: consolidate with ndarray case? 
elif isinstance(dtype, ExtensionDtype): - result = pd_array(self, dtype=dtype, copy=copy) + return super().astype(dtype, copy=copy) elif is_integer_dtype(dtype) and self.isna().any(): raise ValueError("Cannot convert float NaN to integer") diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 08cb12a1373bb..f78e148850e67 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -69,7 +69,6 @@ is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_dtype_equal, - is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, @@ -82,6 +81,7 @@ ) from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, + ExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.missing import ( @@ -385,14 +385,13 @@ def astype(self, dtype, copy: bool = True): # 3. DatetimeArray.astype handles datetime -> period dtype = pandas_dtype(dtype) + if isinstance(dtype, ExtensionDtype): + return super().astype(dtype=dtype, copy=copy) + if is_object_dtype(dtype): return self._box_values(self.asi8.ravel()).reshape(self.shape) - elif is_string_dtype(dtype) and not is_categorical_dtype(dtype): - if is_extension_array_dtype(dtype): - arr_cls = dtype.construct_array_type() - return arr_cls._from_sequence(self, dtype=dtype, copy=copy) - else: - return self._format_native_types() + elif is_string_dtype(dtype): + return self._format_native_types() elif is_integer_dtype(dtype): # we deliberately ignore int32 vs. int64 here. # See https://github.com/pandas-dev/pandas/issues/24381 for more. 
@@ -422,9 +421,6 @@ def astype(self, dtype, copy: bool = True): # and conversions for any datetimelike to float msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" raise TypeError(msg) - elif is_categorical_dtype(dtype): - arr_cls = dtype.construct_array_type() - return arr_cls(self, dtype=dtype) else: return np.asarray(self, dtype=dtype) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2318cae004c5a..d054bfa1503e1 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -48,7 +48,10 @@ needs_i8_conversion, pandas_dtype, ) -from pandas.core.dtypes.dtypes import IntervalDtype +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + IntervalDtype, +) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, @@ -70,7 +73,6 @@ ExtensionArray, _extension_array_shared_docs, ) -from pandas.core.arrays.categorical import Categorical import pandas.core.common as com from pandas.core.construction import ( array as pd_array, @@ -827,7 +829,6 @@ def astype(self, dtype, copy: bool = True): ExtensionArray or NumPy ndarray with 'dtype' for its dtype. """ from pandas import Index - from pandas.core.arrays.string_ import StringDtype if dtype is not None: dtype = pandas_dtype(dtype) @@ -848,13 +849,11 @@ def astype(self, dtype, copy: bool = True): ) raise TypeError(msg) from err return self._shallow_copy(new_left, new_right) - elif is_categorical_dtype(dtype): - return Categorical(np.asarray(self), dtype=dtype) - elif isinstance(dtype, StringDtype): - return dtype.construct_array_type()._from_sequence(self, copy=False) - # TODO: This try/except will be repeated. 
try: + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + return cls._from_sequence(self, dtype=dtype, copy=copy) return np.asarray(self).astype(dtype, copy=copy) except (TypeError, ValueError) as err: msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index d274501143916..2d6eef44e23d4 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -320,8 +320,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: return cls(data, mask, copy=False) if isinstance(dtype, ExtensionDtype): - eacls = dtype.construct_array_type() - return eacls._from_sequence(self, dtype=dtype, copy=copy) + return super().astype(dtype=dtype, copy=copy) raise NotImplementedError("subclass must implement astype to np.dtype") diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 8d150c8f6ad3d..edd0c6c4b563e 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -435,8 +435,8 @@ def astype(self, dtype, copy=True): values = arr.astype(dtype.numpy_dtype) return FloatingArray(values, mask, copy=False) elif isinstance(dtype, ExtensionDtype): - cls = dtype.construct_array_type() - return cls._from_sequence(self, dtype=dtype, copy=copy) + return super().astype(dtype=dtype, copy=copy) + elif np.issubdtype(dtype, np.floating): arr = self._ndarray.copy() mask = self.isna() diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 433d45d94167d..b3b06bdf53cbd 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1103,7 +1103,7 @@ def astype_nansafe( if issubclass(dtype.type, str): return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False) - elif is_datetime64_dtype(arr): + elif is_datetime64_dtype(arr.dtype): if dtype == np.int64: warnings.warn( f"casting {arr.dtype} values to int64 with .astype(...) 
" @@ -1123,7 +1123,7 @@ def astype_nansafe( raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]") - elif is_timedelta64_dtype(arr): + elif is_timedelta64_dtype(arr.dtype): if dtype == np.int64: warnings.warn( f"casting {arr.dtype} values to int64 with .astype(...) "
https://api.github.com/repos/pandas-dev/pandas/pulls/41652
2021-05-24T22:57:01Z
2021-06-18T02:08:07Z
null
2021-11-08T16:37:29Z
GH41457 Upgrade Bootstrap to v5.0
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html index 023bfe9e26b78..b3bb1a2a3f86d 100644 --- a/web/pandas/_templates/layout.html +++ b/web/pandas/_templates/layout.html @@ -14,8 +14,8 @@ <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel='shortcut icon' type='image/x-icon' href='{{ base_url }}/static/img/favicon.ico'/> <link rel="stylesheet" - href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" - integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" + href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" + integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous"> {% for stylesheet in static.css %} <link rel="stylesheet" @@ -27,14 +27,14 @@ <header> <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark"> <div class="container"> - <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> + <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %} <div class="collapse navbar-collapse" id="nav-content"> - <ul class="navbar-nav ml-auto"> + <ul class="navbar-nav ms-auto"> {% for item in navbar %} {% if not item.has_subitems %} <li class="nav-item"> @@ -43,7 +43,7 @@ {% else %} <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" - data-toggle="dropdown" + data-bs-toggle="dropdown" id="{{ item.slug }}" href="#" role="button" @@ -68,7 +68,7 @@ </div> </main> <footer class="container pt-4 pt-md-5 border-top"> - <ul 
class="list-inline social-buttons float-right"> + <ul class="list-inline social-buttons float-end"> <li class="list-inline-item"> <a href="https://twitter.com/pandas_dev/"> <i class="fab fa-twitter"></i> @@ -89,15 +89,9 @@ pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS.</a> </p> </footer> - - <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" - integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" - crossorigin="anonymous"></script> - <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" - integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q" - crossorigin="anonymous"></script> - <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" - integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl" + + <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/js/bootstrap.bundle.min.js" + integrity="sha384-gtEjrD/SeCtmISkJkNUaaKMoLD0//ElJ19smozuHV6z3Iehds+3Ulb9Bn9Plx0x4" crossorigin="anonymous"></script> </body> </html> diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md index 9f4ebaf97598c..0163a1c8110b2 100644 --- a/web/pandas/contribute.md +++ b/web/pandas/contribute.md @@ -14,7 +14,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). <i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-building fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Corporate support</h4> + <h4 class="service-heading mt-3 fw-bold blue">Corporate support</h4> <p class="text-muted"> pandas depends on companies and institutions using the software to support its development. Hiring people to work on pandas, or letting existing employees to contribute to the @@ -28,7 +28,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). 
<i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-users fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Individual contributors</h4> + <h4 class="service-heading mt-3 fw-bold blue">Individual contributors</h4> <p class="text-muted"> pandas is mostly developed by volunteers. All kind of contributions are welcome, such as contributions to the code, to the website (including graphical designers), @@ -42,7 +42,7 @@ and about current sponsors in the [sponsors page](about/sponsors.html). <i class="fas fa-circle fa-stack-2x pink"></i> <i class="fas fa-dollar-sign fa-stack-1x fa-inverse"></i> </span> - <h4 class="service-heading mt-3 font-weight-bold blue">Donations</h4> + <h4 class="service-heading mt-3 fw-bold blue">Donations</h4> <p class="text-muted"> Individual donations are appreciated, and are used for things like the project infrastructure, travel expenses for our volunteer contributors to attend diff --git a/web/pandas/index.html b/web/pandas/index.html index 75c797d6dd93d..930f6caa59cb9 100644 --- a/web/pandas/index.html +++ b/web/pandas/index.html @@ -3,7 +3,7 @@ <div class="container"> <div class="row"> <div class="col-md-9"> - <section class="jumbotron text-center"> + <section class="h-30 p-5 bg-light border rounded-3 text-center mb-4"> <h1>pandas</h1> <p> <strong>pandas</strong> is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,<br/> @@ -98,7 +98,7 @@ <h4>Previous versions</h4> {% endif %} {% if releases[5:] %} <p class="text-center"> - <a data-toggle="collapse" href="#show-more-releases" role="button" aria-expanded="false" aria-controls="show-more-releases">Show more</a> + <a data-bs-toggle="collapse" href="#show-more-releases" role="button" aria-expanded="false" aria-controls="show-more-releases">Show more</a> </p> <ul id="show-more-releases" class="collapse"> {% for release in releases[5:] %} diff --git a/web/pandas/static/css/pandas.css 
b/web/pandas/static/css/pandas.css index 459f006db5727..67955dd35587c 100644 --- a/web/pandas/static/css/pandas.css +++ b/web/pandas/static/css/pandas.css @@ -42,6 +42,18 @@ ol ol, ol ul, ul ol, ul ul { a.navbar-brand img { height: 3rem; } +a:link:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: none; +} +a:visited:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: none; +} +a:hover:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: underline; +} +a:active:not(.btn):not(.dropdown-item):not(.nav-link) { +text-decoration: underline; +} div.card { margin: 0 0 .2em .2em !important; }
Whats new: Update BS script tags in template Replace jumbotron with utilities Replace font-weight-bold with fw-bold Replace ml-auto with ms-auto Replace float-right with float-end Replace data-target with data-bs-target Replace data-toggle with data-bs-toggle Add CSS rules for <a> - [x] closes #41457 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41651
2021-05-24T22:19:48Z
2021-07-06T21:32:53Z
2021-07-06T21:32:52Z
2021-07-06T22:33:05Z
Deprecated nonkeyword arguments for set_codes function
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 645443c450146..c6e0e5bf2ffcd 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -679,6 +679,7 @@ Deprecations - Deprecated the ``convert_float`` optional argument in :func:`read_excel` and :meth:`ExcelFile.parse` (:issue:`41127`) - Deprecated behavior of :meth:`DatetimeIndex.union` with mixed timezones; in a future version both will be cast to UTC instead of object dtype (:issue:`39328`) - Deprecated using ``usecols`` with out of bounds indices for ``read_csv`` with ``engine="c"`` (:issue:`25623`) +- Deprecated passing arguments as positional (except for ``"codes"``) in :meth:`MultiIndex.codes` (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`Index.set_names` and :meth:`MultiIndex.set_names` (except for ``names``) (:issue:`41485`) - Deprecated passing arguments as positional in :meth:`DataFrame.clip` and :meth:`Series.clip` (other than ``"upper"`` and ``"lower"``) (:issue:`41485`) - Deprecated special treatment of lists with first element a Categorical in the :class:`DataFrame` constructor; pass as ``pd.DataFrame({col: categorical, ...})`` instead (:issue:`38845`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1362679ae0064..59882422f5439 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -991,6 +991,7 @@ def _set_codes( self._reset_cache() + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "codes"]) def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True): """ Set new codes on MultiIndex. Defaults to returning new index. 
@@ -1058,7 +1059,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = Tr warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=3, ) else: inplace = False diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index e756f95bb2bc5..e806ee1751b00 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -449,3 +449,25 @@ def test_set_levels_pos_args_deprecation(): names=["foo", "bar"], ) tm.assert_index_equal(result, expected) + + +def test_set_codes_pos_args_depreciation(idx): + # https://github.com/pandas-dev/pandas/issues/41485 + msg = ( + r"In a future version of pandas all arguments of MultiIndex.set_codes except " + r"for the argument 'codes' will be keyword-only" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = idx.set_codes([[0, 0, 1, 2, 3, 3], [0, 1, 0, 1, 0, 1]], [0, 1]) + expected = MultiIndex.from_tuples( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + names=["first", "second"], + ) + tm.assert_index_equal(result, expected)
- [ ] xref #41485 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41650
2021-05-24T21:25:28Z
2021-05-27T14:44:26Z
2021-05-27T14:44:26Z
2021-05-27T14:44:30Z
REF: `DataFrame.to_latex` directs to `Styler.to_latex`
diff --git a/ci/deps/actions-38-db-min.yaml b/ci/deps/actions-38-db-min.yaml index c93f791b7dba7..55489a41f7997 100644 --- a/ci/deps/actions-38-db-min.yaml +++ b/ci/deps/actions-38-db-min.yaml @@ -15,6 +15,7 @@ dependencies: - numpy<1.20 # GH#39541 compat for pyarrow<3 - python-dateutil - pytz + - jinja2 # optional - beautifulsoup4 diff --git a/ci/deps/actions-38-db.yaml b/ci/deps/actions-38-db.yaml index b4495fa6887f4..d6c37cf62dad8 100644 --- a/ci/deps/actions-38-db.yaml +++ b/ci/deps/actions-38-db.yaml @@ -20,6 +20,7 @@ dependencies: - gcsfs>=0.6.0 - geopandas - html5lib + - jinja2 - matplotlib - moto>=1.3.14 - flask diff --git a/ci/deps/actions-38-locale_slow.yaml b/ci/deps/actions-38-locale_slow.yaml index e7276027f2a41..b55cc935e131d 100644 --- a/ci/deps/actions-38-locale_slow.yaml +++ b/ci/deps/actions-38-locale_slow.yaml @@ -28,3 +28,4 @@ dependencies: - xlsxwriter=1.2.2 - xlwt=1.3.0 - html5lib=1.1 + - jinja2 diff --git a/ci/deps/actions-38-slow.yaml b/ci/deps/actions-38-slow.yaml index 08900a31fe27c..39fe7ea7b086a 100644 --- a/ci/deps/actions-38-slow.yaml +++ b/ci/deps/actions-38-slow.yaml @@ -36,3 +36,4 @@ dependencies: - moto - flask - numba + - jinja2 diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml index 86b038ff7d4b6..273d9e2d016ae 100644 --- a/ci/deps/actions-38.yaml +++ b/ci/deps/actions-38.yaml @@ -18,3 +18,4 @@ dependencies: - nomkl - pytz - tabulate==0.8.7 + - jinja2 diff --git a/ci/deps/actions-39-numpydev.yaml b/ci/deps/actions-39-numpydev.yaml index 03181a9d71d1d..18fe197eb0a24 100644 --- a/ci/deps/actions-39-numpydev.yaml +++ b/ci/deps/actions-39-numpydev.yaml @@ -13,6 +13,7 @@ dependencies: # pandas dependencies - python-dateutil - pytz + - jinja2 - pip - pip: - cython==0.29.21 # GH#34014 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 20ae37c85a9d9..99b96b62e314e 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -227,6 +227,7 
@@ Package Minimum support `NumPy <https://numpy.org>`__ 1.18.5 `python-dateutil <https://dateutil.readthedocs.io/en/stable/>`__ 2.8.1 `pytz <https://pypi.org/project/pytz/>`__ 2020.1 +`Jinja2 <https://jinja.pocoo.org>`__ 2.11 ================================================================ ========================== .. _install.recommended_dependencies: @@ -266,7 +267,6 @@ Visualization Dependency Minimum Version Notes ========================= ================== ============================================================= matplotlib 3.3.2 Plotting library -Jinja2 2.11 Conditional formatting with DataFrame.style tabulate 0.8.7 Printing in Markdown-friendly format (see `tabulate`_) ========================= ================== ============================================================= diff --git a/environment.yml b/environment.yml index 733bd06fbe12f..2199dd690d828 100644 --- a/environment.yml +++ b/environment.yml @@ -7,6 +7,7 @@ dependencies: - python=3.8 - python-dateutil>=2.8.1 - pytz + - jinja2 # pandas.Styler (DataFrame.to_html / to_latex) # benchmarks - asv @@ -79,7 +80,6 @@ dependencies: - bottleneck>=1.3.1 - ipykernel - ipython>=7.11.1 - - jinja2 # pandas.Styler - matplotlib>=3.3.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.7.1 - scipy>=1.4.1 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e171ded654989..f3387aa0ad9e7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -26,7 +26,10 @@ import numpy as np -from pandas._config import config +from pandas._config import ( + config, + get_option, +) from pandas._libs import lib from pandas._libs.tslibs import ( @@ -2108,7 +2111,7 @@ def _repr_latex_(self): Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). 
""" - if config.get_option("display.latex.repr"): + if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None @@ -3116,32 +3119,45 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal' def to_latex( self, buf=None, + *, + hrules=False, columns=None, - col_space=None, - header=True, - index=True, - na_rep="NaN", - formatters=None, - float_format=None, - sparsify=None, - index_names=True, - bold_rows=False, column_format=None, - longtable=None, + position=None, + position_float=None, + caption=None, + label=None, + index=True, + header=True, + index_names="all", + sparse_index=None, + sparse_columns=None, + multirow_align=None, + multicol_align=None, + siunitx=False, + environment=None, + formatter=None, + precision=None, + na_rep=None, + decimal=None, + thousands=None, escape=None, + bold_header="none", encoding=None, - decimal=".", + formatters=None, + float_format=None, multicolumn=None, multicolumn_format=None, multirow=None, - caption=None, - label=None, - position=None, + longtable=None, + sparsify=None, + col_space=None, + bold_rows=None, ): r""" Render object to a LaTeX tabular, longtable, or nested table/tabular. - Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted + The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. @@ -3151,150 +3167,338 @@ def to_latex( .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. + .. versionchanged:: 1.4.0 + Now uses ``Styler.to_latex`` implementation via jinja2 templating. + Significant changes to arguments. See Notes. + Parameters ---------- - buf : str, Path or StringIO-like, optional, default None - Buffer to write to. If None, the output is returned as a string. + buf : str, Path or StringIO-like, optional + Buffer to write to. If `None`, the output is returned as a string. 
+ hrules : bool + Set to `True` to add `\\toprule`, `\\midrule` and `\\bottomrule` from the + `booktabs` LaTeX package. + + .. versionadded:: 1.4.0 columns : list of label, optional The subset of columns to write. Writes all columns by default. - col_space : int, optional - The minimum width of each column. - header : bool or list of str, default True - Write out the column names. If a list of strings is given, - it is assumed to be aliases for the column names. - index : bool, default True - Write row names (index). - na_rep : str, default 'NaN' - Missing data representation. - formatters : list of functions or dict of {{str: function}}, optional - Formatter functions to apply to columns' elements by position or - name. The result of each function must be a unicode string. - List must be of length equal to the number of columns. - float_format : one-parameter function or str, optional, default None - Formatter for floating point numbers. For example - ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will - both result in 0.1234 being formatted as 0.12. - sparsify : bool, optional - Set to False for a DataFrame with a hierarchical index to print - every multiindex key at each row. By default, the value will be - read from the config module. - index_names : bool, default True - Prints the names of the indexes. - bold_rows : bool, default False - Make the row labels bold in the output. column_format : str, optional - The columns format as specified in `LaTeX table format - <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 - columns. By default, 'l' will be used for all columns except - columns of numbers, which default to 'r'. - longtable : bool, optional - By default, the value will be read from the pandas config - module. Use a longtable environment instead of tabular. Requires - adding a \usepackage{{longtable}} to your LaTeX preamble. - escape : bool, optional - By default, the value will be read from the pandas config - module. 
When set to False prevents from escaping latex special - characters in column names. + The LaTeX column specification placed in location: + + `\\begin{{tabular}}{{<column_format>}}` + + Defaults to 'l' for index and + non-numeric data columns, and, for numeric data columns, + to 'r' by default, or 'S' if ``siunitx`` is `True`. + + .. versionchanged:: 1.4.0 + position : str, optional + The LaTeX positional argument (e.g. 'h!') for tables, placed in location: + + `\\begin{{table}}[<position>]`. + + .. versionchanged:: 1.2.0 + position_float : {{"centering", "raggedleft", "raggedright"}}, optional + The LaTeX float command placed in location: + + `\\begin{{table}}[<position>]` + + `\\<position_float>` + + Cannot be used if ``environment`` is "longtable". + + .. versionadded:: 1.4.0 + caption : str, tuple, optional + If string, then table caption included as: `\\caption{{<caption>}}`. + If tuple, i.e. ("full caption", "short caption"), the caption included + as: + + `\\caption[<caption[1]>]{{<caption[0]>}}`. + + .. versionadded:: 1.0.0 + + .. versionchanged:: 1.2.0 + Optionally allow caption to be a tuple `(full_caption, short_caption)`. + label : str, optional + The LaTeX label included as: `\\label{{<label>}}`. + This is used with `\\ref{{<label>}}` in the main .tex file. + + .. versionadded:: 1.0.0 + index : bool + Whether to print index labels. + + .. versionchanged:: 1.4.0 + header : bool or list of str + Whether to print column headers. If a list of strings is given, it is assumed + to be aliases for the column names. + index_names : {{"all", "index", "columns", "none"}}, bool + Whether to print the names of the indexes before rendering. + + .. versionchanged:: 1.4.0 + sparse_index : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each row. + Defaults to ``pandas.options.styler.sparse.index``, which is `True`. + + ..
versionadded:: 1.4.0 + sparse_columns : bool, optional + Equivalent to ``sparse_index`` applied to column headers. + Defaults to ``pandas.options.styler.sparse.columns``, which is `True`. + + .. versionadded:: 1.4.0 + multirow_align : {{"c", "t", "b", "naive"}} + If sparsifying hierarchical MultiIndexes, whether to align text centrally, + at the top or bottom. If "naive" is given will not use `multirow` package. + Defaults to ``pandas.options.styler.latex.multirow_align``, which is "c". + + .. versionadded:: 1.4.0 + multicol_align : {{"r", "c", "l", "naive-l", "naive-r"}} + If sparsifying hierarchical MultiIndex columns, whether to align text at + the left, centrally, or at the right. If a "naive" entry is given will not + use `multicolumn` package. + Defaults to ``pandas.options.styler.latex.multicol_align``, which is "r". + + .. versionadded:: 1.4.0 + siunitx : bool, default False + Set to `True` to structure LaTeX compatible with the `siunitx` package. + + .. versionadded:: 1.4.0 + environment : str, optional + If given, the environment that will replace "table" in `\\begin{{table}}`. + If "longtable" is specified then a more suitable template is + rendered. + Defaults to ``pandas.options.styler.latex.environment``, which is `None`. + + .. versionadded:: 1.4.0 + formatter : str, callable, dict, optional + Object to define how values are displayed. See notes for ``Styler.format``. + Defaults to ``pandas.options.styler.format.formatter``, which is `None`. + + .. versionadded:: 1.4.0 + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. Defaults to + ``pandas.options.styler.format.precision``, which is 6. + + .. versionadded:: 1.4.0 + na_rep : str, optional + Representation for missing values. + Defaults to ``pandas.options.styler.format.na_rep``, which is `None`. + + .. versionchanged:: 1.4.0 + decimal : str, default "." 
+ Character used as decimal separator for floats, complex and integers. + Defaults to ``pandas.options.styler.format.decimal``, which is ".". + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers. + Defaults to ``pandas.options.styler.format.thousands``, which is `None`. + + .. versionadded:: 1.4.0 + escape : bool, + Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, + ``{{``, ``}}``, ``~``, ``^``, and ``\`` in the cell display string with + LaTeX-safe sequences. + Escaping is done before ``formatter``. + Defaults to (``pandas.options.styler.format.escape`` `=="latex"`), which + is `False`. + + .. versionchanged:: 1.4.0 + bold_header : {{"none", "index", "columns", "both"}} + Make the row labels and/or column headers bold in the output, using + command `\\textbf{{<value>}}`. + + .. versionadded:: 1.4.0 encoding : str, optional - A string representing the encoding to use in the output file, - defaults to 'utf-8'. - decimal : str, default '.' - Character recognized as decimal separator, e.g. ',' in Europe. + Character encoding setting for file output, defaults to + ``pandas.options.styler.render.encoding``, which is "utf-8", if None. + + .. versionchanged:: 1.4.0 + formatters : list, tuple or dict of one-param. functions, optional + Unused. + + .. deprecated:: 1.4.0 + Use ``formatter`` instead, which is passed to ``Styler.format``. + float_format : one-parameter function, optional, default None + Unused. + + .. deprecated:: 1.4.0 + Deprecated in favour of using arguments native to ``Styler.format``, + such as ``precision``, ``decimal``, and ``thousands``. multicolumn : bool, default True - Use \multicolumn to enhance MultiIndex columns. - The default will be read from the config module. + Unused. + + .. deprecated:: 1.4.0 + Use alternative ``sparse_columns`` and ``multicol_align`` arguments. 
multicolumn_format : str, default 'l' - The alignment for multicolumns, similar to `column_format` - The default will be read from the config module. + Unused. + + .. deprecated:: 1.4.0 + Use alternative ``sparse_columns`` and ``multicol_align`` arguments. multirow : bool, default False - Use \multirow to enhance MultiIndex rows. Requires adding a - \usepackage{{multirow}} to your LaTeX preamble. Will print - centered labels (instead of top-aligned) across the contained - rows, separating groups via clines. The default will be read - from the pandas config module. - caption : str or tuple, optional - Tuple (full_caption, short_caption), - which results in ``\caption[short_caption]{{full_caption}}``; - if a single string is passed, no short caption will be set. + Unused. - .. versionadded:: 1.0.0 + .. deprecated:: 1.4.0 + Use alternative ``sparse_index`` and ``multirow_align`` arguments. + longtable : bool + Unused. - .. versionchanged:: 1.2.0 - Optionally allow caption to be a tuple ``(full_caption, short_caption)``. + .. deprecated:: 1.4.0 + Use ``environment='longtable'`` instead. + sparsify : bool + Unused. - label : str, optional - The LaTeX label to be placed inside ``\label{{}}`` in the output. - This is used with ``\ref{{}}`` in the main ``.tex`` file. + .. deprecated:: 1.4.0 + Use ``sparse_columns`` and ``sparse_index`` instead. + col_space : int, optional + Unused. - .. versionadded:: 1.0.0 - position : str, optional - The LaTeX positional argument for tables, to be placed after - ``\begin{{}}`` in the output. + .. deprecated:: 1.4.0 + Adding LaTeX styling commands renders spacing not applicable. + bold_rows : bool, default False + Unused. - .. versionadded:: 1.2.0 + .. deprecated:: 1.4.0 + Use ``bold_header`` instead. {returns} See Also -------- + Styler.to_latex : Render a Styler to LaTeX. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. + Notes + ----- + + ..
note:: + As of version 1.4.0 this method was changed to use the implementation of + `Styler.to_latex()` via jinja2 templating, and no longer uses the pandas + internal `LatexFormatter` class. This is the reason for many of the + altered arguments in the method signature. + + The Styler implementation has extended functionality and performance and + further information is available in its own documentation relevant for this + method. + Examples -------- - >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], - ... mask=['red', 'purple'], - ... weapon=['sai', 'bo staff'])) - >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE + >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], + ... 'mask': ['red', 'purple'], + ... 'weapon': ['sai', 'bo staff']}}) + >>> print(df.to_latex(index=False, hrules=True)) \begin{{tabular}}{{lll}} - \toprule - name & mask & weapon \\ - \midrule - Raphael & red & sai \\ - Donatello & purple & bo staff \\ + \toprule + name & mask & weapon \\ + \midrule + Raphael & red & sai \\ + Donatello & purple & bo staff \\ \bottomrule \end{{tabular}} """ - # Get defaults from the pandas config - if self.ndim == 1: - self = self.to_frame() - if longtable is None: - longtable = config.get_option("display.latex.longtable") - if escape is None: - escape = config.get_option("display.latex.escape") - if multicolumn is None: - multicolumn = config.get_option("display.latex.multicolumn") - if multicolumn_format is None: - multicolumn_format = config.get_option("display.latex.multicolumn_format") - if multirow is None: - multirow = config.get_option("display.latex.multirow") - - self = cast("DataFrame", self) - formatter = DataFrameFormatter( - self, - columns=columns, - col_space=col_space, + kwargs = locals() + deprecated_args = { + "longtable": '`environment="longtable"`', + "multicolumn": "`multicol_align`", + "multicolumn_format": "`multicol_align`", + "multirow": "`multirow_align`", + "col_space": None, + "formatters": 
"`formatter`, `precision`, `decimal`, `thousands`", + "float_format": "`precision`, `decimal`, `thousands`, `formatter`", + "sparsify": "`sparse_index`, `sparse_columns`", + "bold_rows": "`bold_header`", + } + for k, v in deprecated_args.items(): + if kwargs[k] is not None: + warnings.warn( + f"`{k}` is deprecated after transition to `Styler.to_latex()` " + f"signature." + ("" if v is None else f" Consider {v} instead."), + FutureWarning, + stacklevel=2, + ) + + from pandas.io.formats.style import Styler + + if ( + escape is None and get_option("styler.format.escape") == "latex" + ) or escape is True: + escape = "latex" + else: + escape = None + # error: Argument 1 to "Styler" has incompatible type "NDFrame"; expected + # "Union[DataFrame, Series]" + styler = Styler( + self, # type: ignore[arg-type] + uuid="", + formatter=formatter, na_rep=na_rep, - header=header, - index=index, - formatters=formatters, - float_format=float_format, - bold_rows=bold_rows, - sparsify=sparsify, - index_names=index_names, - escape=escape, + precision=precision, decimal=decimal, + thousands=thousands, + escape=escape, ) - return DataFrameRenderer(formatter).to_latex( + + for ax in [0, 1]: # + fmt: Callable | str | None = None + if isinstance(formatter, dict): + fmt = formatter.get("__index__" if ax == 0 else "__columns__") + decimal = decimal or get_option("styler.format.decimal") + styler.format_index( + axis=ax, + escape="latex" if escape else None, + na_rep=na_rep, + decimal=decimal, + precision=precision, + thousands=thousands, + formatter=fmt, + ) + + if header is False: + styler.hide_columns() + elif isinstance(header, (list, tuple)): + if len(header) != len(styler.columns): + raise ValueError( + f"`header` gave {len(header)} aliases for {len(styler.columns)} " + f"columns." 
+ ) + for i, val in enumerate(header): + + def disp(x, v): + return f"{v}" + + styler._display_funcs_columns[(0, i)] = functools.partial(disp, v=val) + if isinstance(styler.columns, MultiIndex): + styler.hide_columns( + level=[i for i in range(styler.columns.nlevels) if i != 0] + ) + if not index: + styler.hide_index() + if index_names is False or index_names in ["none", "columns"]: + styler.hide_index(names=True) + if index_names is False or index_names in ["none", "index"]: + styler.hide_columns(names=True) + if columns: + hidden = [col for col in styler.columns if col not in columns] + styler.hide_columns(hidden) + if bold_header in ["index", "both"]: + styler.applymap_index(lambda val: "textbf: --rwrap;") + if bold_header in ["columns", "both"]: + styler.applymap_index(lambda val: "textbf: --rwrap;", axis=1) + + return styler.to_latex( buf=buf, - column_format=column_format, - longtable=longtable, encoding=encoding, - multicolumn=multicolumn, - multicolumn_format=multicolumn_format, - multirow=multirow, - caption=caption, - label=label, + sparse_index=sparse_index, + sparse_columns=sparse_columns, + column_format=column_format, position=position, + position_float=position_float, + hrules=hrules, + label=label, + caption=caption, + multirow_align=multirow_align, + multicol_align=multicol_align, + environment=environment, + siunitx=siunitx, ) @final diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 2fbd9506e391a..7653abf10244b 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -786,6 +786,8 @@ def to_latex( ) if column_format is not None: + if not isinstance(column_format, str): + raise ValueError("`column_format` must be a string.") # add more recent setting to table_styles obj.set_table_styles( [{"selector": "column_format", "props": f":{column_format}"}], @@ -870,10 +872,12 @@ def to_latex( siunitx=siunitx, ) - encoding = encoding or get_option("styler.render.encoding") - return save_to_buffer( - latex, 
buf=buf, encoding=None if buf is None else encoding + encoding = ( + (encoding or get_option("styler.render.encoding")) + if isinstance(buf, str) # i.e. a filepath + else encoding ) + return save_to_buffer(latex, buf=buf, encoding=encoding) def to_html( self, @@ -1800,7 +1804,7 @@ def set_caption(self, caption: str | tuple) -> Styler: self : Styler """ msg = "`caption` must be either a string or 2-tuple of strings." - if isinstance(caption, tuple): + if isinstance(caption, (tuple, list)): if ( len(caption) != 2 or not isinstance(caption[0], str) @@ -2218,9 +2222,7 @@ def hide_index( subset = non_reducing_slice(subset_) hide = self.data.loc[subset] hrows = self.index.get_indexer_for(hide.index) - # error: Incompatible types in assignment (expression has type - # "ndarray", variable has type "Sequence[int]") - self.hidden_rows = hrows # type: ignore[assignment] + self.hidden_rows = list(hrows) if names: self.hide_index_names = True @@ -2357,9 +2359,7 @@ def hide_columns( subset = non_reducing_slice(subset_) hide = self.data.loc[subset] hcols = self.columns.get_indexer_for(hide.columns) - # error: Incompatible types in assignment (expression has type - # "ndarray", variable has type "Sequence[int]") - self.hidden_columns = hcols # type: ignore[assignment] + self.hidden_columns = list(hcols) if names: self.hide_column_names = True diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index e2cfc50510173..62393dd980c19 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -269,15 +269,12 @@ def test_repr_column_name_unicode_truncation_bug(self): def test_latex_repr(self): result = r"""\begin{tabular}{llll} -\toprule -{} & 0 & 1 & 2 \\ -\midrule -0 & $\alpha$ & b & c \\ -1 & 1 & 2 & 3 \\ -\bottomrule + & 0 & 1 & 2 \\ +0 & $\alpha$ & b & c \\ +1 & 1 & 2 & 3 \\ \end{tabular} """ - with option_context("display.latex.escape", False, "display.latex.repr", True): + with 
option_context("styler.render.repr", "latex"): df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]]) assert result == df._repr_latex_() diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index f0d5ef19c4468..9bc1840ed0aba 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -143,7 +143,7 @@ def test_publishes(self): formatted = self.display_formatter.format(obj) assert set(formatted[0].keys()) == expected - with_latex = pd.option_context("display.latex.repr", True) + with_latex = pd.option_context("styler.render.repr", "latex") with opt, with_latex: formatted = self.display_formatter.format(obj) diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 10c8ccae67fb2..8af398f1b565e 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -61,15 +61,15 @@ def test_to_latex_to_file_utf8_without_encoding(self): def test_to_latex_tabular_with_index(self): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex() + result = df.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} """ @@ -78,15 +78,15 @@ def test_to_latex_tabular_with_index(self): def test_to_latex_tabular_without_index(self): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(index=False) + result = df.to_latex(index=False, hrules=True) expected = _dedent( r""" \begin{tabular}{rl} \toprule - a & b \\ + a & b \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \bottomrule \end{tabular} """ @@ -99,7 +99,7 @@ def test_to_latex_tabular_without_index(self): ) def test_to_latex_bad_column_format(self, bad_column_format): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - msg = r"column_format must be str or unicode" + msg = 
r"`column_format` must be a string" with pytest.raises(ValueError, match=msg): df.to_latex(column_format=bad_column_format) @@ -109,15 +109,15 @@ def test_to_latex_column_format_just_works(self, float_frame): def test_to_latex_column_format(self): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(column_format="lcr") + result = df.to_latex(column_format="lcr", hrules=True) expected = _dedent( r""" \begin{tabular}{lcr} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} """ @@ -127,15 +127,15 @@ def test_to_latex_column_format(self): def test_to_latex_float_format_object_col(self): # GH#40024 ser = Series([1000.0, "test"]) - result = ser.to_latex(float_format="{:,.0f}".format) + result = ser.to_latex(precision=0, thousands=",", hrules=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule - {} & 0 \\ + & 0 \\ \midrule 0 & 1,000 \\ - 1 & test \\ + 1 & test \\ \bottomrule \end{tabular} """ @@ -144,14 +144,12 @@ def test_to_latex_float_format_object_col(self): def test_to_latex_empty_tabular(self): df = DataFrame() - result = df.to_latex() + result = df.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{l} \toprule - Empty DataFrame - Columns: Index([], dtype='object') - Index: Index([], dtype='object') \\ + \midrule \bottomrule \end{tabular} """ @@ -160,16 +158,16 @@ def test_to_latex_empty_tabular(self): def test_to_latex_series(self): s = Series(["a", "b", "c"]) - result = s.to_latex() + result = s.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule - {} & 0 \\ + & 0 \\ \midrule - 0 & a \\ - 1 & b \\ - 2 & c \\ + 0 & a \\ + 1 & b \\ + 2 & c \\ \bottomrule \end{tabular} """ @@ -180,15 +178,15 @@ def test_to_latex_midrule_location(self): # GH 18326 df = DataFrame({"a": [1, 2]}) df.index.name = "foo" - result = df.to_latex(index_names=False) + result = df.to_latex(index_names=False, hrules=True) expected = _dedent( r""" 
\begin{tabular}{lr} \toprule - {} & a \\ + & a \\ \midrule - 0 & 1 \\ - 1 & 2 \\ + 0 & 1 \\ + 1 & 2 \\ \bottomrule \end{tabular} """ @@ -199,14 +197,15 @@ def test_to_latex_midrule_location(self): class TestToLatexLongtable: def test_to_latex_empty_longtable(self): df = DataFrame() - result = df.to_latex(longtable=True) + result = df.to_latex(environment="longtable") expected = _dedent( r""" \begin{longtable}{l} - \toprule - Empty DataFrame - Columns: Index([], dtype='object') - Index: Index([], dtype='object') \\ + \endfirsthead + \endhead + \multicolumn{1}{r}{Continued on next page} \\ + \endfoot + \endlastfoot \end{longtable} """ ) @@ -214,28 +213,26 @@ def test_to_latex_empty_longtable(self): def test_to_latex_longtable_with_index(self): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(longtable=True) + result = df.to_latex(environment="longtable", hrules=True) expected = _dedent( r""" \begin{longtable}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule \endfirsthead - \toprule - {} & a & b \\ + & a & b \\ \midrule \endhead \midrule - \multicolumn{3}{r}{{Continued on next page}} \\ + \multicolumn{3}{r}{Continued on next page} \\ \midrule \endfoot - \bottomrule \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \end{longtable} """ ) @@ -243,28 +240,26 @@ def test_to_latex_longtable_with_index(self): def test_to_latex_longtable_without_index(self): df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(index=False, longtable=True) + result = df.to_latex(index=False, environment="longtable", hrules=True) expected = _dedent( r""" \begin{longtable}{rl} \toprule - a & b \\ + a & b \\ \midrule \endfirsthead - \toprule - a & b \\ + a & b \\ \midrule \endhead \midrule - \multicolumn{2}{r}{{Continued on next page}} \\ + \multicolumn{2}{r}{Continued on next page} \\ \midrule \endfoot - \bottomrule \endlastfoot - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \end{longtable} """ ) @@ -279,7 +274,7 @@ def 
test_to_latex_longtable_without_index(self): ], ) def test_to_latex_longtable_continued_on_next_page(self, df, expected_number): - result = df.to_latex(index=False, longtable=True) + result = df.to_latex(index=False, environment="longtable") assert fr"\multicolumn{{{expected_number}}}" in result @@ -287,13 +282,14 @@ class TestToLatexHeader: def test_to_latex_no_header_with_index(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(header=False) + result = df.to_latex(header=False, hrules=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + \midrule + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} """ @@ -303,11 +299,12 @@ def test_to_latex_no_header_with_index(self): def test_to_latex_no_header_without_index(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(index=False, header=False) + result = df.to_latex(index=False, header=False, hrules=True) expected = _dedent( r""" \begin{tabular}{rl} \toprule + \midrule 1 & b1 \\ 2 & b2 \\ \bottomrule @@ -319,15 +316,15 @@ def test_to_latex_no_header_without_index(self): def test_to_latex_specified_header_with_index(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(header=["AA", "BB"]) + result = df.to_latex(header=["AA", "BB"], hrules=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule - {} & AA & BB \\ + & AA & BB \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} """ @@ -337,15 +334,15 @@ def test_to_latex_specified_header_with_index(self): def test_to_latex_specified_header_without_index(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(header=["AA", "BB"], index=False) + result = df.to_latex(header=["AA", "BB"], index=False, hrules=True) expected = _dedent( r""" \begin{tabular}{rl} \toprule AA & BB \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ 
\bottomrule \end{tabular} """ @@ -368,22 +365,22 @@ def test_to_latex_number_of_items_in_header_missmatch_raises( ): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - msg = f"Writing 2 cols but got {num_aliases} aliases" + msg = f"`header` gave {num_aliases} aliases for 2 columns." with pytest.raises(ValueError, match=msg): df.to_latex(header=header) def test_to_latex_decimal(self): # GH 12031 df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]}) - result = df.to_latex(decimal=",") + result = df.to_latex(decimal=",", precision=1, hrules=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1,0 & b1 \\ - 1 & 2,1 & b2 \\ + 0 & 1,0 & b1 \\ + 1 & 2,1 & b2 \\ \bottomrule \end{tabular} """ @@ -392,38 +389,28 @@ def test_to_latex_decimal(self): class TestToLatexBold: - def test_to_latex_bold_rows(self): + @pytest.mark.parametrize("bold_header", ["both", "none", "index", "columns"]) + def test_to_latex_bold_rows(self, bold_header): # GH 16707 - df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(bold_rows=True) - expected = _dedent( - r""" - \begin{tabular}{lrl} - \toprule - {} & a & b \\ - \midrule - \textbf{0} & 1 & b1 \\ - \textbf{1} & 2 & b2 \\ - \bottomrule - \end{tabular} - """ + cols = ( + "\\textbf{a} & \\textbf{b}" + if bold_header in ["both", "columns"] + else "a & b" ) - assert result == expected - - def test_to_latex_no_bold_rows(self): - # GH 16707 + idx1 = "\\textbf{0}" if bold_header in ["both", "index"] else "0" + idx2 = "\\textbf{1}" if bold_header in ["both", "index"] else "1" df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(bold_rows=False) + result = df.to_latex(bold_header=bold_header, hrules=True) expected = _dedent( - r""" - \begin{tabular}{lrl} - \toprule - {} & a & b \\ - \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ - \bottomrule - \end{tabular} + f""" + \\begin{{tabular}}{{lrl}} + \\toprule + & {cols} \\\\ + \\midrule + {idx1} & 1 & b1 \\\\ + {idx2} 
& 2 & b2 \\\\ + \\bottomrule + \\end{{tabular}} """ ) assert result == expected @@ -457,7 +444,9 @@ def label_longtable(self): def test_to_latex_caption_only(self, df_short, caption_table): # GH 25436 - result = df_short.to_latex(caption=caption_table) + result = df_short.to_latex( + caption=caption_table, hrules=True, position_float="centering" + ) expected = _dedent( r""" \begin{table} @@ -465,10 +454,10 @@ def test_to_latex_caption_only(self, df_short, caption_table): \caption{a table in a \texttt{table/tabular} environment} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -478,7 +467,9 @@ def test_to_latex_caption_only(self, df_short, caption_table): def test_to_latex_label_only(self, df_short, label_table): # GH 25436 - result = df_short.to_latex(label=label_table) + result = df_short.to_latex( + label=label_table, position_float="centering", hrules=True + ) expected = _dedent( r""" \begin{table} @@ -486,10 +477,10 @@ def test_to_latex_label_only(self, df_short, label_table): \label{tab:table_tabular} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -499,7 +490,12 @@ def test_to_latex_label_only(self, df_short, label_table): def test_to_latex_caption_and_label(self, df_short, caption_table, label_table): # GH 25436 - result = df_short.to_latex(caption=caption_table, label=label_table) + result = df_short.to_latex( + caption=caption_table, + label=label_table, + hrules=True, + position_float="centering", + ) expected = _dedent( r""" \begin{table} @@ -508,10 +504,10 @@ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table): \label{tab:table_tabular} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule 
\end{tabular} \end{table} @@ -525,7 +521,11 @@ def test_to_latex_caption_and_shortcaption( caption_table, short_caption, ): - result = df_short.to_latex(caption=(caption_table, short_caption)) + result = df_short.to_latex( + caption=(caption_table, short_caption), + hrules=True, + position_float="centering", + ) expected = _dedent( r""" \begin{table} @@ -533,10 +533,10 @@ def test_to_latex_caption_and_shortcaption( \caption[a table]{a table in a \texttt{table/tabular} environment} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -561,6 +561,8 @@ def test_to_latex_caption_shortcaption_and_label( result = df_short.to_latex( caption=(caption_table, short_caption), label=label_table, + hrules=True, + position_float="centering", ) expected = _dedent( r""" @@ -570,10 +572,10 @@ def test_to_latex_caption_shortcaption_and_label( \label{tab:table_tabular} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -594,14 +596,16 @@ def test_to_latex_caption_shortcaption_and_label( def test_to_latex_bad_caption_raises(self, bad_caption): # test that wrong number of params is raised df = DataFrame({"a": [1]}) - msg = "caption must be either a string or a tuple of two strings" + msg = "`caption` must be either a string or 2-tuple of strings." with pytest.raises(ValueError, match=msg): df.to_latex(caption=bad_caption) def test_to_latex_two_chars_caption(self, df_short): # test that two chars caption is handled correctly # it must not be unpacked into long_caption, short_caption. 
- result = df_short.to_latex(caption="xy") + result = df_short.to_latex( + caption="xy", hrules=True, position_float="centering" + ) expected = _dedent( r""" \begin{table} @@ -609,10 +613,10 @@ def test_to_latex_two_chars_caption(self, df_short): \caption{xy} \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -624,29 +628,30 @@ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable): # GH 25436 # test when no caption and no label is provided # is performed by test_to_latex_longtable() - result = df_short.to_latex(longtable=True, caption=caption_longtable) + result = df_short.to_latex( + environment="longtable", caption=caption_longtable, hrules=True + ) expected = _dedent( r""" \begin{longtable}{lrl} - \caption{a table in a \texttt{longtable} environment}\\ + \caption{a table in a \texttt{longtable} environment} \\ \toprule - {} & a & b \\ + & a & b \\ \midrule \endfirsthead \caption[]{a table in a \texttt{longtable} environment} \\ \toprule - {} & a & b \\ + & a & b \\ \midrule \endhead \midrule - \multicolumn{3}{r}{{Continued on next page}} \\ + \multicolumn{3}{r}{Continued on next page} \\ \midrule \endfoot - \bottomrule \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \end{longtable} """ ) @@ -654,29 +659,29 @@ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable): def test_to_latex_longtable_label_only(self, df_short, label_longtable): # GH 25436 - result = df_short.to_latex(longtable=True, label=label_longtable) + result = df_short.to_latex( + environment="longtable", label=label_longtable, hrules=True + ) expected = _dedent( r""" \begin{longtable}{lrl} - \label{tab:longtable}\\ + \label{tab:longtable} \\ \toprule - {} & a & b \\ + & a & b \\ \midrule \endfirsthead - \toprule - {} & a & b \\ + & a & b \\ \midrule \endhead \midrule - \multicolumn{3}{r}{{Continued on next 
page}} \\ + \multicolumn{3}{r}{Continued on next page} \\ \midrule \endfoot - \bottomrule \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \end{longtable} """ ) @@ -690,35 +695,34 @@ def test_to_latex_longtable_caption_and_label( ): # GH 25436 result = df_short.to_latex( - longtable=True, + environment="longtable", + hrules=True, caption=caption_longtable, label=label_longtable, ) expected = _dedent( r""" - \begin{longtable}{lrl} - \caption{a table in a \texttt{longtable} environment} - \label{tab:longtable}\\ - \toprule - {} & a & b \\ - \midrule - \endfirsthead - \caption[]{a table in a \texttt{longtable} environment} \\ - \toprule - {} & a & b \\ - \midrule - \endhead - \midrule - \multicolumn{3}{r}{{Continued on next page}} \\ - \midrule - \endfoot - - \bottomrule - \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ - \end{longtable} - """ + \begin{longtable}{lrl} + \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\ + \toprule + & a & b \\ + \midrule + \endfirsthead + \caption[]{a table in a \texttt{longtable} environment} \\ + \toprule + & a & b \\ + \midrule + \endhead + \midrule + \multicolumn{3}{r}{Continued on next page} \\ + \midrule + \endfoot + \bottomrule + \endlastfoot + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ + \end{longtable} + """ ) assert result == expected @@ -731,35 +735,34 @@ def test_to_latex_longtable_caption_shortcaption_and_label( ): # test when the caption, the short_caption and the label are provided result = df_short.to_latex( - longtable=True, + environment="longtable", caption=(caption_longtable, short_caption), label=label_longtable, + hrules=True, ) expected = _dedent( r""" - \begin{longtable}{lrl} - \caption[a table]{a table in a \texttt{longtable} environment} - \label{tab:longtable}\\ - \toprule - {} & a & b \\ - \midrule - \endfirsthead - \caption[]{a table in a \texttt{longtable} environment} \\ - \toprule - {} & a & b \\ - \midrule - \endhead - \midrule - \multicolumn{3}{r}{{Continued on 
next page}} \\ - \midrule - \endfoot - - \bottomrule - \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ - \end{longtable} - """ +\begin{longtable}{lrl} +\caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\ +\toprule + & a & b \\ +\midrule +\endfirsthead +\caption[]{a table in a \texttt{longtable} environment} \\ +\toprule + & a & b \\ +\midrule +\endhead +\midrule +\multicolumn{3}{r}{Continued on next page} \\ +\midrule +\endfoot +\bottomrule +\endlastfoot +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\end{longtable} +""" ) assert result == expected @@ -773,31 +776,32 @@ def df_with_symbols(self): yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}}) def test_to_latex_escape_false(self, df_with_symbols): - result = df_with_symbols.to_latex(escape=False) + result = df_with_symbols.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{lll} \toprule - {} & co$e^x$ & co^l1 \\ + & co$e^x$ & co^l1 \\ \midrule - a & a & a \\ - b & b & b \\ + a & a & a \\ + b & b & b \\ \bottomrule \end{tabular} """ ) assert result == expected - def test_to_latex_escape_default(self, df_with_symbols): - result = df_with_symbols.to_latex() # default: escape=True + def test_to_latex_escape_true(self, df_with_symbols): + # note default changed with 1.4.0 + result = df_with_symbols.to_latex(hrules=True, escape=True) expected = _dedent( r""" \begin{tabular}{lll} \toprule - {} & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\ + & co\$e\textasciicircum x\$ & co\textasciicircum l1 \\ \midrule - a & a & a \\ - b & b & b \\ + a & a & a \\ + b & b & b \\ \bottomrule \end{tabular} """ @@ -806,16 +810,16 @@ def test_to_latex_escape_default(self, df_with_symbols): def test_to_latex_special_escape(self): df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"]) - result = df.to_latex() + result = df.to_latex(hrules=True, escape=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule - {} & 0 \\ + & 0 \\ \midrule - 0 & a\textbackslash b\textbackslash c \\ - 1 & 
\textasciicircum a\textasciicircum b\textasciicircum c \\ - 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\ + 0 & a\textbackslash b\textbackslash c \\ + 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\ + 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\ \bottomrule \end{tabular} """ @@ -825,23 +829,23 @@ def test_to_latex_special_escape(self): def test_to_latex_escape_special_chars(self): special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"] df = DataFrame(data=special_characters) - result = df.to_latex() + result = df.to_latex(hrules=True, escape=True) expected = _dedent( r""" \begin{tabular}{ll} \toprule - {} & 0 \\ - \midrule - 0 & \& \\ - 1 & \% \\ - 2 & \$ \\ - 3 & \# \\ - 4 & \_ \\ - 5 & \{ \\ - 6 & \} \\ - 7 & \textasciitilde \\ - 8 & \textasciicircum \\ - 9 & \textbackslash \\ + & 0 \\ + \midrule + 0 & \& \\ + 1 & \% \\ + 2 & \$ \\ + 3 & \# \\ + 4 & \_ \\ + 5 & \{ \\ + 6 & \} \\ + 7 & \textasciitilde \\ + 8 & \textasciicircum \\ + 9 & \textbackslash \\ \bottomrule \end{tabular} """ @@ -851,15 +855,15 @@ def test_to_latex_escape_special_chars(self): def test_to_latex_specified_header_special_chars_without_escape(self): # GH 7124 df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(header=["$A$", "$B$"], escape=False) + result = df.to_latex(header=["$A$", "$B$"], escape=False, hrules=True) expected = _dedent( r""" \begin{tabular}{lrl} \toprule - {} & $A$ & $B$ \\ + & $A$ & $B$ \\ \midrule - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} """ @@ -871,17 +875,19 @@ class TestToLatexPosition: def test_to_latex_position(self): the_position = "h" df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(position=the_position) + result = df.to_latex( + position=the_position, position_float="centering", hrules=True + ) expected = _dedent( r""" \begin{table}[h] \centering \begin{tabular}{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule - 0 & 
1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \bottomrule \end{tabular} \end{table} @@ -890,30 +896,27 @@ def test_to_latex_position(self): assert result == expected def test_to_latex_longtable_position(self): - the_position = "t" df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) - result = df.to_latex(longtable=True, position=the_position) + result = df.to_latex(environment="longtable", position="t", hrules=True) expected = _dedent( r""" \begin{longtable}[t]{lrl} \toprule - {} & a & b \\ + & a & b \\ \midrule \endfirsthead - \toprule - {} & a & b \\ + & a & b \\ \midrule \endhead \midrule - \multicolumn{3}{r}{{Continued on next page}} \\ + \multicolumn{3}{r}{Continued on next page} \\ \midrule \endfoot - \bottomrule \endlastfoot - 0 & 1 & b1 \\ - 1 & 2 & b2 \\ + 0 & 1 & b1 \\ + 1 & 2 & b2 \\ \end{longtable} """ ) @@ -942,17 +945,17 @@ def test_to_latex_with_formatters(self): "object": lambda x: f"-{x!s}-", "__index__": lambda x: f"index: {x}", } - result = df.to_latex(formatters=dict(formatters)) + result = df.to_latex(formatter=formatters, hrules=True) expected = _dedent( r""" \begin{tabular}{llrrl} \toprule - {} & datetime64 & float & int & object \\ + & datetime64 & float & int & object \\ \midrule - index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\ - index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\ - index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\ + index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\ + index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\ + index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\ \bottomrule \end{tabular} """ @@ -962,12 +965,12 @@ def test_to_latex_with_formatters(self): def test_to_latex_float_format_no_fixed_width_3decimals(self): # GH 21625 df = DataFrame({"x": [0.19999]}) - result = df.to_latex(float_format="%.3f") + result = df.to_latex(formatter="{:.3f}", hrules=True) expected = _dedent( r""" \begin{tabular}{lr} \toprule - {} & x \\ + & x \\ \midrule 0 & 0.200 \\ \bottomrule @@ -979,12 +982,12 @@ def 
test_to_latex_float_format_no_fixed_width_3decimals(self): def test_to_latex_float_format_no_fixed_width_integer(self): # GH 22270 df = DataFrame({"x": [100.0]}) - result = df.to_latex(float_format="%.0f") + result = df.to_latex(formatter="{:.0f}", hrules=True) expected = _dedent( r""" \begin{tabular}{lr} \toprule - {} & x \\ + & x \\ \midrule 0 & 100 \\ \bottomrule @@ -1002,15 +1005,15 @@ def test_to_latex_na_rep_and_float_format(self, na_rep): ], columns=["Group", "Data"], ) - result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format) + result = df.to_latex(na_rep=na_rep, precision=2, hrules=True) expected = _dedent( fr""" \begin{{tabular}}{{llr}} \toprule - {{}} & Group & Data \\ + & Group & Data \\ \midrule - 0 & A & 1.22 \\ - 1 & A & {na_rep} \\ + 0 & A & 1.22 \\ + 1 & A & {na_rep} \\ \bottomrule \end{{tabular}} """ @@ -1049,15 +1052,15 @@ def test_to_latex_multindex_header(self): # GH 16718 df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}) df = df.set_index(["a", "b"]) - observed = df.to_latex(header=["r1", "r2"]) + observed = df.to_latex(header=["r1", "r2"], hrules=True) expected = _dedent( r""" \begin{tabular}{llrr} \toprule - & & r1 & r2 \\ - a & b & & \\ + & & r1 & r2 \\ + a & b & & \\ \midrule - 0 & 1 & 2 & 3 \\ + 0 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ @@ -1068,13 +1071,13 @@ def test_to_latex_multiindex_empty_name(self): # GH 18669 mi = pd.MultiIndex.from_product([[1, 2]], names=[""]) df = DataFrame(-1, index=mi, columns=range(4)) - observed = df.to_latex() + observed = df.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{lrrrr} \toprule - & 0 & 1 & 2 & 3 \\ - {} & & & & \\ + & 0 & 1 & 2 & 3 \\ + & & & & \\ \midrule 1 & -1 & -1 & -1 & -1 \\ 2 & -1 & -1 & -1 & -1 \\ @@ -1086,15 +1089,15 @@ def test_to_latex_multiindex_empty_name(self): def test_to_latex_multiindex_column_tabular(self): df = DataFrame({("x", "y"): ["a"]}) - result = df.to_latex() + result = df.to_latex(hrules=True) expected = _dedent( r""" 
\begin{tabular}{ll} \toprule - {} & x \\ - {} & y \\ + & x \\ + & y \\ \midrule - 0 & a \\ + 0 & a \\ \bottomrule \end{tabular} """ @@ -1103,14 +1106,14 @@ def test_to_latex_multiindex_column_tabular(self): def test_to_latex_multiindex_small_tabular(self): df = DataFrame({("x", "y"): ["a"]}).T - result = df.to_latex() + result = df.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{lll} \toprule - & & 0 \\ + & & 0 \\ \midrule - x & y & a \\ + x & y & a \\ \bottomrule \end{tabular} """ @@ -1118,18 +1121,18 @@ def test_to_latex_multiindex_small_tabular(self): assert result == expected def test_to_latex_multiindex_tabular(self, multiindex_frame): - result = multiindex_frame.to_latex() + result = multiindex_frame.to_latex(hrules=True, multirow_align="naive") expected = _dedent( r""" \begin{tabular}{llrrrr} \toprule - & & 0 & 1 & 2 & 3 \\ + & & 0 & 1 & 2 & 3 \\ \midrule - c1 & 0 & 0 & 1 & 2 & 3 \\ - & 1 & 4 & 5 & 6 & 7 \\ - c2 & 0 & 0 & 1 & 2 & 3 \\ - & 1 & 4 & 5 & 6 & 7 \\ - c3 & 0 & 0 & 1 & 2 & 3 \\ + c1 & 0 & 0 & 1 & 2 & 3 \\ + & 1 & 4 & 5 & 6 & 7 \\ + c2 & 0 & 0 & 1 & 2 & 3 \\ + & 1 & 4 & 5 & 6 & 7 \\ + c3 & 0 & 0 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ @@ -1140,18 +1143,18 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame): # GH 14184 df = multiindex_frame.T df.columns.names = ["a", "b"] - result = df.to_latex() + result = df.to_latex(hrules=True, multicol_align="l") expected = _dedent( r""" \begin{tabular}{lrrrrr} \toprule a & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ - b & 0 & 1 & 0 & 1 & 0 \\ + b & 0 & 1 & 0 & 1 & 0 \\ \midrule - 0 & 0 & 4 & 0 & 4 & 0 \\ - 1 & 1 & 5 & 1 & 5 & 1 \\ - 2 & 2 & 6 & 2 & 6 & 2 \\ - 3 & 3 & 7 & 3 & 7 & 3 \\ + 0 & 0 & 4 & 0 & 4 & 0 \\ + 1 & 1 & 5 & 1 & 5 & 1 \\ + 2 & 2 & 6 & 2 & 6 & 2 \\ + 3 & 3 & 7 & 3 & 7 & 3 \\ \bottomrule \end{tabular} """ @@ -1161,18 +1164,18 @@ def test_to_latex_multicolumn_tabular(self, multiindex_frame): def test_to_latex_index_has_name_tabular(self): # GH 10660 df = 
DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]}) - result = df.set_index(["a", "b"]).to_latex() + result = df.set_index(["a", "b"]).to_latex(hrules=True, multirow_align="naive") expected = _dedent( r""" \begin{tabular}{llr} \toprule - & & c \\ - a & b & \\ + & & c \\ + a & b & \\ \midrule - 0 & a & 1 \\ - & b & 2 \\ - 1 & a & 3 \\ - & b & 4 \\ + 0 & a & 1 \\ + & b & 2 \\ + 1 & a & 3 \\ + & b & 4 \\ \bottomrule \end{tabular} """ @@ -1182,17 +1185,26 @@ def test_to_latex_index_has_name_tabular(self): def test_to_latex_groupby_tabular(self): # GH 10660 df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]}) - result = df.groupby("a").describe().to_latex() + result = ( + df.groupby("a") + .describe() + .to_latex( + hrules=True, + formatter={("c", "std"): "{:.6f}"}, + precision=1, + escape=True, + ) + ) expected = _dedent( r""" \begin{tabular}{lrrrrrrrr} \toprule - {} & \multicolumn{8}{l}{c} \\ - {} & count & mean & std & min & 25\% & 50\% & 75\% & max \\ - a & & & & & & & & \\ + & \multicolumn{8}{r}{c} \\ + & count & mean & std & min & 25\% & 50\% & 75\% & max \\ + a & & & & & & & & \\ \midrule - 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.25 & 1.5 & 1.75 & 2.0 \\ - 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.25 & 3.5 & 3.75 & 4.0 \\ + 0 & 2.0 & 1.5 & 0.707107 & 1.0 & 1.2 & 1.5 & 1.8 & 2.0 \\ + 1 & 2.0 & 3.5 & 0.707107 & 3.0 & 3.2 & 3.5 & 3.8 & 4.0 \\ \bottomrule \end{tabular} """ @@ -1210,15 +1222,15 @@ def test_to_latex_multiindex_dupe_level(self): df = DataFrame( index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"] ) - result = df.to_latex() + result = df.to_latex(hrules=True, na_rep="NaN") expected = _dedent( r""" \begin{tabular}{lll} \toprule - & & col \\ + & & col \\ \midrule - A & c & NaN \\ - B & c & NaN \\ + A & c & NaN \\ + B & c & NaN \\ \bottomrule \end{tabular} """ @@ -1226,19 +1238,19 @@ def test_to_latex_multiindex_dupe_level(self): assert result == expected def test_to_latex_multicolumn_default(self, 
multicolumn_frame): - result = multicolumn_frame.to_latex() + result = multicolumn_frame.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{lrrrrr} \toprule - {} & \multicolumn{2}{l}{c1} & \multicolumn{2}{l}{c2} & c3 \\ - {} & 0 & 1 & 0 & 1 & 0 \\ - \midrule - 0 & 0 & 5 & 0 & 5 & 0 \\ - 1 & 1 & 6 & 1 & 6 & 1 \\ - 2 & 2 & 7 & 2 & 7 & 2 \\ - 3 & 3 & 8 & 3 & 8 & 3 \\ - 4 & 4 & 9 & 4 & 9 & 4 \\ + & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\ + & 0 & 1 & 0 & 1 & 0 \\ + \midrule + 0 & 0 & 5 & 0 & 5 & 0 \\ + 1 & 1 & 6 & 1 & 6 & 1 \\ + 2 & 2 & 7 & 2 & 7 & 2 \\ + 3 & 3 & 8 & 3 & 8 & 3 \\ + 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ @@ -1246,19 +1258,19 @@ def test_to_latex_multicolumn_default(self, multicolumn_frame): assert result == expected def test_to_latex_multicolumn_false(self, multicolumn_frame): - result = multicolumn_frame.to_latex(multicolumn=False) + result = multicolumn_frame.to_latex(multicol_align="naive-l", hrules=True) expected = _dedent( r""" \begin{tabular}{lrrrrr} \toprule - {} & c1 & & c2 & & c3 \\ - {} & 0 & 1 & 0 & 1 & 0 \\ - \midrule - 0 & 0 & 5 & 0 & 5 & 0 \\ - 1 & 1 & 6 & 1 & 6 & 1 \\ - 2 & 2 & 7 & 2 & 7 & 2 \\ - 3 & 3 & 8 & 3 & 8 & 3 \\ - 4 & 4 & 9 & 4 & 9 & 4 \\ + & c1 & & c2 & & c3 \\ + & 0 & 1 & 0 & 1 & 0 \\ + \midrule + 0 & 0 & 5 & 0 & 5 & 0 \\ + 1 & 1 & 6 & 1 & 6 & 1 \\ + 2 & 2 & 7 & 2 & 7 & 2 \\ + 3 & 3 & 8 & 3 & 8 & 3 \\ + 4 & 4 & 9 & 4 & 9 & 4 \\ \bottomrule \end{tabular} """ @@ -1266,20 +1278,18 @@ def test_to_latex_multicolumn_false(self, multicolumn_frame): assert result == expected def test_to_latex_multirow_true(self, multicolumn_frame): - result = multicolumn_frame.T.to_latex(multirow=True) + result = multicolumn_frame.T.to_latex(hrules=True) expected = _dedent( r""" \begin{tabular}{llrrrrr} \toprule - & & 0 & 1 & 2 & 3 & 4 \\ - \midrule - \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ - & 1 & 5 & 6 & 7 & 8 & 9 \\ - \cline{1-7} - \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ - & 1 & 5 & 6 & 7 & 8 
& 9 \\ - \cline{1-7} - c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ + & & 0 & 1 & 2 & 3 & 4 \\ + \midrule + \multirow[c]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ + & 1 & 5 & 6 & 7 & 8 & 9 \\ + \multirow[c]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ + & 1 & 5 & 6 & 7 & 8 & 9 \\ + c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ @@ -1289,24 +1299,22 @@ def test_to_latex_multirow_true(self, multicolumn_frame): def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame): multicolumn_frame.index = multicolumn_frame.T.index result = multicolumn_frame.T.to_latex( - multirow=True, - multicolumn=True, - multicolumn_format="c", + multirow_align="t", + multicol_align="c", + hrules=True, ) expected = _dedent( r""" \begin{tabular}{llrrrrr} \toprule - & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\ - & & 0 & 1 & 0 & 1 & 0 \\ - \midrule - \multirow{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ - & 1 & 5 & 6 & 7 & 8 & 9 \\ - \cline{1-7} - \multirow{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ - & 1 & 5 & 6 & 7 & 8 & 9 \\ - \cline{1-7} - c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ + & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\ + & & 0 & 1 & 0 & 1 & 0 \\ + \midrule + \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\ + & 1 & 5 & 6 & 7 & 8 & 9 \\ + \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\ + & 1 & 5 & 6 & 7 & 8 & 9 \\ + c3 & 0 & 0 & 1 & 2 & 3 & 4 \\ \bottomrule \end{tabular} """ @@ -1324,28 +1332,36 @@ def test_to_latex_multiindex_names(self, name0, name1, axes): for idx in axes: df.axes[idx].names = names - idx_names = tuple(n or "{}" for n in names) - idx_names_row = ( - f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n" - if (0 in axes and any(names)) + index_names_row = ( + f"{{{name0 if name0 else ''}}} & {{{name1 if name1 else ''}}} & {{}} & " + f"{{}} & {{}} & {{}} \\\\\n" + if (0 in axes and (name0 is not None or name1 is not None)) else "" ) - placeholder = "{}" if any(names) and 1 in axes else " " - col_names = [n if (bool(n) and 1 in axes) else placeholder for n in 
names] - observed = df.to_latex() - expected = r"""\begin{tabular}{llrrrr} -\toprule - & %s & \multicolumn{2}{l}{1} & \multicolumn{2}{l}{2} \\ - & %s & 3 & 4 & 3 & 4 \\ -%s\midrule -1 & 3 & -1 & -1 & -1 & -1 \\ - & 4 & -1 & -1 & -1 & -1 \\ -2 & 3 & -1 & -1 & -1 & -1 \\ - & 4 & -1 & -1 & -1 & -1 \\ -\bottomrule -\end{tabular} -""" % tuple( - list(col_names) + [idx_names_row] + column_name0 = f"{{{name0 if (name0 and 1 in axes) else ''}}}" + column_name1 = f"{{{name1 if (name1 and 1 in axes) else ''}}}" + + observed = df.to_latex( + hrules=True, + multicol_align="l", + sparse_columns=False, + sparse_index=False, + siunitx=True, + ) + expected = dedent( + f"""\ +\\begin{{tabular}}{{llSSSS}} +\\toprule +{{}} & {column_name0} & {{1}} & {{1}} & {{2}} & {{2}} \\\\ +{{}} & {column_name1} & {{3}} & {{4}} & {{3}} & {{4}} \\\\ +{index_names_row}\\midrule +1 & 3 & -1 & -1 & -1 & -1 \\\\ +1 & 4 & -1 & -1 & -1 & -1 \\\\ +2 & 3 & -1 & -1 & -1 & -1 \\\\ +2 & 4 & -1 & -1 & -1 & -1 \\\\ +\\bottomrule +\\end{{tabular}} +""" ) assert observed == expected @@ -1355,19 +1371,21 @@ def test_to_latex_multiindex_nans(self, one_row): df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]}) if one_row: df = df.iloc[[0]] - observed = df.set_index(["a", "b"]).to_latex() + observed = df.set_index(["a", "b"]).to_latex( + hrules=True, na_rep="NaN", precision=1 + ) expected = _dedent( r""" \begin{tabular}{llr} \toprule - & & c \\ - a & b & \\ + & & c \\ + a & b & \\ \midrule - NaN & 2 & 4 \\ + NaN & 2 & 4 \\ """ ) if not one_row: - expected += r"""1.0 & 3 & 5 \\ + expected += r"""1.0 & 3 & 5 \\ """ expected += r"""\bottomrule \end{tabular} @@ -1377,16 +1395,16 @@ def test_to_latex_multiindex_nans(self, one_row): def test_to_latex_non_string_index(self): # GH 19981 df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1]) - result = df.to_latex() + result = df.to_latex(hrules=True, multirow_align="naive") expected = _dedent( r""" \begin{tabular}{llr} \toprule - & & 2 \\ - 0 & 1 & \\ + & & 2 \\ + 0 & 1 & 
\\ \midrule - 1 & 2 & 3 \\ - & 2 & 3 \\ + 1 & 2 & 3 \\ + & 2 & 3 \\ \bottomrule \end{tabular} """ @@ -1399,32 +1417,25 @@ def test_to_latex_multiindex_multirow(self): [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"] ) df = DataFrame(index=mi) - result = df.to_latex(multirow=True, escape=False) + result = df.to_latex(escape=False, precision=1, hrules=True) expected = _dedent( r""" \begin{tabular}{lll} \toprule - & & \\ i & val0 & val1 \\ \midrule - \multirow{6}{*}{0.0} & \multirow{2}{*}{3.0} & 0 \\ - & & 1 \\ - \cline{2-3} - & \multirow{2}{*}{2.0} & 0 \\ - & & 1 \\ - \cline{2-3} - & \multirow{2}{*}{1.0} & 0 \\ - & & 1 \\ - \cline{1-3} - \cline{2-3} - \multirow{6}{*}{1.0} & \multirow{2}{*}{3.0} & 0 \\ - & & 1 \\ - \cline{2-3} - & \multirow{2}{*}{2.0} & 0 \\ - & & 1 \\ - \cline{2-3} - & \multirow{2}{*}{1.0} & 0 \\ - & & 1 \\ + \multirow[c]{6}{*}{0.0} & \multirow[c]{2}{*}{3.0} & 0 \\ + & & 1 \\ + & \multirow[c]{2}{*}{2.0} & 0 \\ + & & 1 \\ + & \multirow[c]{2}{*}{1.0} & 0 \\ + & & 1 \\ + \multirow[c]{6}{*}{1.0} & \multirow[c]{2}{*}{3.0} & 0 \\ + & & 1 \\ + & \multirow[c]{2}{*}{2.0} & 0 \\ + & & 1 \\ + & \multirow[c]{2}{*}{1.0} & 0 \\ + & & 1 \\ \bottomrule \end{tabular} """ @@ -1514,3 +1525,24 @@ def test_get_strrow_multindex_multicolumn(self, row_num, expected): ) assert row_string_converter.get_strrow(row_num=row_num) == expected + + +@pytest.mark.parametrize( + "deprecated", + [ + "longtable", + "multicolumn", + "multicolumn_format", + "multirow", + "col_space", + "formatters", + "float_format", + "sparsify", + "bold_rows", + ], +) +def test_deprecation_warning(deprecated): + df = DataFrame([[1]]) + msg = f"`{deprecated}` is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_latex(**{deprecated: "some_value_not_None"}) diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index 555342dd39005..8969deb7b7a9e 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py 
@@ -198,16 +198,13 @@ def test_timeseries_repr_object_dtype(self): def test_latex_repr(self): result = r"""\begin{tabular}{ll} -\toprule -{} & 0 \\ -\midrule -0 & $\alpha$ \\ -1 & b \\ -2 & c \\ -\bottomrule + & 0 \\ +0 & $\alpha$ \\ +1 & b \\ +2 & c \\ \end{tabular} """ - with option_context("display.latex.escape", False, "display.latex.repr", True): + with option_context("styler.render.repr", "latex"): s = Series([r"$\alpha$", "b", "c"]) assert result == s._repr_latex_() diff --git a/requirements-dev.txt b/requirements-dev.txt index 9b35de4bccb48..218bf15865064 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -4,6 +4,7 @@ numpy>=1.18.5 python-dateutil>=2.8.1 pytz +jinja2 asv cython>=0.29.21 black==21.5b2 @@ -51,7 +52,6 @@ blosc bottleneck>=1.3.1 ipykernel ipython>=7.11.1 -jinja2 matplotlib>=3.3.2 numexpr>=2.7.1 scipy>=1.4.1 diff --git a/setup.cfg b/setup.cfg index 62ff0c6934f77..dcd2d3e4d2128 100644 --- a/setup.cfg +++ b/setup.cfg @@ -33,6 +33,7 @@ install_requires = numpy>=1.18.5 python-dateutil>=2.8.1 pytz>=2020.1 + jinja2>=2.11 python_requires = >=3.8 include_package_data = True zip_safe = False
Closes #41649. A lot of the audit trail for the features to get this to work are documented in the issue. ## For Reviewers These are the key changes to note in **DataFrame.to_latex tests** - The expected output remains as close as possible to the original, altering the arg input in `to_latex` to demonstrate the new combinations needed. - No tests have been removed, and all pass with often minor alterations to the test formatting . - `hrules=True` is added to all tests to get `\toprule \midrule \bottomrule`. This is a new option and the default in Styler is not to have these. - `\cline` (for multirow) has gone. This feature is not implemented in Styler. - `col_space` is gone, meaning there is not a nice console alignment of text. Of course the LaTeX rendering is unaffected by this, and when styling LaTeX commands are added this needs to be removed anyway. - There are some nice new features added here, but all currently exists from Styler anyway, and that is where testing resides. ## Follow-Ups - [ ] Make jinja2 a **hard dependency** (#43423) (this PR actually contains the jinja2 PR to help with tests passing) - [ ] Deprecate all `pandas.options.display.latex` documenting the replacement `styler.latex` options instead. - [ ] Remove all redundant code for python LatexFormatter. - [ ] Reimplement cline? Im not a huge fan of this and I think it (maybe?) quite complicated to implement ## Revised Docs ![Screen Shot 2021-09-06 at 23 50 02](https://user-images.githubusercontent.com/24256554/132262289-da75546f-9aca-4938-b32b-12eb84377b25.png) ![Screen Shot 2021-09-06 at 23 50 20](https://user-images.githubusercontent.com/24256554/132262291-eca0003a-ca69-4079-8023-88c489d84034.png) ![Screen Shot 2021-09-06 at 23 50 32](https://user-images.githubusercontent.com/24256554/132262293-89d724d2-673b-4770-9924-0a7269b82b9e.png) ![Screen Shot 2021-09-06 at 23 50 44](https://user-images.githubusercontent.com/24256554/132262294-0e03da0d-5fbb-4101-90d5-5728fe1dfeb6.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/41648
2021-05-24T20:47:22Z
2021-11-16T17:33:11Z
null
2022-03-06T07:41:04Z
Documentation: Indexing and selecting data¶
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index dc66303a44f53..145aa163db272 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -245,6 +245,9 @@ new column. In 0.21.0 and later, this will raise a ``UserWarning``: 1 2.0 2 3.0 +While attribute access will work in many cases, bracket indexing is the recommended syntax for indexing a DataFrame. For 10 reasons why, +please check `this post <https://stackoverflow.com/a/41130329/10491422>` about attribute vs bracket indexing. + Slicing ranges --------------
fixes #41629 Hi @LukeTonin, how does this look? let me know if this needs any changes! Thanks
https://api.github.com/repos/pandas-dev/pandas/pulls/41645
2021-05-24T15:44:59Z
2021-05-26T01:45:12Z
null
2021-05-26T01:45:13Z
BUG: Series[int].loc setitem with Series[int] results in Series[float]
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 177b1ccd166cb..741fc323262b6 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2218,6 +2218,8 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool: if dtype.kind in ["i", "u"]: if tipo is not None: if tipo.kind not in ["i", "u"]: + if is_float(element) and element.is_integer(): + return True # Anything other than integer we cannot hold return False elif dtype.itemsize < tipo.itemsize: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c7769046c70b2..adca54abce04d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -781,14 +781,6 @@ def _replace_list( # so un-tile here return self.replace(src_list, dest_list[0], inplace, regex) - # https://github.com/pandas-dev/pandas/issues/40371 - # the following pairs check code caused a regression so we catch that case here - # until the issue is fixed properly in can_hold_element - - # error: "Iterable[Any]" has no attribute "tolist" - if hasattr(src_list, "tolist"): - src_list = src_list.tolist() # type: ignore[attr-defined] - # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 3f850dfbc6a39..13054062defb4 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -158,7 +158,7 @@ def test_setitem_series_object_dtype(self, indexer, ser_index): expected = Series([Series([42], index=[ser_index]), 0], dtype="object") tm.assert_series_equal(ser, expected) - @pytest.mark.parametrize("index, exp_value", [(0, 42.0), (1, np.nan)]) + @pytest.mark.parametrize("index, exp_value", [(0, 42), (1, np.nan)]) def test_setitem_series(self, index, exp_value): # GH#38303 ser = Series([0, 0])
``` >>> pd.__version__ '1.3.0.dev0+1695.g55e58542db' >>> >>> s = pd.Series([0, 0]) >>> s 0 0 1 0 dtype: int64 >>> >>> s2 = pd.Series([42]) >>> s2 0 42 dtype: int64 >>> >>> s.loc[0] = s2 >>> s 0 42.0 1 0.0 dtype: float64 >>> ``` Does not need a release note, this issue is only on master as on 1.2.4 this raises `ValueError: No axis named 1 for object type Series` which was fixed in #39358 draft, since if we merge and backport #40555 first, we can also remove the patch in this PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/41644
2021-05-24T15:32:39Z
2021-06-08T14:12:08Z
2021-06-08T14:12:08Z
2021-06-09T21:57:09Z
REF: share __array_wrap__
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8fb88e625d948..33f31c4ce96c0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -813,6 +813,7 @@ def __array_wrap__(self, result, context=None): return result attrs = self._get_attributes_dict() + attrs.pop("freq", None) # For DatetimeIndex/TimedeltaIndex return Index(result, **attrs) @cache_readonly diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index b2377f5b27966..185f1dced72bf 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -35,7 +35,6 @@ ) from pandas.core.dtypes.common import ( - is_bool_dtype, is_categorical_dtype, is_dtype_equal, is_integer, @@ -113,15 +112,10 @@ def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions. """ - result = lib.item_from_zerodim(result) - if is_bool_dtype(result) or lib.is_scalar(result): - return result - - attrs = self._get_attributes_dict() - if not is_period_dtype(self.dtype) and attrs["freq"]: - # no need to infer if freq is None - attrs["freq"] = "infer" - return type(self)(result, **attrs) + out = super().__array_wrap__(result, context=context) + if isinstance(out, DatetimeTimedeltaMixin) and self.freq is not None: + out = out._with_freq("infer") + return out # ------------------------------------------------------------------------ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index ac09159c23566..d02f415a4f29f 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -257,8 +257,8 @@ class DatetimeIndex(DatetimeTimedeltaMixin): _engine_type = libindex.DatetimeEngine _supports_partial_string_indexing = True - _comparables = ["name", "freqstr", "tz"] - _attributes = ["name", "tz", "freq"] + _comparables = ["name", "freqstr"] + _attributes = ["name", "freq"] _is_numeric_dtype = False diff --git a/pandas/core/indexes/interval.py 
b/pandas/core/indexes/interval.py index fc92a1b3afe53..1dfcb0ec29f27 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -256,7 +256,7 @@ def func(self, other, sort=None): class IntervalIndex(ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name", "closed"] + _attributes = ["name"] # annotate properties pinned via inherit_names closed: str @@ -422,12 +422,8 @@ def __contains__(self, key: Any) -> bool: def _multiindex(self) -> MultiIndex: return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"]) - def __array_wrap__(self, result, context=None): - # we don't want the superclass implementation - return result - def __reduce__(self): - d = {"left": self.left, "right": self.right} + d = {"left": self.left, "right": self.right, "closed": self.closed} d.update(self._get_attributes_dict()) return _new_IntervalIndex, (type(self), d), None diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 136843938b683..2f2d16b01af53 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -34,7 +34,6 @@ from pandas.util._decorators import doc from pandas.core.dtypes.common import ( - is_bool_dtype, is_datetime64_any_dtype, is_float, is_integer, @@ -350,42 +349,6 @@ def __contains__(self, key: Any) -> bool: # ------------------------------------------------------------------------ # Index Methods - def __array_wrap__(self, result, context=None): - """ - Gets called after a ufunc and other functions. 
- - Needs additional handling as PeriodIndex stores internal data as int - dtype - - Replace this to __numpy_ufunc__ in future version and implement - __array_function__ for Indexes - """ - if isinstance(context, tuple) and len(context) > 0: - func = context[0] - if func is np.add: - pass - elif func is np.subtract: - name = self.name - left = context[1][0] - right = context[1][1] - if isinstance(left, PeriodIndex) and isinstance(right, PeriodIndex): - name = left.name if left.name == right.name else None - return Index(result, name=name) - elif isinstance(left, Period) or isinstance(right, Period): - return Index(result, name=name) - elif isinstance(func, np.ufunc): - if "M->M" not in func.types: - msg = f"ufunc '{func.__name__}' not supported for the PeriodIndex" - # This should be TypeError, but TypeError cannot be raised - # from here because numpy catches. - raise ValueError(msg) - - if is_bool_dtype(result): - return result - # the result is object dtype array of Period - # cannot pass _simple_new as it is - return type(self)(result, freq=self.freq, name=self.name) - def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray: """ where : array of timestamps
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41643
2021-05-24T15:26:15Z
2021-05-25T12:48:10Z
2021-05-25T12:48:10Z
2021-05-25T13:58:22Z
typo fix
diff --git a/setup.cfg b/setup.cfg index f39e377e50c97..6ce66a6f2bdbd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,7 @@ classifiers = Environment :: Console Intended Audience :: Science/Research License :: OSI Approved :: BSD License - Operating System :: OS Independen + Operating System :: OS Independent Programming Language :: Cython Programming Language :: Python Programming Language :: Python :: 3
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/41642
2021-05-24T15:24:22Z
2021-05-24T15:43:40Z
2021-05-24T15:43:40Z
2021-05-24T15:43:47Z