title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
TST: FIXMES in DataFrame.quantile tests
diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 2e6318955e119..5773edbdbcdec 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -280,9 +280,13 @@ def test_quantile_datetime(self): tm.assert_frame_equal(result, expected) # empty when numeric_only=True - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # result = df[['a', 'c']].quantile(.5) - # result = df[['a', 'c']].quantile([.5]) + result = df[["a", "c"]].quantile(0.5) + expected = Series([], index=[], dtype=np.float64, name=0.5) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5]) + expected = DataFrame(index=[0.5]) + tm.assert_frame_equal(result, expected) def test_quantile_invalid(self, datetime_frame): msg = "percentiles should all be in the interval \\[0, 1\\]" @@ -481,7 +485,7 @@ def test_quantile_nat(self): ) tm.assert_frame_equal(res, exp) - def test_quantile_empty_no_rows(self): + def test_quantile_empty_no_rows_floats(self): # floats df = DataFrame(columns=["a", "b"], dtype="float64") @@ -494,21 +498,43 @@ def test_quantile_empty_no_rows(self): exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) tm.assert_frame_equal(res, exp) - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # res = df.quantile(0.5, axis=1) - # res = df.quantile([0.5], axis=1) + res = df.quantile(0.5, axis=1) + exp = Series([], index=[], dtype="float64", name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], axis=1) + exp = DataFrame(columns=[], index=[0.5]) + tm.assert_frame_equal(res, exp) + def test_quantile_empty_no_rows_ints(self): # ints df = DataFrame(columns=["a", "b"], dtype="int64") - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # res = df.quantile(0.5) + res = df.quantile(0.5) + exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + def 
test_quantile_empty_no_rows_dt64(self): # datetimes df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") - # FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0) - # res = df.quantile(0.5, numeric_only=False) + res = df.quantile(0.5, numeric_only=False) + exp = Series( + [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 + ) + tm.assert_series_equal(res, exp) + + # Mixed dt64/dt64tz + df["a"] = df["a"].dt.tz_localize("US/Central") + res = df.quantile(0.5, numeric_only=False) + exp = exp.astype(object) + tm.assert_series_equal(res, exp) + + # both dt64tz + df["b"] = df["b"].dt.tz_localize("US/Central") + res = df.quantile(0.5, numeric_only=False) + exp = exp.astype(df["b"].dtype) + tm.assert_series_equal(res, exp) def test_quantile_empty_no_columns(self): # GH#23925 _get_numeric_data may drop all columns
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry cc @jorisvandenbossche these were introduced in #14536
https://api.github.com/repos/pandas-dev/pandas/pulls/44437
2021-11-13T22:13:18Z
2021-11-14T02:09:07Z
2021-11-14T02:09:07Z
2021-11-15T07:32:43Z
TST: remove pyarrow bz2 xfail
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index e0799df8d7a4c..5aa0edfd8b46a 100644 --- a/pandas/tests/io/parser/test_compression.py +++ b/pandas/tests/io/parser/test_compression.py @@ -103,8 +103,6 @@ def test_compression(parser_and_data, compression_only, buffer, filename): tm.write_to_compressed(compress_type, path, data) compression = "infer" if filename else compress_type - if ext == "bz2": - pytest.xfail("pyarrow wheels don't have bz2 codec support") if buffer: with open(path, "rb") as f: result = parser.read_csv(f, compression=compression)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44436
2021-11-13T21:48:56Z
2021-11-14T02:06:11Z
2021-11-14T02:06:11Z
2021-11-14T15:12:16Z
REF: simplify putmask_smart
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 77e38e6c6e3fc..1f37e0e5d249a 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -4,7 +4,6 @@ from __future__ import annotations from typing import Any -import warnings import numpy as np @@ -15,16 +14,12 @@ ) from pandas.core.dtypes.cast import ( + can_hold_element, convert_scalar_for_putitemlike, find_common_type, infer_dtype_from, ) -from pandas.core.dtypes.common import ( - is_float_dtype, - is_integer_dtype, - is_list_like, -) -from pandas.core.dtypes.missing import isna_compat +from pandas.core.dtypes.common import is_list_like from pandas.core.arrays import ExtensionArray @@ -75,7 +70,7 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd `values`, updated in-place. mask : np.ndarray[bool] Applies to both sides (array like). - new : `new values` either scalar or an array like aligned with `values` + new : listlike `new values` aligned with `values` Returns ------- @@ -89,9 +84,6 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings - if not is_list_like(new): - new = np.broadcast_to(new, mask.shape) - # see if we are only masking values that if putted # will work in the current dtype try: @@ -100,27 +92,12 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # TypeError: only integer scalar arrays can be converted to a scalar index pass else: - # make sure that we have a nullable type if we have nulls - if not isna_compat(values, nn[0]): - pass - elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)): - # only compare integers/floats - pass - elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)): - # only compare integers/floats - pass - else: - - # we ignore ComplexWarning here - with 
warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", np.ComplexWarning) - nn_at = nn.astype(values.dtype) - - comp = nn == nn_at - if is_list_like(comp) and comp.all(): - nv = values.copy() - nv[mask] = nn_at - return nv + # We only get to putmask_smart when we cannot hold 'new' in values. + # The "smart" part of putmask_smart is checking if we can hold new[mask] + # in values, in which case we can still avoid the need to cast. + if can_hold_element(values, nn): + values[mask] = nn + return values new = np.asarray(new) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 55e5b0d0439fa..e20bbb0d90fba 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -952,7 +952,8 @@ def putmask(self, mask, new) -> list[Block]: List[Block] """ orig_mask = mask - mask, noop = validate_putmask(self.values.T, mask) + values = cast(np.ndarray, self.values) + mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) # if we are passed a scalar None, convert it here @@ -960,7 +961,6 @@ def putmask(self, mask, new) -> list[Block]: new = self.fill_value if self._can_hold_element(new): - # error: Argument 1 to "putmask_without_repeat" has incompatible type # "Union[ndarray, ExtensionArray]"; expected "ndarray" putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type] @@ -979,9 +979,15 @@ def putmask(self, mask, new) -> list[Block]: elif self.ndim == 1 or self.shape[0] == 1: # no need to split columns - # error: Argument 1 to "putmask_smart" has incompatible type "Union[ndarray, - # ExtensionArray]"; expected "ndarray" - nv = putmask_smart(self.values.T, mask, new).T # type: ignore[arg-type] + if not is_list_like(new): + # putmask_smart can't save us the need to cast + return self.coerce_to_target_dtype(new).putmask(mask, new) + + # This differs from + # `self.coerce_to_target_dtype(new).putmask(mask, new)` + # because putmask_smart will 
check if new[mask] may be held + # by our dtype. + nv = putmask_smart(values.T, mask, new).T return [self.make_block(nv)] else:
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44435
2021-11-13T21:41:14Z
2021-11-14T02:06:51Z
2021-11-14T02:06:51Z
2021-11-14T15:11:45Z
BUG: read_csv and read_fwf not skipping all defined rows when nrows is given
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index a593a03de5c25..e4e45e9fb0647 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -633,6 +633,7 @@ I/O - Bug in unpickling a :class:`Index` with object dtype incorrectly inferring numeric dtypes (:issue:`43188`) - Bug in :func:`read_csv` where reading multi-header input with unequal lengths incorrectly raising uncontrolled ``IndexError`` (:issue:`43102`) - Bug in :func:`read_csv`, changed exception class when expecting a file path name or file-like object from ``OSError`` to ``TypeError`` (:issue:`43366`) +- Bug in :func:`read_csv` and :func:`read_fwf` ignoring all ``skiprows`` except first when ``nrows`` is specified for ``engine='python'`` (:issue:`44021`) - Bug in :func:`read_json` not handling non-numpy dtypes correctly (especially ``category``) (:issue:`21892`, :issue:`33205`) - Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`) - Bug in :func:`json_normalize` where reading data with missing multi-level metadata would not respect errors="ignore" (:issue:`44312`) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 4d596aa2f3fa6..36387f0835f4a 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -19,7 +19,10 @@ import numpy as np import pandas._libs.lib as lib -from pandas._typing import FilePathOrBuffer +from pandas._typing import ( + FilePathOrBuffer, + Scalar, +) from pandas.errors import ( EmptyDataError, ParserError, @@ -1020,14 +1023,7 @@ def _get_lines(self, rows=None): new_rows = self.data[self.pos : self.pos + rows] new_pos = self.pos + rows - # Check for stop rows. n.b.: self.skiprows is a set. 
- if self.skiprows: - new_rows = [ - row - for i, row in enumerate(new_rows) - if not self.skipfunc(i + self.pos) - ] - + new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) self.pos = new_pos @@ -1035,11 +1031,21 @@ def _get_lines(self, rows=None): new_rows = [] try: if rows is not None: - for _ in range(rows): + + rows_to_skip = 0 + if self.skiprows is not None and self.pos is not None: + # Only read additional rows if pos is in skiprows + rows_to_skip = len( + set(self.skiprows) - set(range(self.pos)) + ) + + for _ in range(rows + rows_to_skip): # assert for mypy, data is Iterator[str] or None, would # error in next assert self.data is not None new_rows.append(next(self.data)) + + new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) else: rows = 0 @@ -1052,12 +1058,7 @@ def _get_lines(self, rows=None): new_rows.append(new_row) except StopIteration: - if self.skiprows: - new_rows = [ - row - for i, row in enumerate(new_rows) - if not self.skipfunc(i + self.pos) - ] + new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) if len(lines) == 0: raise @@ -1076,6 +1077,13 @@ def _get_lines(self, rows=None): lines = self._check_thousands(lines) return self._check_decimal(lines) + def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: + if self.skiprows: + return [ + row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos) + ] + return new_rows + class FixedWidthReader(abc.Iterator): """ diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index 8d1fa97f9f8bb..d4e33543d8a04 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -862,3 +862,18 @@ def test_colspecs_with_comment(): ) expected = DataFrame([[1, "K"]], columns=[0, 1]) tm.assert_frame_equal(result, expected) + + +def test_skip_rows_and_n_rows(): + # GH#44021 + data = """a\tb +1\t a +2\t b +3\t c +4\t d +5\t e +6\t f + """ + result 
= read_fwf(StringIO(data), nrows=4, skiprows=[2, 4]) + expected = DataFrame({"a": [1, 3, 5, 6], "b": ["a", "c", "e", "f"]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py index 9df6bf42c55d2..627bda44016e9 100644 --- a/pandas/tests/io/parser/test_skiprows.py +++ b/pandas/tests/io/parser/test_skiprows.py @@ -256,3 +256,21 @@ def test_skip_rows_bad_callable(all_parsers): with pytest.raises(ZeroDivisionError, match=msg): parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0) + + +def test_skip_rows_and_n_rows(all_parsers): + # GH#44021 + data = """a,b +1,a +2,b +3,c +4,d +5,e +6,f +7,g +8,h +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6]) + expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]}) + tm.assert_frame_equal(result, expected)
- [x] closes #44021 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44434
2021-11-13T21:34:27Z
2021-11-16T00:55:59Z
2021-11-16T00:55:58Z
2021-11-19T21:31:24Z
Fix FloatingArray.equals on older numpy
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 1797f1aff4235..568f3484e78e4 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -47,6 +47,7 @@ ) from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import ( + array_equivalent, isna, notna, ) @@ -636,11 +637,12 @@ def equals(self, other) -> bool: # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT # equal. - return np.array_equal(self._mask, other._mask) and np.array_equal( - self._data[~self._mask], - other._data[~other._mask], - equal_nan=True, - ) + if not np.array_equal(self._mask, other._mask): + return False + + left = self._data[~self._mask] + right = other._data[~other._mask] + return array_equivalent(left, right, dtype_equal=True) def _reduce(self, name: str, *, skipna: bool = True, **kwargs): if name in {"any", "all"}: diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index c457b52cf4b0e..eea3fa37b7435 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -475,8 +475,8 @@ def array_equivalent( return np.array_equal(left, right) -def _array_equivalent_float(left, right): - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() +def _array_equivalent_float(left, right) -> bool: + return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all()) def _array_equivalent_datetimelike(left, right):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44432
2021-11-13T21:17:18Z
2021-11-13T23:26:01Z
2021-11-13T23:26:01Z
2021-11-13T23:26:58Z
BUG: to_datetime/to_timedelta with memoryview
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 017acb8ef930b..4b7325116cc2c 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -573,6 +573,7 @@ Conversion - Bug in :class:`IntegerDtype` not allowing coercion from string dtype (:issue:`25472`) - Bug in :func:`to_datetime` with ``arg:xr.DataArray`` and ``unit="ns"`` specified raises TypeError (:issue:`44053`) - Bug in :meth:`DataFrame.convert_dtypes` not returning the correct type when a subclass does not overload :meth:`_constructor_sliced` (:issue:`43201`) +- Bug in :func:`to_datetime` and :func:`to_timedelta` when passed a ``memoryview`` object (:issue:`44431`) - Strings diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 25142cad9a30d..ba5a46ab62543 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -14,6 +14,7 @@ from cpython.datetime cimport ( PyTime_Check, ) from cpython.iterator cimport PyIter_Check +from cpython.memoryview cimport PyMemoryView_Check from cpython.number cimport PyNumber_Check from cpython.object cimport ( Py_EQ, @@ -1107,7 +1108,7 @@ cdef inline bint c_is_list_like(object obj, bint allow_sets) except -1: and not cnp.PyArray_IsZeroDim(obj) # exclude sets if allow_sets is False and not (allow_sets is False and isinstance(obj, abc.Set)) - ) + ) or (PyMemoryView_Check(obj) and obj.ndim != 0) _TYPE_MAP = { diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index ff7c7a7782da3..6e2a9fa453d50 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1429,11 +1429,10 @@ def test_from_obscure_array(dtype, array_likes): expected = func(data)[0] tm.assert_equal(result, expected) - # FIXME: dask and memoryview both break on these - # func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] - # result = func(arr).array - # expected = func(data).array - # tm.assert_equal(result, expected) + func = {"M8[ns]": 
pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) # Let's check the Indexes while we're here idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 5936248456ca7..9cec86df1fbda 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -118,6 +118,12 @@ def coerce(request): (object(), False, "object"), (np.nan, False, "NaN"), (None, False, "None"), + # error: Argument 1 to "memoryview" has incompatible type "ndarray[Any, Any]"; + # expected "Union[bytes, bytearray, memoryview, array[Any], mmap]" + (memoryview(np.array([1, 2])), True, "memoryview-1d"), # type: ignore[arg-type] + # error: Argument 1 to "memoryview" has incompatible type "ndarray[Any, Any]"; + # expected "Union[bytes, bytearray, memoryview, array[Any], mmap]" + (memoryview(np.array(3.0)), False, "memoryview-0d"), # type: ignore[arg-type] ] objs, expected, ids = zip(*ll_params)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44431
2021-11-13T19:53:27Z
2021-11-24T23:54:41Z
null
2021-11-24T23:54:50Z
Doc: Clean obj.empty docs to describe Series/DataFrame
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 23608cf0192df..30e057cac968f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2001,15 +2001,15 @@ def __contains__(self, key) -> bool_t: @property def empty(self) -> bool_t: """ - Indicator whether DataFrame is empty. + Indicator whether Series/DataFrame is empty. - True if DataFrame is entirely empty (no items), meaning any of the + True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool - If DataFrame is empty, return True, if not return False. + If Series/DataFrame is empty, return True, if not return False. See Also -------- @@ -2019,7 +2019,7 @@ def empty(self) -> bool_t: Notes ----- - If DataFrame contains only NaNs, it is still not considered empty. See + If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples @@ -2045,6 +2045,16 @@ def empty(self) -> bool_t: False >>> df.dropna().empty True + + >>> ser_empty = pd.Series({'A' : []}) + >>> ser_empty + A [] + dtype: object + >>> ser_empty.empty + False + >>> ser_empty = pd.Series() + >>> ser_empty.empty + True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
- [x] closes #42697
https://api.github.com/repos/pandas-dev/pandas/pulls/44430
2021-11-13T19:37:30Z
2021-11-14T02:23:35Z
2021-11-14T02:23:35Z
2021-11-14T19:41:40Z
WIP/TST: troubleshoot xfails
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index abb70089f1fef..4aa150afadef6 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -33,10 +33,7 @@ def test_from_records_with_datetimes(self): arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] dtypes = [("EXPIRY", "<M8[ns]")] - try: - recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) - except (ValueError): - pytest.skip("known failure of numpy rec array creation") + recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) result = DataFrame.from_records(recarray) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 9db6567ca1b56..c6afa3803bcb6 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -651,10 +651,6 @@ def test_get_indexer_mixed_dtypes(self, target): ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]), ], ) - # FIXME: these warnings are flaky GH#36131 - @pytest.mark.filterwarnings( - "ignore:Comparison of Timestamp with datetime.date:FutureWarning" - ) def test_get_indexer_out_of_bounds_date(self, target, positions): values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index ab0199dca3f24..9d18fcbe551ac 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -3304,6 +3304,7 @@ def test_repr_html_ipython_config(ip): "encoding, data", [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")], ) +@td.check_file_leaks def test_filepath_or_buffer_arg( method, filepath_or_buffer, diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py index 55b193903bce0..a53cc1fc7390c 
100644 --- a/pandas/tests/io/parser/test_dialect.py +++ b/pandas/tests/io/parser/test_dialect.py @@ -9,6 +9,7 @@ import pytest from pandas.errors import ParserWarning +import pandas.util._test_decorators as td from pandas import DataFrame import pandas._testing as tm @@ -84,6 +85,7 @@ class InvalidDialect: [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"], ) @pytest.mark.parametrize("value", ["dialect", "default", "other"]) +@td.check_file_leaks def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value): # see gh-23761. dialect_name, dialect_kwargs = custom_dialect diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index e6dfe64df3864..23235efb10de4 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -580,8 +580,7 @@ def check_col(key, name, size): & (df_new.A > 0) & (df_new.B < 0) ] - tm.assert_frame_equal(result, expected, check_freq=False) - # FIXME: 2020-05-07 freq check randomly fails in the CI + tm.assert_frame_equal(result, expected) # yield an empty frame result = store.select("df", "string='foo' and string2='cool'") @@ -610,9 +609,7 @@ def check_col(key, name, size): result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"]) expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")] - tm.assert_frame_equal(result, expected, check_freq=False) - # FIXME: 2020-12-07 intermittent build failures here with freq of - # None instead of BDay(4) + tm.assert_frame_equal(result, expected) with ensure_clean_store(setup_path) as store: # doc example part 2 diff --git a/pandas/tests/io/pytables/test_select.py b/pandas/tests/io/pytables/test_select.py index fc8d4506abda0..eb4c425c332ff 100644 --- a/pandas/tests/io/pytables/test_select.py +++ b/pandas/tests/io/pytables/test_select.py @@ -838,8 +838,7 @@ def test_select_as_multiple(setup_path): ) expected = concat([df1, df2], axis=1) expected = 
expected[(expected.A > 0) & (expected.B > 0)] - tm.assert_frame_equal(result, expected, check_freq=False) - # FIXME: 2021-01-20 this is failing with freq None vs 4B on some builds + tm.assert_frame_equal(result, expected) # multiple (diff selector) result = store.select_as_multiple( diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 83c86d4da05e6..bb63902bade7b 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -648,9 +648,7 @@ def test_coordinates(setup_path): expected = concat([df1, df2], axis=1) expected = expected[(expected.A > 0) & (expected.B > 0)] - tm.assert_frame_equal(result, expected, check_freq=False) - # FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None - # but expect freq="18B" + tm.assert_frame_equal(result, expected) # pass array/mask as the coordinates with ensure_clean_store(setup_path) as store: diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 1d9b14a739848..940b0e8988d65 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -81,20 +81,20 @@ def test_dont_register_by_default(self): @td.skip_if_no("matplotlib", min_version="3.1.3") def test_registering_no_warning(self): plt = pytest.importorskip("matplotlib.pyplot") - s = Series(range(12), index=date_range("2017", periods=12)) + ser = Series(range(12), index=date_range("2017", periods=12)) _, ax = plt.subplots() # Set to the "warn" state, in case this isn't the first test run register_matplotlib_converters() - ax.plot(s.index, s.values) + ax.plot(ser.index, ser.values) plt.close() def test_pandas_plots_register(self): plt = pytest.importorskip("matplotlib.pyplot") - s = Series(range(12), index=date_range("2017", periods=12)) + ser = Series(range(12), index=date_range("2017", periods=12)) # Set to the "warn" state, in case this isn't the first test run with 
tm.assert_produces_warning(None) as w: - s.plot() + ser.plot() try: assert len(w) == 0 @@ -118,17 +118,17 @@ def test_option_no_warning(self): pytest.importorskip("matplotlib.pyplot") ctx = cf.option_context("plotting.matplotlib.register_converters", False) plt = pytest.importorskip("matplotlib.pyplot") - s = Series(range(12), index=date_range("2017", periods=12)) + ser = Series(range(12), index=date_range("2017", periods=12)) _, ax = plt.subplots() # Test without registering first, no warning with ctx: - ax.plot(s.index, s.values) + ax.plot(ser.index, ser.values) # Now test with registering register_matplotlib_converters() with ctx: - ax.plot(s.index, s.values) + ax.plot(ser.index, ser.values) plt.close() def test_registry_resets(self): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index f74cab9ed04da..44113d1e217fd 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -179,12 +179,7 @@ def check_format_of_first_point(ax, expected_string): first_line = ax.get_lines()[0] first_x = first_line.get_xdata()[0].ordinal first_y = first_line.get_ydata()[0] - try: - assert expected_string == ax.format_coord(first_x, first_y) - except (ValueError): - pytest.skip( - "skipping test because issue forming test comparison GH7664" - ) + assert expected_string == ax.format_coord(first_x, first_y) annual = Series(1, index=date_range("2014-01-01", periods=3, freq="A-DEC")) _, ax = self.plt.subplots() diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 403f4a2c06df1..85942839951fe 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -236,6 +236,11 @@ def test_hist_kde_color(self): @td.skip_if_no_mpl class TestDataFramePlots(TestPlotBase): + @pytest.mark.xfail( + reason="2021-12-06 ValueError: view limit minimum -36435.78943428784 is " + "less than 1 and is an invalid 
Matplotlib date value. This often happens " + "if you pass a non-datetime value to an axis that has datetime units" + ) def test_hist_df_legacy(self): from matplotlib.patches import Rectangle
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44429
2021-11-13T19:34:50Z
2021-12-08T16:44:00Z
null
2021-12-08T16:44:05Z
BUG: DataFrame with mismatched NA value and dtype
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 59b164c156d79..92fadf801cec7 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -608,6 +608,7 @@ Missing ^^^^^^^ - Bug in :meth:`DataFrame.fillna` with limit and no method ignores axis='columns' or ``axis = 1`` (:issue:`40989`) - Bug in :meth:`DataFrame.fillna` not replacing missing values when using a dict-like ``value`` and duplicate column names (:issue:`43476`) +- Bug in constructing a :class:`DataFrame` with a dictionary ``np.datetime64`` as a value and ``dtype='timedelta64[ns]'``, or vice-versa, incorrectly casting instead of raising (:issue:`??`) - MultiIndex diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index e6d6b561803d6..a766f8321a641 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -443,15 +443,18 @@ def dict_to_mgr( if missing.any() and not is_integer_dtype(dtype): nan_dtype: DtypeObj - if dtype is None or ( - isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible) - ): + if dtype is not None: + # calling sanitize_array ensures we don't mix-and-match + # NA dtypes + midxs = missing.values.nonzero()[0] + for i in midxs: + arr = sanitize_array(arrays.iat[i], index, dtype=dtype) + arrays.iat[i] = arr + else: # GH#1783 nan_dtype = np.dtype("object") - else: - nan_dtype = dtype - val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) - arrays.loc[missing] = [val] * missing.sum() + val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) + arrays.loc[missing] = [val] * missing.sum() arrays = list(arrays) columns = ensure_index(columns) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index f92bbe1c718ab..52797862afa14 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2903,14 +2903,7 @@ def 
test_from_timedelta64_scalar_object(self, constructor): assert isinstance(get1(obj), np.timedelta64) @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) - def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request): - node = request.node - params = node.callspec.params - if params["frame_or_series"] is DataFrame and params["constructor"] is dict: - mark = pytest.mark.xfail( - reason="DataFrame incorrectly allows mismatched datetimelike" - ) - node.add_marker(mark) + def test_from_scalar_datetimelike_mismatched(self, constructor, cls): scalar = cls("NaT", "ns") dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls]
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry <s>Also fixed the FloatingArray.equals bug on older numpys</s>no longer needed, so reverted
https://api.github.com/repos/pandas-dev/pandas/pulls/44428
2021-11-13T19:20:24Z
2021-11-14T02:05:12Z
2021-11-14T02:05:12Z
2021-11-14T15:20:30Z
[DOC] Transferring data from the 6th column to the first. #44379
diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst index a589ad96ca7d9..40ff1049e5820 100644 --- a/doc/source/user_guide/basics.rst +++ b/doc/source/user_guide/basics.rst @@ -2051,32 +2051,33 @@ The following table lists all of pandas extension types. For methods requiring ` arguments, strings can be specified as indicated. See the respective documentation sections for more on each type. -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| Kind of Data | Data Type | Scalar | Array | String Aliases | Documentation | -+===================+===========================+====================+===============================+=========================================+===============================+ -| tz-aware datetime | :class:`DatetimeTZDtype` | :class:`Timestamp` | :class:`arrays.DatetimeArray` | ``'datetime64[ns, <tz>]'`` | :ref:`timeseries.timezone` | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| Categorical | :class:`CategoricalDtype` | (none) | :class:`Categorical` | ``'category'`` | :ref:`categorical` | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| period | :class:`PeriodDtype` | :class:`Period` | :class:`arrays.PeriodArray` | ``'period[<freq>]'``, | :ref:`timeseries.periods` | -| (time spans) | | | | ``'Period[<freq>]'`` | | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| sparse | :class:`SparseDtype` | (none) | :class:`arrays.SparseArray` | ``'Sparse'``, ``'Sparse[int]'``, | :ref:`sparse` | -| | | | | 
``'Sparse[float]'`` | | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| intervals | :class:`IntervalDtype` | :class:`Interval` | :class:`arrays.IntervalArray` | ``'interval'``, ``'Interval'``, | :ref:`advanced.intervalindex` | -| | | | | ``'Interval[<numpy_dtype>]'``, | | -| | | | | ``'Interval[datetime64[ns, <tz>]]'``, | | -| | | | | ``'Interval[timedelta64[<freq>]]'`` | | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| nullable integer + :class:`Int64Dtype`, ... | (none) | :class:`arrays.IntegerArray` | ``'Int8'``, ``'Int16'``, ``'Int32'``, | :ref:`integer_na` | -| | | | | ``'Int64'``, ``'UInt8'``, ``'UInt16'``, | | -| | | | | ``'UInt32'``, ``'UInt64'`` | | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| Strings | :class:`StringDtype` | :class:`str` | :class:`arrays.StringArray` | ``'string'`` | :ref:`text` | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ -| Boolean (with NA) | :class:`BooleanDtype` | :class:`bool` | :class:`arrays.BooleanArray` | ``'boolean'`` | :ref:`api.arrays.bool` | -+-------------------+---------------------------+--------------------+-------------------------------+-----------------------------------------+-------------------------------+ ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| Kind of Data | Data Type | Scalar | Array | String Aliases | 
++=================================================+===============+===========+========+===========+===============================+========================================+ +| :ref:`tz-aware datetime <timeseries.timezone>` | :class:`DatetimeTZDtype` | :class:`Timestamp` | :class:`arrays.DatetimeArray` | ``'datetime64[ns, <tz>]'`` | +| | | | | | ++-------------------------------------------------+---------------+-----------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`Categorical <categorical>` | :class:`CategoricalDtype` | (none) | :class:`Categorical` | ``'category'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`period (time spans) <timeseries.periods>` | :class:`PeriodDtype` | :class:`Period` | :class:`arrays.PeriodArray` | ``'period[<freq>]'``, | +| | | | ``'Period[<freq>]'`` | | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`sparse <sparse>` | :class:`SparseDtype` | (none) | :class:`arrays.SparseArray` | ``'Sparse'``, ``'Sparse[int]'``, | +| | | | | ``'Sparse[float]'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`intervals <advanced.intervalindex>` | :class:`IntervalDtype` | :class:`Interval` | :class:`arrays.IntervalArray` | ``'interval'``, ``'Interval'``, | +| | | | | ``'Interval[<numpy_dtype>]'``, | +| | | | | ``'Interval[datetime64[ns, <tz>]]'``, | +| | | | | ``'Interval[timedelta64[<freq>]]'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`nullable 
integer <integer_na>` | :class:`Int64Dtype`, ... | (none) | :class:`arrays.IntegerArray` | ``'Int8'``, ``'Int16'``, ``'Int32'``, | +| | | | | ``'Int64'``, ``'UInt8'``, ``'UInt16'``,| +| | | | | ``'UInt32'``, ``'UInt64'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`Strings <text>` | :class:`StringDtype` | :class:`str` | :class:`arrays.StringArray` | ``'string'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ +| :ref:`Boolean (with NA) <api.arrays.bool>` | :class:`BooleanDtype` | :class:`bool` | :class:`arrays.BooleanArray` | ``'boolean'`` | ++-------------------------------------------------+---------------------------+--------------------+-------------------------------+----------------------------------------+ pandas has two ways to store strings.
Transferring data from the 6th column to the first. This should resolve issue #44379
https://api.github.com/repos/pandas-dev/pandas/pulls/44427
2021-11-13T18:34:43Z
2021-11-25T17:23:36Z
2021-11-25T17:23:36Z
2021-11-25T17:23:50Z
TYP: improve typing for DataFrame.to_string
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b88c97b8e988d..2f99785674a1a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -989,15 +989,13 @@ def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ - buf = StringIO("") if self._info_repr(): + buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() - self.to_string(buf=buf, **repr_params) - - return buf.getvalue() + return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ @@ -1006,7 +1004,7 @@ def _repr_html_(self) -> str | None: Mainly for IPython notebook. """ if self._info_repr(): - buf = StringIO("") + buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) @@ -1043,6 +1041,56 @@ def _repr_html_(self) -> str | None: else: return None + @overload + def to_string( + self, + buf: None = ..., + columns: Sequence[str] | None = ..., + col_space: int | list[int] | dict[Hashable, int] | None = ..., + header: bool | Sequence[str] = ..., + index: bool = ..., + na_rep: str = ..., + formatters: fmt.FormattersType | None = ..., + float_format: fmt.FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool = ..., + decimal: str = ..., + line_width: int | None = ..., + min_rows: int | None = ..., + max_colwidth: int | None = ..., + encoding: str | None = ..., + ) -> str: + ... 
+ + @overload + def to_string( + self, + buf: FilePathOrBuffer[str], + columns: Sequence[str] | None = ..., + col_space: int | list[int] | dict[Hashable, int] | None = ..., + header: bool | Sequence[str] = ..., + index: bool = ..., + na_rep: str = ..., + formatters: fmt.FormattersType | None = ..., + float_format: fmt.FloatFormatType | None = ..., + sparsify: bool | None = ..., + index_names: bool = ..., + justify: str | None = ..., + max_rows: int | None = ..., + max_cols: int | None = ..., + show_dimensions: bool = ..., + decimal: str = ..., + line_width: int | None = ..., + min_rows: int | None = ..., + max_colwidth: int | None = ..., + encoding: str | None = ..., + ) -> None: + ... + @Substitution( header_type="bool or sequence of strings", header="Write out the column names. If a list of strings " @@ -1058,7 +1106,7 @@ def to_string( self, buf: FilePathOrBuffer[str] | None = None, columns: Sequence[str] | None = None, - col_space: int | None = None, + col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN",
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44426
2021-11-13T18:33:54Z
2021-11-18T09:38:11Z
2021-11-18T09:38:11Z
2021-11-18T10:09:52Z
BUG: Using boolean keys to select a column (GH44322)
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index a593a03de5c25..02e2523d265e4 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -605,7 +605,7 @@ Indexing - Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`) - Bug in :meth:`Series.__setitem__` with a boolean mask indexer setting a listlike value of length 1 incorrectly broadcasting that value (:issue:`44265`) - Bug in :meth:`DataFrame.loc.__setitem__` and :meth:`DataFrame.iloc.__setitem__` with mixed dtypes sometimes failing to operate in-place (:issue:`44345`) -- +- Bug in :meth:`DataFrame.loc.__getitem__` incorrectly raising ``KeyError`` when selecting a single column with a boolean key (:issue:`44322`). Missing ^^^^^^^ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 91f1415178471..7d12a27aed84a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -994,7 +994,7 @@ def _validate_key(self, key, axis: int): # slice of labels (where start-end in labels) # slice of integers (only if in the labels) # boolean not in slice and with boolean index - if isinstance(key, bool) and not is_bool_dtype(self.obj.index): + if isinstance(key, bool) and not is_bool_dtype(self.obj._get_axis(axis)): raise KeyError( f"{key}: boolean label can not be used without a boolean index" ) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 63d1568ed4d43..a07928b40ad0e 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -178,6 +178,26 @@ def test_column_types_consistent(self): ) tm.assert_frame_equal(df, expected) + @pytest.mark.parametrize( + "obj, key, exp", + [ + ( + DataFrame([[1]], columns=Index([False])), + IndexSlice[:, False], + Series([1], name=False), + ), + (Series([1], index=Index([False])), False, [1]), + (DataFrame([[1]], index=Index([False])), 
False, Series([1], name=False)), + ], + ) + def test_loc_getitem_single_boolean_arg(self, obj, key, exp): + # GH 44322 + res = obj.loc[key] + if isinstance(exp, (DataFrame, Series)): + tm.assert_equal(res, exp) + else: + assert res == exp + class TestLoc2: # TODO: better name, just separating out things that rely on base class
- [x] closes #44322 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44425
2021-11-13T12:51:42Z
2021-11-15T18:22:56Z
2021-11-15T18:22:55Z
2021-11-15T18:22:59Z
BUG: AttributeError: 'BooleanArray' object has no attribute 'sum' while infer types
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 339585810bec1..b514b3855e42f 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -698,7 +698,7 @@ def _infer_types(self, values, na_values, try_num_bool=True): # error: Argument 2 to "isin" has incompatible type "List[Any]"; expected # "Union[Union[ExtensionArray, ndarray], Index, Series]" mask = algorithms.isin(values, list(na_values)) # type: ignore[arg-type] - na_count = mask.sum() + na_count = mask.astype("uint8").sum() if na_count > 0: if is_integer_dtype(values): values = values.astype(np.float64) diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py index 646cb2029919d..26e3e9c182b42 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -297,3 +297,24 @@ def test_multiindex_columns_index_col_with_data(all_parsers): index=Index(["data"]), ) tm.assert_frame_equal(result, expected) + + +@skip_pyarrow +def test_infer_types_boolean_sum(all_parsers): + # GH#44079 + parser = all_parsers + result = parser.read_csv( + StringIO("0,1"), + names=["a", "b"], + index_col=["a"], + dtype={"a": "UInt8"}, + ) + expected = DataFrame( + data={ + "a": [ + 0, + ], + "b": [1], + } + ).set_index("a") + tm.assert_frame_equal(result, expected, check_index_type=False)
- [x] closes #44079 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44423
2021-11-13T06:12:28Z
2021-11-14T04:10:15Z
null
2021-11-14T04:10:15Z
Bug gh44079
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 339585810bec1..b514b3855e42f 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -698,7 +698,7 @@ def _infer_types(self, values, na_values, try_num_bool=True): # error: Argument 2 to "isin" has incompatible type "List[Any]"; expected # "Union[Union[ExtensionArray, ndarray], Index, Series]" mask = algorithms.isin(values, list(na_values)) # type: ignore[arg-type] - na_count = mask.sum() + na_count = mask.astype("uint8").sum() if na_count > 0: if is_integer_dtype(values): values = values.astype(np.float64)
- [x] closes #44079 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44422
2021-11-13T03:50:43Z
2021-11-13T06:10:20Z
null
2021-11-13T06:10:20Z
DOC: whatsnew for the improvement to warning messages
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 8db9be21ca4ef..94606e049018e 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -15,6 +15,31 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +.. _whatsnew_140.enhancements.warning_lineno: + +Improved warning messages +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously, warning messages may have pointed to lines within the pandas library. Running the script ``setting_with_copy_warning.py`` + +.. code-block:: python + + import pandas as pd + + df = pd.DataFrame({'a': [1, 2, 3]}) + df[:2].loc[:, 'a'] = 5 + +with pandas 1.3 resulted in:: + + .../site-packages/pandas/core/indexing.py:1951: SettingWithCopyWarning: + A value is trying to be set on a copy of a slice from a DataFrame. + +This made it difficult to determine where the warning was being generated from. Now pandas will inspect the call stack, reporting the first line outside of the pandas library that gave rise to the warning. The output of the above script is now:: + + setting_with_copy_warning.py:4: SettingWithCopyWarning: + A value is trying to be set on a copy of a slice from a DataFrame. + + .. _whatsnew_140.enhancements.numeric_index: More flexible numeric dtypes for indexes
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry The use of find_stack_level is a significant improvement for users, thought it should go in the whatsnew. It appears to me that the ipython directive in sphinx will suppress any warning message, so I've just made it a literal block. ![image](https://user-images.githubusercontent.com/45562402/141593623-2ff651b4-9eb4-4bc8-8b79-0ee26a6676fb.png) cc @phofl
https://api.github.com/repos/pandas-dev/pandas/pulls/44419
2021-11-12T23:07:11Z
2021-11-13T17:21:03Z
2021-11-13T17:21:03Z
2021-11-13T17:34:57Z
DOC: Add how=cross description to join
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b01de5dec610d..212bb63693d56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9155,6 +9155,11 @@ def join( * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. + * cross: creates the cartesian product from both frames, preserves the order + of the left keys. + + .. versionadded:: 1.2.0 + lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default ''
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44418
2021-11-12T22:44:01Z
2021-11-13T17:05:35Z
2021-11-13T17:05:35Z
2021-11-13T18:48:29Z
BUG: DataFrame.astype(series) with duplicate columns
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 59b164c156d79..8964f5e3ffad2 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -714,6 +714,7 @@ Styler Other ^^^^^ +- Bug in :meth:`DataFrame.astype` with non-unique columns and a :class:`Series` ``dtype`` argument (:issue:`44417`) - Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`) - Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`) - Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6b51456006021..eb0c5a236c2da 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5826,14 +5826,22 @@ def astype( "Only a column name can be used for the " "key in a dtype mappings argument." 
) + + # GH#44417 cast to Series so we can use .iat below, which will be + # robust in case we + from pandas import Series + + dtype_ser = Series(dtype, dtype=object) + dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) + results = [] - for col_name, col in self.items(): - if col_name in dtype: - results.append( - col.astype(dtype=dtype[col_name], copy=copy, errors=errors) - ) + for i, (col_name, col) in enumerate(self.items()): + cdt = dtype_ser.iat[i] + if isna(cdt): + res_col = col.copy() if copy else col else: - results.append(col.copy() if copy else col) + res_col = col.astype(dtype=cdt, copy=copy, errors=errors) + results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 3c45f7263265c..b8354e800753d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -992,7 +992,7 @@ def _wrap_applied_output( result = self.obj._constructor( index=self.grouper.result_index, columns=data.columns ) - result = result.astype(data.dtypes.to_dict(), copy=False) + result = result.astype(data.dtypes, copy=False) return result # GH12824 diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 9f1f953cecc7e..e5e07761fd755 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -261,6 +261,26 @@ def test_astype_duplicate_col(self): expected = concat([a1_str, b, a2_str], axis=1) tm.assert_frame_equal(result, expected) + def test_astype_duplicate_col_series_arg(self): + # GH#44417 + vals = np.random.randn(3, 4) + df = DataFrame(vals, columns=["A", "B", "C", "A"]) + dtypes = df.dtypes + dtypes.iloc[0] = str + dtypes.iloc[2] = "Float64" + + result = df.astype(dtypes) + expected = DataFrame( + { + 0: vals[:, 0].astype(str), + 1: vals[:, 1], + 2: pd.array(vals[:, 2], dtype="Float64"), 
+ 3: vals[:, 3], + } + ) + expected.columns = df.columns + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( "dtype", [ diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 203d8abb465d0..f632da9616124 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2031,6 +2031,16 @@ def get_result(): tm.assert_equal(result, expected) +def test_empty_groupby_apply_nonunique_columns(): + # GH#44417 + df = DataFrame(np.random.randn(0, 4)) + df[3] = df[3].astype(np.int64) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1]) + res = gb.apply(lambda x: x) + assert (res.dtypes == df.dtypes).all() + + def test_tuple_as_grouping(): # https://github.com/pandas-dev/pandas/issues/18314 df = DataFrame(
- [ ] closes #xxxx - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44417
2021-11-12T22:24:42Z
2021-11-14T02:12:10Z
2021-11-14T02:12:10Z
2021-11-14T06:03:30Z
ENH: Use find_stack_level
diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index c9f7fd43c1050..05cd3a3a72257 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -11,6 +11,7 @@ ) from pandas._libs.missing import is_matching_na import pandas._libs.testing as _testing +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_bool, @@ -106,7 +107,7 @@ def assert_almost_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # https://github.com/python/mypy/issues/7642 # error: Argument 1 to "_get_tol_from_less_precise" has incompatible @@ -340,7 +341,7 @@ def _get_ilevel_values(index, level): "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # https://github.com/python/mypy/issues/7642 # error: Argument 1 to "_get_tol_from_less_precise" has incompatible @@ -818,7 +819,7 @@ def assert_extension_array_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -964,7 +965,7 @@ def assert_series_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -1247,7 +1248,7 @@ def assert_frame_equal( "is deprecated and will be removed in a future version. 
" "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) diff --git a/pandas/io/common.py b/pandas/io/common.py index be6577e646ac3..12c7afc8ee2e4 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -49,6 +49,7 @@ import_lzma, ) from pandas.compat._optional import import_optional_dependency +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_file_like @@ -270,7 +271,7 @@ def _get_filepath_or_buffer( warnings.warn( "compression has no effect when passing a non-binary object as input.", RuntimeWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) compression_method = None diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index f079a25f69fec..ef60afa195234 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -4,6 +4,7 @@ import numpy as np from pandas._libs.tslibs import parsing +from pandas.util._exceptions import find_stack_level def parse_date_time(date_col, time_col): @@ -18,7 +19,7 @@ def parse_date_time(date_col, time_col): Use pd.to_datetime(date_col + " " + time_col).to_pydatetime() instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) date_col = _maybe_cast(date_col) time_col = _maybe_cast(time_col) @@ -38,7 +39,7 @@ def parse_date_fields(year_col, month_col, day_col): np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) @@ -63,7 +64,7 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_ np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. 
""", # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) @@ -89,7 +90,7 @@ def generic_parser(parse_func, *cols): Use pd.to_datetime instead. """, FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) N = _check_columns(cols) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index e543c9161a26e..1caf334f9607e 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -833,7 +833,7 @@ def __new__( warnings.warn( "Use of **kwargs is deprecated, use engine_kwargs instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # only switch class if generic(ExcelWriter) @@ -868,7 +868,7 @@ def __new__( "deprecated and will also raise a warning, it can " "be globally set and the warning suppressed.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) cls = get_writer(engine) diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 339585810bec1..6374f52f6964b 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -32,6 +32,7 @@ ParserError, ParserWarning, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( @@ -558,7 +559,7 @@ def _convert_to_ndarrays( f"for column {c} - only the converter will be used." ), ParserWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) try: @@ -830,7 +831,7 @@ def _check_data_length(self, columns: list[str], data: list[ArrayLike]) -> None: "Length of header or names does not match length of data. 
This leads " "to a loss of data with index_col=False.", ParserWarning, - stacklevel=6, + stacklevel=find_stack_level(), ) def _evaluate_usecols(self, usecols, names): diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 352dd998dda0f..db750cded45e5 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -10,6 +10,7 @@ FilePathOrBuffer, ) from pandas.errors import DtypeWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -387,7 +388,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: f"Specify dtype option on import or set low_memory=False." ] ) - warnings.warn(warning_message, DtypeWarning, stacklevel=8) + warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) return result diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index b0e868b260369..4d596aa2f3fa6 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -24,6 +24,7 @@ EmptyDataError, ParserError, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import is_dict_like @@ -555,7 +556,7 @@ def _handle_usecols( "Defining usecols with out of bounds indices is deprecated " "and will raise a ParserError in a future version.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) col_indices = self.usecols diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 6d3cc84a31d05..6fb9497dbc1d6 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1041,7 +1041,7 @@ def _clean_options(self, options, engine): "engine='python'." 
), ParserWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) index_col = options["index_col"] @@ -1573,7 +1573,9 @@ def _merge_with_dialect_properties( conflict_msgs.append(msg) if conflict_msgs: - warnings.warn("\n\n".join(conflict_msgs), ParserWarning, stacklevel=2) + warnings.warn( + "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + ) kwds[param] = dialect_val return kwds diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8c8e9b9feeb80..0e886befb5f2f 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -45,6 +45,7 @@ from pandas.compat.pickle_compat import patch_pickle from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -2190,7 +2191,9 @@ def update_info(self, info): # frequency/name just warn if key in ["freq", "index_name"]: ws = attribute_conflict_doc % (key, existing_value, value) - warnings.warn(ws, AttributeConflictWarning, stacklevel=6) + warnings.warn( + ws, AttributeConflictWarning, stacklevel=find_stack_level() + ) # reset idx[key] = None @@ -3080,7 +3083,7 @@ def write_array( pass else: ws = performance_doc % (inferred_type, key, items) - warnings.warn(ws, PerformanceWarning, stacklevel=7) + warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ec5262ee3a04c..867ce52cbde6f 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -28,6 +28,7 @@ from pandas._typing import DtypeArg from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64tz_dtype, @@ -1159,7 +1160,7 @@ def _sqlalchemy_type(self, col): "the 'timedelta' type 
is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) return BigInteger elif col_type == "floating": @@ -1886,7 +1887,7 @@ def _create_table_setup(self): pat = re.compile(r"\s+") column_names = [col_name for col_name, _, _ in column_names_and_types] if any(map(pat.search, column_names)): - warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) + warnings.warn(_SAFE_NAMES_WARNING, stacklevel=find_stack_level()) escape = _get_valid_sqlite_name @@ -1948,7 +1949,7 @@ def _sql_type_name(self, col): "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) col_type = "integer" diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 9679e79d8c4ba..5314a61191d78 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -13,6 +13,8 @@ import matplotlib.ticker as ticker import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -233,7 +235,7 @@ def create_subplots( "When passing multiple axes, sharex and sharey " "are ignored. 
These settings must be specified when creating axes.", UserWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if ax.size == naxes: fig = ax.flat[0].get_figure() @@ -256,7 +258,7 @@ def create_subplots( "To output multiple subplots, the figure containing " "the passed axes is being cleared.", UserWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) fig.clear() diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index c2d7f7b3f716c..fc01771507888 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -29,6 +29,7 @@ from pandas._libs.tslibs.parsing import get_rule_month from pandas._typing import npt from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64_dtype, @@ -116,7 +117,7 @@ def get_offset(name: str) -> DateOffset: "get_offset is deprecated and will be removed in a future version, " "use to_offset instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return _get_offset(name) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index f8bd1ec7bc96a..ee54b1b2074cb 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -12,6 +12,8 @@ import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import ( is_bool, is_integer, @@ -339,7 +341,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): "positional arguments for 'index' or 'columns' will raise " "a 'TypeError'." 
) - warnings.warn(msg, FutureWarning, stacklevel=4) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) out[data._get_axis_name(0)] = args[0] out[data._get_axis_name(1)] = args[1] else: diff --git a/pandas/util/testing.py b/pandas/util/testing.py index af9fe4846b27d..0ab59a202149d 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1,5 +1,7 @@ import warnings +from pandas.util._exceptions import find_stack_level + from pandas._testing import * # noqa warnings.warn( @@ -8,5 +10,5 @@ "public API at pandas.testing instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), )
Part of #44347 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44416
2021-11-12T21:24:20Z
2021-11-13T17:04:23Z
2021-11-13T17:04:23Z
2021-11-13T21:16:16Z
TST: de-duplicate assert_slics_equivalent
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index c2c55a4060f7a..4f9ef2c3c3ffa 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -82,6 +82,7 @@ assert_extension_array_equal, assert_frame_equal, assert_index_equal, + assert_indexing_slices_equivalent, assert_interval_array_equal, assert_is_sorted, assert_is_valid_plot_return_object, diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index c9f7fd43c1050..82253b73a824f 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -1444,3 +1444,17 @@ def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> Related to issue #37609 """ return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice): + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not ser.index.is_integer(): + # For integer indices, .loc and plain getitem are position-based. 
+ assert_series_equal(ser[l_slc], expected) diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 42edaa2fe6c3a..55d45a21d643a 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -702,32 +702,30 @@ def test_per_axis_per_level_setitem(self): tm.assert_frame_equal(df, expected) def test_multiindex_label_slicing_with_negative_step(self): - s = Series( + ser = Series( np.arange(20), MultiIndex.from_product([list("abcde"), np.arange(4)]) ) SLC = pd.IndexSlice - def assert_slices_equivalent(l_slc, i_slc): - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) + tm.assert_indexing_slices_equivalent(ser, SLC[::-1], SLC[::-1]) - assert_slices_equivalent(SLC[::-1], SLC[::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d"::-1], SLC[15::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",)::-1], SLC[15::-1]) - assert_slices_equivalent(SLC["d"::-1], SLC[15::-1]) - assert_slices_equivalent(SLC[("d",)::-1], SLC[15::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:"d":-1], SLC[:11:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("d",):-1], SLC[:11:-1]) - assert_slices_equivalent(SLC[:"d":-1], SLC[:11:-1]) - assert_slices_equivalent(SLC[:("d",):-1], SLC[:11:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d":"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d":("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["b":"d":-1], SLC[:0]) - assert_slices_equivalent(SLC["d":"b":-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC[("d",):"b":-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC["d":("b",):-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC[("d",):("b",):-1], SLC[15:3:-1]) - 
assert_slices_equivalent(SLC["b":"d":-1], SLC[:0]) - - assert_slices_equivalent(SLC[("c", 2)::-1], SLC[10::-1]) - assert_slices_equivalent(SLC[:("c", 2):-1], SLC[:9:-1]) - assert_slices_equivalent(SLC[("e", 0):("c", 2):-1], SLC[16:9:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("c", 2)::-1], SLC[10::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("c", 2):-1], SLC[:9:-1]) + tm.assert_indexing_slices_equivalent( + ser, SLC[("e", 0):("c", 2):-1], SLC[16:9:-1] + ) def test_multiindex_slice_first_level(self): # GH 12697 diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 7c7e9f79a77ae..2805c8877ed78 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -709,21 +709,17 @@ def run_tests(df, rhs, right_loc, right_iloc): def test_str_label_slicing_with_negative_step(self): SLC = pd.IndexSlice - def assert_slices_equivalent(l_slc, i_slc): - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - - if not idx.is_integer: - # For integer indices, .loc and plain getitem are position-based. 
- tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]: idx = Index(idx) - s = Series(np.arange(20), index=idx) - assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1]) - assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1]) - assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0]) + ser = Series(np.arange(20), index=idx) + tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1]) + tm.assert_indexing_slices_equivalent( + ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1] + ) + tm.assert_indexing_slices_equivalent( + ser, SLC[idx[9] : idx[13] : -1], SLC[:0] + ) def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series): obj = frame_or_series(np.arange(20), index=_mklbl("A", 20)) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 6c3587c7eeada..8a34882b1e5d4 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -338,26 +338,19 @@ def test_slice_with_zero_step_raises(index, frame_or_series, indexer_sli): ], ) def test_slice_with_negative_step(index): - def assert_slices_equivalent(l_slc, i_slc): - expected = ts.iloc[i_slc] - - tm.assert_series_equal(ts[l_slc], expected) - tm.assert_series_equal(ts.loc[l_slc], expected) - keystr1 = str(index[9]) keystr2 = str(index[13]) - box = type(index[0]) - ts = Series(np.arange(20), index) + ser = Series(np.arange(20), index) SLC = IndexSlice - for key in [keystr1, box(keystr1)]: - assert_slices_equivalent(SLC[key::-1], SLC[9::-1]) - assert_slices_equivalent(SLC[:key:-1], SLC[:8:-1]) + for key in [keystr1, index[9]]: + tm.assert_indexing_slices_equivalent(ser, SLC[key::-1], SLC[9::-1]) + 
tm.assert_indexing_slices_equivalent(ser, SLC[:key:-1], SLC[:8:-1]) - for key2 in [keystr2, box(keystr2)]: - assert_slices_equivalent(SLC[key2:key:-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[key:key2:-1], SLC[0:0:-1]) + for key2 in [keystr2, index[13]]: + tm.assert_indexing_slices_equivalent(ser, SLC[key2:key:-1], SLC[13:8:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[key:key2:-1], SLC[0:0:-1]) def test_tuple_index():
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Also fixes one of the usages that uses `.is_integer` instead of `.is_integer()`
https://api.github.com/repos/pandas-dev/pandas/pulls/44415
2021-11-12T19:50:19Z
2021-11-13T17:05:21Z
2021-11-13T17:05:21Z
2021-11-13T17:19:28Z
TST: collect/share Index tests
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 389bf56ab6035..bb1a1bc72116d 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -44,6 +44,19 @@ class TestDataFrameSetItem: + def test_setitem_str_subclass(self): + # GH#37366 + class mystring(str): + pass + + data = ["2020-10-22 01:21:00+00:00"] + index = DatetimeIndex(data) + df = DataFrame({"a": [1]}, index=index) + df["b"] = 2 + df[mystring("c")] = 3 + expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) + tm.assert_equal(df, expected) + @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"]) def test_setitem_dtype(self, dtype, float_frame): arr = np.random.randn(len(float_frame)) diff --git a/pandas/tests/indexes/base_class/test_formats.py b/pandas/tests/indexes/base_class/test_formats.py index f07b06acbfbdb..9053d45dee623 100644 --- a/pandas/tests/indexes/base_class/test_formats.py +++ b/pandas/tests/indexes/base_class/test_formats.py @@ -122,6 +122,14 @@ def test_repr_summary(self): assert len(result) < 200 assert "..." in result + def test_summary_bug(self): + # GH#3869 + ind = Index(["{other}%s", "~:{range}:0"], name="A") + result = ind._summary() + # shouldn't be formatted accidentally. 
+ assert "~:{range}:0" in result + assert "{other}%s" in result + def test_index_repr_bool_nan(self): # GH32146 arr = Index([True, False, np.nan], dtype=object) @@ -132,3 +140,9 @@ def test_index_repr_bool_nan(self): exp2 = repr(arr) out2 = "Index([True, False, nan], dtype='object')" assert out2 == exp2 + + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = Index(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 33d2558613baf..a5ee743b5cd9a 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -69,26 +69,6 @@ def test_pickle_compat_construction(self): with pytest.raises(TypeError, match=msg): self._index_cls() - @pytest.mark.parametrize("name", [None, "new_name"]) - def test_to_frame(self, name, simple_index): - # see GH-15230, GH-22580 - idx = simple_index - - if name: - idx_name = name - else: - idx_name = idx.name or 0 - - df = idx.to_frame(name=idx_name) - - assert df.index is idx - assert len(df.columns) == 1 - assert df.columns[0] == idx_name - assert df[idx_name].values is not idx.values - - df = idx.to_frame(index=False, name=idx_name) - assert df.index is not idx - def test_shift(self, simple_index): # GH8083 test the base class for shift @@ -226,46 +206,6 @@ def test_repr_max_seq_item_setting(self, simple_index): repr(idx) assert "..." not in str(idx) - def test_copy_name(self, index): - # gh-12309: Check that the "name" argument - # passed at initialization is honored. - if isinstance(index, MultiIndex): - return - - first = type(index)(index, copy=True, name="mario") - second = type(first)(first, copy=False) - - # Even though "copy=False", we want a new object. - assert first is not second - - # Not using tm.assert_index_equal() since names differ. 
- assert index.equals(first) - - assert first.name == "mario" - assert second.name == "mario" - - s1 = Series(2, index=first) - s2 = Series(3, index=second[:-1]) - - if not isinstance(index, CategoricalIndex): - # See gh-13365 - s3 = s1 * s2 - assert s3.index.name == "mario" - - def test_copy_name2(self, index): - # gh-35592 - if isinstance(index, MultiIndex): - return - - assert index.copy(name="mario").name == "mario" - - with pytest.raises(ValueError, match="Length of new names must be 1, got 2"): - index.copy(name=["mario", "luigi"]) - - msg = f"{type(index).__name__}.name must be a hashable type" - with pytest.raises(TypeError, match=msg): - index.copy(name=[["mario"]]) - def test_ensure_copied_data(self, index): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index 36046aaeacaae..197038dbadaf7 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -254,3 +254,20 @@ def test_dti_custom_business_summary_dateutil(self): pd.bdate_range( "1/1/2005", "1/1/2009", freq="C", tz=dateutil.tz.tzutc() )._summary() + + +class TestFormat: + def test_format_with_name_time_info(self): + # bug I fixed 12/20/2011 + dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something") + + formatted = dates.format(name=True) + assert formatted[0] == "something" + + def test_format_datetime_with_time(self): + dti = DatetimeIndex([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) + + result = dti.format() + expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] + assert len(result) == 2 + assert result == expected diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c3152b77d39df..beca71969dfcd 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -21,25 
+21,12 @@ ) import pandas._testing as tm -from pandas.tseries.offsets import ( - BDay, - CDay, -) +from pandas.tseries.frequencies import to_offset START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = date_range( - "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx" - ) - - result = idx[...] - assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): # GH4226 st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") @@ -88,44 +75,17 @@ def test_getitem(self): tm.assert_index_equal(result, expected) assert result.freq == expected.freq - def test_dti_business_getitem(self): - rng = bdate_range(START, END) - smaller = rng[:5] - exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B") - tm.assert_index_equal(smaller, exp) - assert smaller.freq == exp.freq - - assert smaller.freq == rng.freq - - sliced = rng[::5] - assert sliced.freq == BDay() * 5 - - fancy_indexed = rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 
64-bit platforms - assert rng[4] == rng[np.int_(4)] - - def test_dti_business_getitem_matplotlib_hackaround(self): - rng = bdate_range(START, END) - with tm.assert_produces_warning(FutureWarning): - # GH#30588 multi-dimensional indexing deprecated - values = rng[:, None] - expected = rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) - - def test_dti_custom_getitem(self): - rng = bdate_range(START, END, freq="C") + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem(self, freq): + rng = bdate_range(START, END, freq=freq) smaller = rng[:5] - exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C") + exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq) tm.assert_index_equal(smaller, exp) assert smaller.freq == exp.freq assert smaller.freq == rng.freq sliced = rng[::5] - assert sliced.freq == CDay() * 5 + assert sliced.freq == to_offset(freq) * 5 fancy_indexed = rng[[4, 3, 2, 1, 0]] assert len(fancy_indexed) == 5 @@ -135,8 +95,9 @@ def test_dti_custom_getitem(self): # 32-bit vs. 
64-bit platforms assert rng[4] == rng[np.int_(4)] - def test_dti_custom_getitem_matplotlib_hackaround(self): - rng = bdate_range(START, END, freq="C") + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem_matplotlib_hackaround(self, freq): + rng = bdate_range(START, END, freq=freq) with tm.assert_produces_warning(FutureWarning): # GH#30588 multi-dimensional indexing deprecated values = rng[:, None] @@ -255,6 +216,12 @@ def test_where_tz(self): class TestTake: + def test_take_nan_first_datetime(self): + index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) + result = index.take([-1, 0, 1]) + expected = DatetimeIndex([index[-1], index[0], index[1]]) + tm.assert_index_equal(result, expected) + def test_take(self): # GH#10295 idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx") diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 8df8eef69e9c9..f12f32724b9e1 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -11,6 +11,7 @@ Interval, IntervalIndex, NaT, + Series, Timedelta, date_range, timedelta_range, @@ -523,3 +524,37 @@ def test_putmask_td64(self): result = idx.putmask(mask, idx[-1]) expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) tm.assert_index_equal(result, expected) + + +class TestGetValue: + @pytest.mark.parametrize("key", [[5], (2, 3)]) + def test_get_value_non_scalar_errors(self, key): + # GH#31117 + idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)]) + ser = Series(range(len(idx)), index=idx) + + msg = str(key) + with pytest.raises(InvalidIndexError, match=msg): + with tm.assert_produces_warning(FutureWarning): + idx.get_value(ser, key) + + +class TestContains: + # .__contains__, not .contains + + def test_contains_dunder(self): + + index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") + + # __contains__ requires perfect matches 
to intervals. + assert 0 not in index + assert 1 not in index + assert 2 not in index + + assert Interval(0, 1, closed="right") in index + assert Interval(0, 2, closed="right") not in index + assert Interval(0, 0.5, closed="right") not in index + assert Interval(3, 5, closed="right") not in index + assert Interval(-1, 0, closed="left") not in index + assert Interval(0, 1, closed="left") not in index + assert Interval(0, 1, closed="both") not in index diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 321d1aa34b9af..843885832690f 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.errors import InvalidIndexError - import pandas as pd from pandas import ( Index, @@ -500,23 +498,6 @@ def test_contains_method(self): ): i.contains(Interval(0, 1)) - def test_contains_dunder(self): - - index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") - - # __contains__ requires perfect matches to intervals. 
- assert 0 not in index - assert 1 not in index - assert 2 not in index - - assert Interval(0, 1, closed="right") in index - assert Interval(0, 2, closed="right") not in index - assert Interval(0, 0.5, closed="right") not in index - assert Interval(3, 5, closed="right") not in index - assert Interval(-1, 0, closed="left") not in index - assert Interval(0, 1, closed="left") not in index - assert Interval(0, 1, closed="both") not in index - def test_dropna(self, closed): expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed) @@ -908,24 +889,6 @@ def test_is_all_dates(self): year_2017_index = IntervalIndex([year_2017]) assert not year_2017_index._is_all_dates - @pytest.mark.parametrize("key", [[5], (2, 3)]) - def test_get_value_non_scalar_errors(self, key): - # GH 31117 - idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)]) - s = pd.Series(range(len(idx)), index=idx) - - msg = str(key) - with pytest.raises(InvalidIndexError, match=msg): - with tm.assert_produces_warning(FutureWarning): - idx.get_value(s, key) - - @pytest.mark.parametrize("closed", ["left", "right", "both"]) - def test_pickle_round_trip_closed(self, closed): - # https://github.com/pandas-dev/pandas/issues/35658 - idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) - def test_dir(): # GH#27571 dir(interval_index) should not raise diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py new file mode 100644 index 0000000000000..308a90e72eab5 --- /dev/null +++ b/pandas/tests/indexes/interval/test_pickle.py @@ -0,0 +1,13 @@ +import pytest + +from pandas import IntervalIndex +import pandas._testing as tm + + +class TestPickle: + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # https://github.com/pandas-dev/pandas/issues/35658 + idx = 
IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index d2b5a595b8454..cbb4ae0b0d09b 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -96,10 +96,3 @@ def test_inplace_mutation_resets_values(): assert "_values" not in mi2._cache tm.assert_almost_equal(mi2.values, new_values) assert "_values" in mi2._cache - - -def test_pickle_compat_construction(): - # this is testing for pickle compat - # need an object to create with - with pytest.raises(TypeError, match="Must pass both levels and codes"): - MultiIndex() diff --git a/pandas/tests/indexes/multi/test_pickle.py b/pandas/tests/indexes/multi/test_pickle.py new file mode 100644 index 0000000000000..1d8b721404421 --- /dev/null +++ b/pandas/tests/indexes/multi/test_pickle.py @@ -0,0 +1,10 @@ +import pytest + +from pandas import MultiIndex + + +def test_pickle_compat_construction(): + # this is testing for pickle compat + # need an object to create with + with pytest.raises(TypeError, match="Must pass both levels and codes"): + MultiIndex() diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 1b5e64bca03a0..df2f114e73df2 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -52,14 +52,6 @@ def non_comparable_idx(request): class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx") - - result = idx[...] 
- assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): idx = period_range("20010101", periods=10, freq="D", name="bob") assert idx.name == idx[1:].name diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index f7dafd78a801f..91679959e7979 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -137,6 +137,12 @@ def test_pickle_preserves_name(self, index): class TestIndexing: + def test_getitem_ellipsis(self, index): + # GH#21282 + result = index[...] + assert result.equals(index) + assert result is not index + def test_slice_keeps_name(self, index): assert index.name == index[1:].name diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7f9a5c0b50595..59ec66ecc1fe9 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1,8 +1,5 @@ from collections import defaultdict -from datetime import ( - datetime, - timedelta, -) +from datetime import datetime from io import StringIO import math import re @@ -10,10 +7,7 @@ import numpy as np import pytest -from pandas.compat import ( - IS64, - np_datetime64_compat, -) +from pandas.compat import IS64 from pandas.util._test_decorators import async_mark import pandas as pd @@ -27,7 +21,6 @@ RangeIndex, Series, TimedeltaIndex, - Timestamp, date_range, period_range, ) @@ -219,91 +212,6 @@ def test_constructor_simple_new(self, vals, dtype): result = index._simple_new(index.values, dtype) tm.assert_index_equal(result, index) - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - np.array([1, 2, 3]), - np.array([1, 2, 3], dtype=int), - # below should coerce - [1.0, 2.0, 3.0], - np.array([1.0, 2.0, 3.0], dtype=float), - ], - ) - def test_constructor_dtypes_to_int64(self, vals): - index = Index(vals, dtype=int) - assert isinstance(index, Int64Index) - - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - [1.0, 2.0, 3.0], - 
np.array([1.0, 2.0, 3.0]), - np.array([1, 2, 3], dtype=int), - np.array([1.0, 2.0, 3.0], dtype=float), - ], - ) - def test_constructor_dtypes_to_float64(self, vals): - index = Index(vals, dtype=float) - assert isinstance(index, Float64Index) - - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - np.array([1, 2, 3], dtype=int), - np.array( - [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")] - ), - [datetime(2011, 1, 1), datetime(2011, 1, 2)], - ], - ) - def test_constructor_dtypes_to_categorical(self, vals): - index = Index(vals, dtype="category") - assert isinstance(index, CategoricalIndex) - - @pytest.mark.parametrize("cast_index", [True, False]) - @pytest.mark.parametrize( - "vals", - [ - Index( - np.array( - [ - np_datetime64_compat("2011-01-01"), - np_datetime64_compat("2011-01-02"), - ] - ) - ), - Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), - ], - ) - def test_constructor_dtypes_to_datetime(self, cast_index, vals): - if cast_index: - index = Index(vals, dtype=object) - assert isinstance(index, Index) - assert index.dtype == object - else: - index = Index(vals) - assert isinstance(index, DatetimeIndex) - - @pytest.mark.parametrize("cast_index", [True, False]) - @pytest.mark.parametrize( - "vals", - [ - np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), - [timedelta(1), timedelta(1)], - ], - ) - def test_constructor_dtypes_to_timedelta(self, cast_index, vals): - if cast_index: - index = Index(vals, dtype=object) - assert isinstance(index, Index) - assert index.dtype == object - else: - index = Index(vals) - assert isinstance(index, TimedeltaIndex) - @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize("attr", ["values", "asi8"]) @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) @@ -726,20 +634,6 @@ def test_is_all_dates(self, index, expected): def test_summary(self, index): index._summary() - def test_summary_bug(self): - # GH3869` - ind = Index(["{other}%s", 
"~:{range}:0"], name="A") - result = ind._summary() - # shouldn't be formatted accidentally. - assert "~:{range}:0" in result - assert "{other}%s" in result - - def test_format_different_scalar_lengths(self): - # GH35439 - idx = Index(["aaaaaaaaa", "b"]) - expected = ["aaaaaaaaa", "b"] - assert idx.format() == expected - def test_format_bug(self): # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't @@ -767,21 +661,6 @@ def test_format_missing(self, vals, nulls_fixture): assert formatted == expected assert index[3] is nulls_fixture - def test_format_with_name_time_info(self): - # bug I fixed 12/20/2011 - dates = date_range("2011-01-01 04:00:00", periods=10, name="something") - - formatted = dates.format(name=True) - assert formatted[0] == "something" - - def test_format_datetime_with_time(self): - t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) - - result = t.format() - expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] - assert len(result) == 2 - assert result == expected - @pytest.mark.parametrize("op", ["any", "all"]) def test_logical_compat(self, op, simple_index): index = simple_index @@ -1129,12 +1008,6 @@ def test_outer_join_sort(self): tm.assert_index_equal(result, expected) - def test_nan_first_take_datetime(self): - index = Index([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) - result = index.take([-1, 0, 1]) - expected = Index([index[-1], index[0], index[1]]) - tm.assert_index_equal(result, expected) - def test_take_fill_value(self): # GH 12631 index = Index(list("ABC"), name="xxx") diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index ed9243a5ba8d0..1592c34b48dd8 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -1,7 +1,7 @@ """ Collection of tests asserting things that should be true for -any index subclass. Makes use of the `indices` fixture defined -in pandas/tests/indexes/conftest.py. 
+any index subclass except for MultiIndex. Makes use of the `index_flat` +fixture defined in pandas/conftest.py. """ import re @@ -29,6 +29,26 @@ class TestCommon: + @pytest.mark.parametrize("name", [None, "new_name"]) + def test_to_frame(self, name, index_flat): + # see GH#15230, GH#22580 + idx = index_flat + + if name: + idx_name = name + else: + idx_name = idx.name or 0 + + df = idx.to_frame(name=idx_name) + + assert df.index is idx + assert len(df.columns) == 1 + assert df.columns[0] == idx_name + assert df[idx_name].values is not idx.values + + df = idx.to_frame(index=False, name=idx_name) + assert df.index is not idx + def test_droplevel(self, index): # GH 21115 if isinstance(index, MultiIndex): @@ -126,6 +146,46 @@ def test_copy_and_deepcopy(self, index_flat): new_copy = index.copy(deep=True, name="banana") assert new_copy.name == "banana" + def test_copy_name(self, index_flat): + # GH#12309: Check that the "name" argument + # passed at initialization is honored. + index = index_flat + + first = type(index)(index, copy=True, name="mario") + second = type(first)(first, copy=False) + + # Even though "copy=False", we want a new object. + assert first is not second + tm.assert_index_equal(first, second) + + # Not using tm.assert_index_equal() since names differ. + assert index.equals(first) + + assert first.name == "mario" + assert second.name == "mario" + + # TODO: belongs in series arithmetic tests? 
+ s1 = pd.Series(2, index=first) + s2 = pd.Series(3, index=second[:-1]) + # See GH#13365 + s3 = s1 * s2 + assert s3.index.name == "mario" + + def test_copy_name2(self, index_flat): + # GH#35592 + index = index_flat + if isinstance(index, MultiIndex): + return + + assert index.copy(name="mario").name == "mario" + + with pytest.raises(ValueError, match="Length of new names must be 1, got 2"): + index.copy(name=["mario", "luigi"]) + + msg = f"{type(index).__name__}.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + index.copy(name=[["mario"]]) + def test_unique_level(self, index_flat): # don't test a MultiIndex here (as its tested separated) index = index_flat diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 5c5ec7219d2d7..deeaffaf5b9cc 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -1,11 +1,17 @@ """ Tests for the Index constructor conducting inference. """ +from datetime import ( + datetime, + timedelta, +) from decimal import Decimal import numpy as np import pytest +from pandas.compat import np_datetime64_compat + from pandas.core.dtypes.common import is_unsigned_integer_dtype from pandas import ( @@ -27,6 +33,7 @@ ) import pandas._testing as tm from pandas.core.api import ( + Float64Index, Int64Index, UInt64Index, ) @@ -232,6 +239,91 @@ def test_constructor_int_dtype_nan_raises(self, dtype): with pytest.raises(ValueError, match=msg): Index(data, dtype=dtype) + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3]), + np.array([1, 2, 3], dtype=int), + # below should coerce + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_int64(self, vals): + index = Index(vals, dtype=int) + assert isinstance(index, Int64Index) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0]), + np.array([1, 2, 3], dtype=int), + np.array([1.0, 
2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_float64(self, vals): + index = Index(vals, dtype=float) + assert isinstance(index, Float64Index) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3], dtype=int), + np.array( + [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")] + ), + [datetime(2011, 1, 1), datetime(2011, 1, 2)], + ], + ) + def test_constructor_dtypes_to_categorical(self, vals): + index = Index(vals, dtype="category") + assert isinstance(index, CategoricalIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + Index( + np.array( + [ + np_datetime64_compat("2011-01-01"), + np_datetime64_compat("2011-01-02"), + ] + ) + ), + Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), + ], + ) + def test_constructor_dtypes_to_datetime(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, DatetimeIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), + [timedelta(1), timedelta(1)], + ], + ) + def test_constructor_dtypes_to_timedelta(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, TimedeltaIndex) + class TestIndexConstructorUnwrapping: # Test passing different arraylike values to pd.Index diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 66fdaa2778600..0c2f8d0103ceb 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -21,14 +21,6 @@ class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = 
timedelta_range("1 day", "31 day", freq="D", name="idx") - - result = idx[...] - assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): # GH#4226 tdi = timedelta_range("1d", "5d", freq="H", name="timebucket") diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index e46eed05caa86..332ab02255911 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -130,7 +130,7 @@ def test_nanosecond_getitem_setitem_with_tz(self): expected = DataFrame(-1, index=index, columns=["a"]) tm.assert_frame_equal(result, expected) - def test_getitem_millisecond_resolution(self, frame_or_series): + def test_getitem_str_slice_millisecond_resolution(self, frame_or_series): # GH#33589 keys = [ @@ -152,16 +152,3 @@ def test_getitem_millisecond_resolution(self, frame_or_series): ], ) tm.assert_equal(result, expected) - - def test_str_subclass(self): - # GH 37366 - class mystring(str): - pass - - data = ["2020-10-22 01:21:00+00:00"] - index = pd.DatetimeIndex(data) - df = DataFrame({"a": [1]}, index=index) - df["b"] = 2 - df[mystring("c")] = 3 - expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) - tm.assert_equal(df, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 2805c8877ed78..6a9ece738952d 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -323,9 +323,9 @@ def test_dups_fancy_indexing3(self): def test_duplicate_int_indexing(self, indexer_sl): # GH 17347 - s = Series(range(3), index=[1, 1, 3]) - expected = s[1] - result = indexer_sl(s)[[1]] + ser = Series(range(3), index=[1, 1, 3]) + expected = Series(range(2), index=[1, 1]) + result = indexer_sl(ser)[[1]] tm.assert_series_equal(result, expected) def test_indexing_mixed_frame_bug(self): @@ -653,13 +653,6 @@ def test_loc_setitem_fullindex_views(self): df.loc[df.index] = df.loc[df.index] 
tm.assert_frame_equal(df, df2) - def test_float_index_at_iat(self): - s = Series([1, 2, 3], index=[0.1, 0.2, 0.3]) - for el, item in s.items(): - assert s.at[el] == item - for i in range(len(s)): - assert s.iat[i] == i + 1 - def test_rhs_alignment(self): # GH8258, tests that both rows & columns are aligned to what is # assigned to. covers both uniform data-type & multi-type cases @@ -963,7 +956,11 @@ def test_extension_array_cross_section(): def test_extension_array_cross_section_converts(): # all numeric columns -> numeric series df = DataFrame( - {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"] + { + "A": pd.array([1, 2], dtype="Int64"), + "B": np.array([1, 2], dtype="int64"), + }, + index=["a", "b"], ) result = df.loc["a"] expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a") @@ -983,10 +980,3 @@ def test_extension_array_cross_section_converts(): result = df.iloc[0] tm.assert_series_equal(result, expected) - - -def test_getitem_object_index_float_string(): - # GH 17286 - s = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) - assert s["a"] == 1 - assert s[1.0] == 1 diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index bf262e6755289..bcb76fb078e74 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -77,6 +77,13 @@ def _check(f, func, values=False): class TestAtAndiAT: # at and iat tests that don't need Base class + def test_float_index_at_iat(self): + ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3]) + for el, item in ser.items(): + assert ser.at[el] == item + for i in range(len(ser)): + assert ser.iat[i] == i + 1 + def test_at_iat_coercion(self): # as timestamp is not a tuple! 
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index 03b1c512f9053..4c17917b949ca 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -36,6 +36,12 @@ class TestSeriesGetitemScalars: + def test_getitem_object_index_float_string(self): + # GH#17286 + ser = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) + assert ser["a"] == 1 + assert ser[1.0] == 1 + def test_getitem_float_keys_tuple_values(self): # see GH#13509
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44413
2021-11-12T18:00:43Z
2021-11-14T02:05:39Z
2021-11-14T02:05:39Z
2021-11-14T15:12:55Z
REF: EA quantile logic to EA._quantile
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index a1b40acc2558e..261d19ade080f 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -1,7 +1,5 @@ from __future__ import annotations -from typing import TYPE_CHECKING - import numpy as np from pandas._typing import ( @@ -9,7 +7,6 @@ npt, ) -from pandas.core.dtypes.common import is_sparse from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, @@ -17,9 +14,6 @@ from pandas.core.nanops import nanpercentile -if TYPE_CHECKING: - from pandas.core.arrays import ExtensionArray - def quantile_compat( values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str @@ -40,23 +34,12 @@ def quantile_compat( if isinstance(values, np.ndarray): fill_value = na_value_for_dtype(values.dtype, compat=False) mask = isna(values) - return _quantile_with_mask(values, mask, fill_value, qs, interpolation) + return quantile_with_mask(values, mask, fill_value, qs, interpolation) else: - # In general we don't want to import from arrays here; - # this is temporary pending discussion in GH#41428 - from pandas.core.arrays import BaseMaskedArray - - if isinstance(values, BaseMaskedArray): - # e.g. IntegerArray, does not implement _from_factorized - out = _quantile_ea_fallback(values, qs, interpolation) - - else: - out = _quantile_ea_compat(values, qs, interpolation) + return values._quantile(qs, interpolation) - return out - -def _quantile_with_mask( +def quantile_with_mask( values: np.ndarray, mask: np.ndarray, fill_value, @@ -114,82 +97,3 @@ def _quantile_with_mask( result = result.T return result - - -def _quantile_ea_compat( - values: ExtensionArray, qs: npt.NDArray[np.float64], interpolation: str -) -> ExtensionArray: - """ - ExtensionArray compatibility layer for _quantile_with_mask. - - We pretend that an ExtensionArray with shape (N,) is actually (1, N,) - for compatibility with non-EA code. 
- - Parameters - ---------- - values : ExtensionArray - qs : np.ndarray[float64] - interpolation: str - - Returns - ------- - ExtensionArray - """ - # TODO(EA2D): make-believe not needed with 2D EAs - orig = values - - # asarray needed for Sparse, see GH#24600 - mask = np.asarray(values.isna()) - mask = np.atleast_2d(mask) - - arr, fill_value = values._values_for_factorize() - arr = np.atleast_2d(arr) - - result = _quantile_with_mask(arr, mask, fill_value, qs, interpolation) - - if not is_sparse(orig.dtype): - # shape[0] should be 1 as long as EAs are 1D - - if orig.ndim == 2: - # i.e. DatetimeArray - result = type(orig)._from_factorized(result, orig) - - else: - assert result.shape == (1, len(qs)), result.shape - result = type(orig)._from_factorized(result[0], orig) - - # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") - return result # type: ignore[return-value] - - -def _quantile_ea_fallback( - values: ExtensionArray, qs: npt.NDArray[np.float64], interpolation: str -) -> ExtensionArray: - """ - quantile compatibility for ExtensionArray subclasses that do not - implement `_from_factorized`, e.g. IntegerArray. - - Notes - ----- - We assume that all impacted cases are 1D-only. 
- """ - mask = np.atleast_2d(np.asarray(values.isna())) - npvalues = np.atleast_2d(np.asarray(values)) - - res = _quantile_with_mask( - npvalues, - mask=mask, - fill_value=values.dtype.na_value, - qs=qs, - interpolation=interpolation, - ) - assert res.ndim == 2 - assert res.shape[0] == 1 - res = res[0] - try: - out = type(values)._from_sequence(res, dtype=values.dtype) - except TypeError: - # GH#42626: not able to safely cast Int64 - # for floating point output - out = np.atleast_2d(np.asarray(res, dtype=np.float64)) - return out diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 9d534a5a8d815..21f83f8373586 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -53,6 +53,7 @@ unique, value_counts, ) +from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray from pandas.core.construction import extract_array @@ -463,6 +464,30 @@ def value_counts(self, dropna: bool = True): index = Index(index_arr, name=result.index.name) return Series(result._values, index=index, name=result.name) + def _quantile( + self: NDArrayBackedExtensionArrayT, + qs: npt.NDArray[np.float64], + interpolation: str, + ) -> NDArrayBackedExtensionArrayT: + # TODO: disable for Categorical if not ordered? + + # asarray needed for Sparse, see GH#24600 + mask = np.asarray(self.isna()) + mask = np.atleast_2d(mask) + + arr = np.atleast_2d(self._ndarray) + # TODO: something NDArrayBacked-specific instead of _values_for_factorize[1]? 
+ fill_value = self._values_for_factorize()[1] + + res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) + + result = type(self)._from_factorized(res_values, self) + if self.ndim == 1: + assert result.shape == (1, len(qs)), result.shape + result = result[0] + + return result + # ------------------------------------------------------------------------ # numpy-like methods diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index a64aef64ab49f..d07c1eb398b9a 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -75,6 +75,7 @@ isin, unique, ) +from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.sorting import ( nargminmax, nargsort, @@ -1494,6 +1495,41 @@ def _empty(cls, shape: Shape, dtype: ExtensionDtype): ) return result + def _quantile( + self: ExtensionArrayT, qs: npt.NDArray[np.float64], interpolation: str + ) -> ExtensionArrayT: + """ + Compute the quantiles of self for each quantile in `qs`. + + Parameters + ---------- + qs : np.ndarray[float64] + interpolation: str + + Returns + ------- + same type as self + """ + # asarray needed for Sparse, see GH#24600 + mask = np.asarray(self.isna()) + mask = np.atleast_2d(mask) + + arr = np.atleast_2d(np.asarray(self)) + fill_value = np.nan + + res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) + + if self.ndim == 2: + # i.e. 
DatetimeArray + result = type(self)._from_sequence(res_values) + + else: + # shape[0] should be 1 as long as EAs are 1D + assert res_values.shape == (1, len(qs)), res_values.shape + result = type(self)._from_sequence(res_values[0]) + + return result + def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): if any( isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index b334a167d3824..9d98bd8045006 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -65,6 +65,7 @@ take, ) from pandas.core.array_algos import masked_reductions +from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.indexers import check_array_indexer @@ -692,6 +693,38 @@ def equals(self, other) -> bool: right = other._data[~other._mask] return array_equivalent(left, right, dtype_equal=True) + def _quantile( + self: BaseMaskedArrayT, qs: npt.NDArray[np.float64], interpolation: str + ) -> BaseMaskedArrayT: + """ + Dispatch to quantile_with_mask, needed because we do not have + _from_factorized. + + Notes + ----- + We assume that all impacted cases are 1D-only. 
+ """ + mask = np.atleast_2d(np.asarray(self.isna())) + npvalues = np.atleast_2d(np.asarray(self)) + + res = quantile_with_mask( + npvalues, + mask=mask, + fill_value=self.dtype.na_value, + qs=qs, + interpolation=interpolation, + ) + assert res.ndim == 2 + assert res.shape[0] == 1 + res = res[0] + try: + out = type(self)._from_sequence(res, dtype=self.dtype) + except TypeError: + # GH#42626: not able to safely cast Int64 + # for floating point output + out = np.asarray(res, dtype=np.float64) + return out + def _reduce(self, name: str, *, skipna: bool = True, **kwargs): if name in {"any", "all"}: return getattr(self, name)(skipna=skipna, **kwargs) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index c054710a01f75..9b2e391966070 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -863,6 +863,12 @@ def value_counts(self, dropna: bool = True) -> Series: keys = Index(keys) return Series(counts, index=keys) + def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str): + # Special case: the returned array isn't _really_ sparse, so we don't + # wrap it in a SparseArray + result = super()._quantile(qs, interpolation) + return np.asarray(result) + # -------- # Indexing # -------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 550bc4ac56d4b..3654f77825ab4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1310,6 +1310,9 @@ def quantile( assert is_list_like(qs) # caller is responsible for this result = quantile_compat(self.values, np.asarray(qs._values), interpolation) + # ensure_block_shape needed for cases where we start with EA and result + # is ndarray, e.g. IntegerArray, SparseArray + result = ensure_block_shape(result, ndim=2) return new_block_2d(result, placement=self._mgr_locs)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44412
2021-11-12T17:33:53Z
2021-11-28T19:25:22Z
2021-11-28T19:25:22Z
2021-11-28T19:38:51Z
WARN: Add FutureWarning for `DataFrame.to_latex`
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 36b591c3c3142..3d3ec53948a01 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -150,6 +150,7 @@ and a short caption (:issue:`36267`). The keyword ``position`` has been added to set the position. .. ipython:: python + :okwarning: data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) table = data.to_latex(position='ht') @@ -161,6 +162,7 @@ one can optionally provide a tuple ``(full_caption, short_caption)`` to add a short caption macro. .. ipython:: python + :okwarning: data = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) table = data.to_latex(caption=('the full long caption', 'short caption')) diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 1f656f267783f..462e0fd139a94 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -465,6 +465,7 @@ Other Deprecations - Deprecated the 'errors' keyword argument in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, and meth:`DataFrame.mask`; in a future version the argument will be removed (:issue:`44294`) - Deprecated :meth:`PeriodIndex.astype` to ``datetime64[ns]`` or ``DatetimeTZDtype``, use ``obj.to_timestamp(how).tz_localize(dtype.tz)`` instead (:issue:`44398`) - Deprecated :meth:`DateOffset.apply`, use ``offset + other`` instead (:issue:`44522`) +- A deprecation warning is now shown for :meth:`DataFrame.to_latex` indicating the arguments signature may change and emulate more the arguments to :meth:`.Styler.to_latex` in future versions (:issue:`44411`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 888376ea8e1dc..601b8dcd504d6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3272,6 +3272,7 @@ def to_latex( {returns} See Also -------- + Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. 
DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. @@ -3281,7 +3282,7 @@ def to_latex( >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... mask=['red', 'purple'], ... weapon=['sai', 'bo staff'])) - >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE + >>> print(df.to_latex(index=False)) # doctest: +SKIP \begin{{tabular}}{{lll}} \toprule name & mask & weapon \\ @@ -3291,6 +3292,15 @@ def to_latex( \bottomrule \end{{tabular}} """ + msg = ( + "In future versions `DataFrame.to_latex` is expected to utilise the base " + "implementation of `Styler.to_latex` for formatting and rendering. " + "The arguments signature may therefore change. It is recommended instead " + "to use `DataFrame.style.to_latex` which also contains additional " + "functionality." + ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index b288fafd8f7f6..bb80bd12c1958 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -265,6 +265,7 @@ def test_repr_column_name_unicode_truncation_bug(self): with option_context("display.max_columns", 20): assert "StringCol" in repr(df) + @pytest.mark.filterwarnings("ignore::FutureWarning") def test_latex_repr(self): result = r"""\begin{tabular}{llll} \toprule diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index d9bd8f6809c73..ab0199dca3f24 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -3298,6 +3298,7 @@ def test_repr_html_ipython_config(ip): assert not result.error_in_exec +@pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`") @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"]) 
@pytest.mark.parametrize( "encoding, data", @@ -3319,7 +3320,8 @@ def test_filepath_or_buffer_arg( ): getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) elif encoding == "foo": - with tm.assert_produces_warning(None): + expected_warning = FutureWarning if method == "to_latex" else None + with tm.assert_produces_warning(expected_warning): with pytest.raises(LookupError, match="unknown encoding"): getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) else: @@ -3328,6 +3330,7 @@ def test_filepath_or_buffer_arg( assert_filepath_or_buffer_equals(expected) +@pytest.mark.filterwarnings("ignore::FutureWarning") @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"]) def test_filepath_or_buffer_bad_arg_raises(float_frame, method): msg = "buf is not a file name and it has no write method" diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 10c8ccae67fb2..01bc94bf594d9 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -19,6 +19,8 @@ RowStringConverter, ) +pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning") + def _dedent(string): """Dedent without new line in the beginning. @@ -1514,3 +1516,15 @@ def test_get_strrow_multindex_multicolumn(self, row_num, expected): ) assert row_string_converter.get_strrow(row_num=row_num) == expected + + def test_future_warning(self): + df = DataFrame([[1]]) + msg = ( + "In future versions `DataFrame.to_latex` is expected to utilise the base " + "implementation of `Styler.to_latex` for formatting and rendering. " + "The arguments signature may therefore change. It is recommended instead " + "to use `DataFrame.style.to_latex` which also contains additional " + "functionality." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_latex() diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 5f1256c4e5ba3..a782f8dbbc76d 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -348,6 +348,7 @@ def test_read_fspath_all(self, reader, module, path, datapath): else: tm.assert_frame_equal(result, expected) + @pytest.mark.filterwarnings("ignore:In future versions `DataFrame.to_latex`") @pytest.mark.parametrize( "writer_name, writer_kwargs, module", [ diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index d3ff7f4dc7b4c..de34caa7b4387 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -196,6 +196,7 @@ def test_timeseries_repr_object_dtype(self): ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)] repr(ts2).splitlines()[-1] + @pytest.mark.filterwarnings("ignore::FutureWarning") def test_latex_repr(self): result = r"""\begin{tabular}{ll} \toprule
Instead of #41648, which performs a refactor and adds the warning in the same PR, I am proposing adding the warning only for 1.4.0 and then in 2.0 there will be scope to make breaking changes and refactor the arguments signature.
https://api.github.com/repos/pandas-dev/pandas/pulls/44411
2021-11-12T17:25:26Z
2021-11-24T06:19:59Z
2021-11-24T06:19:58Z
2022-11-18T22:08:11Z
BUG: read_csv raising if parse_dates is used with MultiIndex columns
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index d1e209adb1b8f..d3df785c23544 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -574,6 +574,7 @@ I/O - Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`) - Bug in :func:`read_csv` with :code:`float_precision="round_trip"` which did not skip initial/trailing whitespace (:issue:`43713`) - Bug in dumping/loading a :class:`DataFrame` with ``yaml.dump(frame)`` (:issue:`42748`) +- Bug in :func:`read_csv` raising ``ValueError`` when ``parse_dates`` was used with ``MultiIndex`` columns (:issue:`8991`) - Period diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 339585810bec1..ba39b6a933a81 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -259,7 +259,8 @@ def _validate_parse_dates_presence(self, columns: list[str]) -> None: # ParseDates = Union[DateGroups, List[DateGroups], # Dict[ColReference, DateGroups]] cols_needed = itertools.chain.from_iterable( - col if is_list_like(col) else [col] for col in self.parse_dates + col if is_list_like(col) and not isinstance(col, tuple) else [col] + for col in self.parse_dates ) else: cols_needed = [] @@ -1091,7 +1092,7 @@ def _isindex(colspec): if isinstance(parse_spec, list): # list of column lists for colspec in parse_spec: - if is_scalar(colspec): + if is_scalar(colspec) or isinstance(colspec, tuple): if isinstance(colspec, int) and colspec not in data_dict: colspec = orig_names[colspec] if _isindex(colspec): @@ -1146,7 +1147,11 @@ def _try_convert_dates(parser: Callable, colspec, data_dict, columns): else: colnames.append(c) - new_name = "_".join([str(x) for x in colnames]) + new_name: tuple | str + if all(isinstance(x, tuple) for x in colnames): + new_name = tuple(map("_".join, zip(*colnames))) + else: + new_name = "_".join([str(x) for x in colnames]) to_parse = 
[np.asarray(data_dict[c]) for c in colnames if c in data_dict] new_col = parser(*to_parse) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index c8bea9592e82a..470440290016d 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1732,6 +1732,39 @@ def test_date_parser_and_names(all_parsers): tm.assert_frame_equal(result, expected) +@skip_pyarrow +def test_date_parser_multiindex_columns(all_parsers): + parser = all_parsers + data = """a,b +1,2 +2019-12-31,6""" + result = parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1]) + expected = DataFrame({("a", "1"): Timestamp("2019-12-31"), ("b", "2"): [6]}) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow +@pytest.mark.parametrize( + "parse_spec, col_name", + [ + ([[("a", "1"), ("b", "2")]], ("a_b", "1_2")), + ({("foo", "1"): [("a", "1"), ("b", "2")]}, ("foo", "1")), + ], +) +def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, col_name): + parser = all_parsers + data = """a,b,c +1,2,3 +2019-12,-31,6""" + result = parser.read_csv( + StringIO(data), + parse_dates=parse_spec, + header=[0, 1], + ) + expected = DataFrame({col_name: Timestamp("2019-12-31"), ("c", "3"): [6]}) + tm.assert_frame_equal(result, expected) + + @skip_pyarrow def test_date_parser_usecols_thousands(all_parsers): # GH#39365
- [x] closes #8991 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44408
2021-11-12T14:12:49Z
2021-11-14T02:17:45Z
2021-11-14T02:17:44Z
2021-12-15T10:35:54Z
TST: Add nulls fixture to duplicates categorical na test
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index f72d85337df8e..8b5557ab6e85f 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -2,7 +2,6 @@ import pytest from pandas import ( - NA, Categorical, Series, ) @@ -225,11 +224,13 @@ def test_drop_duplicates_categorical_bool(self, ordered): assert return_value is None tm.assert_series_equal(sc, tc[~expected]) - def test_drop_duplicates_categorical_bool_na(self): + def test_drop_duplicates_categorical_bool_na(self, nulls_fixture): # GH#44351 ser = Series( Categorical( - [True, False, True, False, NA], categories=[True, False], ordered=True + [True, False, True, False, nulls_fixture], + categories=[True, False], + ordered=True, ) ) result = ser.drop_duplicates() diff --git a/pandas/tests/series/methods/test_duplicated.py b/pandas/tests/series/methods/test_duplicated.py index c61492168da63..1c547ee99efed 100644 --- a/pandas/tests/series/methods/test_duplicated.py +++ b/pandas/tests/series/methods/test_duplicated.py @@ -2,7 +2,6 @@ import pytest from pandas import ( - NA, Categorical, Series, ) @@ -39,11 +38,13 @@ def test_duplicated_nan_none(keep, expected): tm.assert_series_equal(result, expected) -def test_duplicated_categorical_bool_na(): +def test_duplicated_categorical_bool_na(nulls_fixture): # GH#44351 ser = Series( Categorical( - [True, False, True, False, NA], categories=[True, False], ordered=True + [True, False, True, False, nulls_fixture], + categories=[True, False], + ordered=True, ) ) result = ser.duplicated()
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them cc @jorisvandenbossche as a follow up
https://api.github.com/repos/pandas-dev/pandas/pulls/44407
2021-11-12T13:17:30Z
2021-11-12T14:55:43Z
2021-11-12T14:55:43Z
2021-11-12T15:16:11Z
TYP: Typ part of python_parser
diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py index 9fbeeb74901ef..98d1315c6212c 100644 --- a/pandas/io/parsers/arrow_parser_wrapper.py +++ b/pandas/io/parsers/arrow_parser_wrapper.py @@ -110,7 +110,12 @@ def _finalize_output(self, frame: DataFrame) -> DataFrame: multi_index_named = False frame.columns = self.names # we only need the frame not the names - frame.columns, frame = self._do_date_conversions(frame.columns, frame) + # error: Incompatible types in assignment (expression has type + # "Union[List[Union[Union[str, int, float, bool], Union[Period, Timestamp, + # Timedelta, Any]]], Index]", variable has type "Index") [assignment] + frame.columns, frame = self._do_date_conversions( # type: ignore[assignment] + frame.columns, frame + ) if self.index_col is not None: for i, item in enumerate(self.index_col): if is_integer(item): diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 4f5ba3460a3c8..5d03529654b0d 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -10,10 +10,13 @@ Any, Callable, DefaultDict, + Hashable, Iterable, + Mapping, Sequence, cast, final, + overload, ) import warnings @@ -56,6 +59,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import isna +from pandas import DataFrame from pandas.core import algorithms from pandas.core.arrays import Categorical from pandas.core.indexes.api import ( @@ -241,7 +245,7 @@ def _open_handles( errors=kwds.get("encoding_errors", "strict"), ) - def _validate_parse_dates_presence(self, columns: list[str]) -> Iterable: + def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable: """ Check if parse_dates are in columns. 
@@ -337,11 +341,24 @@ def _should_parse_dates(self, i: int) -> bool: @final def _extract_multi_indexer_columns( - self, header, index_names, passed_names: bool = False + self, + header, + index_names: list | None, + passed_names: bool = False, ): """ - extract and return the names, index_names, col_names - header is a list-of-lists returned from the parsers + Extract and return the names, index_names, col_names if the column + names are a MultiIndex. + + Parameters + ---------- + header: list of lists + The header rows + index_names: list, optional + The names of the future index + passed_names: bool, default False + A flag specifying if names where passed + """ if len(header) < 2: return header[0], index_names, None, passed_names @@ -400,7 +417,7 @@ def extract(r): return names, index_names, col_names, passed_names @final - def _maybe_dedup_names(self, names): + def _maybe_dedup_names(self, names: Sequence[Hashable]) -> Sequence[Hashable]: # see gh-7160 and gh-9424: this helps to provide # immediate alleviation of the duplicate names # issue and appears to be satisfactory to users, @@ -408,7 +425,7 @@ def _maybe_dedup_names(self, names): # would be nice! 
if self.mangle_dupe_cols: names = list(names) # so we can index - counts: DefaultDict[int | str | tuple, int] = defaultdict(int) + counts: DefaultDict[Hashable, int] = defaultdict(int) is_potential_mi = _is_potential_multi_index(names, self.index_col) for i, col in enumerate(names): @@ -418,6 +435,8 @@ def _maybe_dedup_names(self, names): counts[col] = cur_count + 1 if is_potential_mi: + # for mypy + assert isinstance(col, tuple) col = col[:-1] + (f"{col[-1]}.{cur_count}",) else: col = f"{col}.{cur_count}" @@ -572,7 +591,7 @@ def _agg_index(self, index, try_parse_dates: bool = True) -> Index: @final def _convert_to_ndarrays( self, - dct: dict, + dct: Mapping, na_values, na_fvalues, verbose: bool = False, @@ -664,7 +683,7 @@ def _convert_to_ndarrays( @final def _set_noconvert_dtype_columns( - self, col_indices: list[int], names: list[int | str | tuple] + self, col_indices: list[int], names: Sequence[Hashable] ) -> set[int]: """ Set the columns that should not undergo dtype conversions. @@ -848,7 +867,27 @@ def _cast_types(self, values, cast_type, column): ) from err return values - def _do_date_conversions(self, names, data): + @overload + def _do_date_conversions( + self, + names: Index, + data: DataFrame, + ) -> tuple[Sequence[Hashable] | Index, DataFrame]: + ... + + @overload + def _do_date_conversions( + self, + names: Sequence[Hashable], + data: Mapping[Hashable, ArrayLike], + ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: + ... 
+ + def _do_date_conversions( + self, + names: Sequence[Hashable] | Index, + data: Mapping[Hashable, ArrayLike] | DataFrame, + ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]: # returns data, columns if self.parse_dates is not None: @@ -864,7 +903,11 @@ def _do_date_conversions(self, names, data): return names, data - def _check_data_length(self, columns: list[str], data: list[ArrayLike]) -> None: + def _check_data_length( + self, + columns: Sequence[Hashable], + data: Sequence[ArrayLike], + ) -> None: """Checks if length of data is equal to length of column names. One set of trailing commas is allowed. self.index_col not False diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index e96df3b3f3782..05c963f2d2552 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -279,7 +279,7 @@ def read(self, nrows=None): data_tups = sorted(data.items()) data = {k: v for k, (i, v) in zip(names, data_tups)} - names, data = self._do_date_conversions(names, data) + names, date_data = self._do_date_conversions(names, data) else: # rename dict keys @@ -302,13 +302,13 @@ def read(self, nrows=None): data = {k: v for k, (i, v) in zip(names, data_tups)} - names, data = self._do_date_conversions(names, data) - index, names = self._make_index(data, alldata, names) + names, date_data = self._do_date_conversions(names, data) + index, names = self._make_index(date_data, alldata, names) # maybe create a mi on the columns names = self._maybe_make_multi_index_columns(names, self.col_names) - return index, names, data + return index, names, date_data def _filter_usecols(self, names): # hackish diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 08f8d49dcdf1a..2d1433a8f21c8 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -10,7 +10,10 @@ import sys from typing import ( DefaultDict, + Hashable, 
Iterator, + Mapping, + Sequence, cast, ) import warnings @@ -19,6 +22,7 @@ import pandas._libs.lib as lib from pandas._typing import ( + ArrayLike, FilePath, ReadCsvBuffer, Scalar, @@ -110,9 +114,10 @@ def __init__( # Get columns in two steps: infer from data, then # infer column indices from self.usecols if it is specified. self._col_indices: list[int] | None = None + columns: list[list[Scalar | None]] try: ( - self.columns, + columns, self.num_original_columns, self.unnamed_cols, ) = self._infer_columns() @@ -123,18 +128,19 @@ def __init__( # Now self.columns has the set of columns that we will process. # The original set is stored in self.original_columns. # error: Cannot determine type of 'index_names' + self.columns: list[Hashable] ( self.columns, self.index_names, self.col_names, _, ) = self._extract_multi_indexer_columns( - self.columns, + columns, self.index_names, # type: ignore[has-type] ) # get popped off for index - self.orig_names: list[int | str | tuple] = list(self.columns) + self.orig_names: list[Hashable] = list(self.columns) # needs to be cleaned/refactored # multiple date column thing turning into a real spaghetti factory @@ -172,7 +178,7 @@ def __init__( ) self.num = re.compile(regex) - def _make_reader(self, f): + def _make_reader(self, f) -> None: sep = self.delimiter if sep is None or len(sep) == 1: @@ -238,7 +244,7 @@ def _read(): # TextIOWrapper, mmap, None]") self.data = reader # type: ignore[assignment] - def read(self, rows=None): + def read(self, rows: int | None = None): try: content = self._get_lines(rows) except StopIteration: @@ -251,7 +257,7 @@ def read(self, rows=None): # done with first read, next time raise StopIteration self._first_chunk = False - columns = list(self.orig_names) + columns: Sequence[Hashable] = list(self.orig_names) if not len(content): # pragma: no cover # DataFrame with the right metadata, even though it's length 0 names = self._maybe_dedup_names(self.orig_names) @@ -275,14 +281,17 @@ def read(self, 
rows=None): alldata = self._rows_to_cols(content) data, columns = self._exclude_implicit_index(alldata) - data = self._convert_data(data) - columns, data = self._do_date_conversions(columns, data) + conv_data = self._convert_data(data) + columns, conv_data = self._do_date_conversions(columns, conv_data) - index, columns = self._make_index(data, alldata, columns, indexnamerow) + index, columns = self._make_index(conv_data, alldata, columns, indexnamerow) - return index, columns, data + return index, columns, conv_data - def _exclude_implicit_index(self, alldata): + def _exclude_implicit_index( + self, + alldata: list[np.ndarray], + ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: names = self._maybe_dedup_names(self.orig_names) offset = 0 @@ -304,7 +313,10 @@ def get_chunk(self, size=None): size = self.chunksize # type: ignore[attr-defined] return self.read(rows=size) - def _convert_data(self, data): + def _convert_data( + self, + data: Mapping[Hashable, np.ndarray], + ) -> Mapping[Hashable, ArrayLike]: # apply converters clean_conv = self._clean_mapping(self.converters) clean_dtypes = self._clean_mapping(self.dtype) @@ -336,11 +348,13 @@ def _convert_data(self, data): clean_dtypes, ) - def _infer_columns(self): + def _infer_columns( + self, + ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: names = self.names num_original_columns = 0 clear_buffer = True - unnamed_cols: set[str | int | None] = set() + unnamed_cols: set[Scalar | None] = set() self._header_line = None if self.header is not None: @@ -355,7 +369,7 @@ def _infer_columns(self): have_mi_columns = False header = [header] - columns: list[list[int | str | None]] = [] + columns: list[list[Scalar | None]] = [] for level, hr in enumerate(header): try: line = self._buffered_line() @@ -384,7 +398,7 @@ def _infer_columns(self): line = self.names[:] - this_columns: list[int | str | None] = [] + this_columns: list[Scalar | None] = [] this_unnamed_cols = [] for i, c in enumerate(line): @@ 
-447,6 +461,7 @@ def _infer_columns(self): if clear_buffer: self._clear_buffer() + first_line: list[Scalar] | None if names is not None: # Read first row after header to check if data are longer try: @@ -522,10 +537,10 @@ def _infer_columns(self): def _handle_usecols( self, - columns: list[list[str | int | None]], - usecols_key: list[str | int | None], + columns: list[list[Scalar | None]], + usecols_key: list[Scalar | None], num_original_columns: int, - ): + ) -> list[list[Scalar | None]]: """ Sets self._col_indices @@ -578,7 +593,7 @@ def _buffered_line(self): else: return self._next_line() - def _check_for_bom(self, first_row): + def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: """ Checks whether the file begins with the BOM character. If it does, remove it. In addition, if there is quoting @@ -609,6 +624,7 @@ def _check_for_bom(self, first_row): return first_row first_row_bom = first_row[0] + new_row: str if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: start = 2 @@ -627,9 +643,11 @@ def _check_for_bom(self, first_row): # No quotation so just remove BOM from first element new_row = first_row_bom[1:] - return [new_row] + first_row[1:] - def _is_line_empty(self, line): + new_row_list: list[Scalar] = [new_row] + return new_row_list + first_row[1:] + + def _is_line_empty(self, line: list[Scalar]) -> bool: """ Check if a line is empty or not. @@ -644,7 +662,7 @@ def _is_line_empty(self, line): """ return not line or all(not x for x in line) - def _next_line(self): + def _next_line(self) -> list[Scalar]: if isinstance(self.data, list): while self.skipfunc(self.pos): self.pos += 1 @@ -698,7 +716,7 @@ def _next_line(self): self.buf.append(line) return line - def _alert_malformed(self, msg, row_num): + def _alert_malformed(self, msg: str, row_num: int) -> None: """ Alert a user about a malformed row, depending on value of `self.on_bad_lines` enum. 
@@ -708,10 +726,12 @@ def _alert_malformed(self, msg, row_num): Parameters ---------- - msg : The error message to display. - row_num : The row number where the parsing error occurred. - Because this row number is displayed, we 1-index, - even though we 0-index internally. + msg: str + The error message to display. + row_num: int + The row number where the parsing error occurred. + Because this row number is displayed, we 1-index, + even though we 0-index internally. """ if self.on_bad_lines == self.BadLineHandleMethod.ERROR: raise ParserError(msg) @@ -719,7 +739,7 @@ def _alert_malformed(self, msg, row_num): base = f"Skipping line {row_num}: " sys.stderr.write(base + msg + "\n") - def _next_iter_line(self, row_num): + def _next_iter_line(self, row_num: int) -> list[Scalar] | None: """ Wrapper around iterating through `self.data` (CSV source). @@ -729,12 +749,16 @@ def _next_iter_line(self, row_num): Parameters ---------- - row_num : The row number of the line being parsed. + row_num: int + The row number of the line being parsed. 
""" try: # assert for mypy, data is Iterator[str] or None, would error in next assert self.data is not None - return next(self.data) + line = next(self.data) + # for mypy + assert isinstance(line, list) + return line except csv.Error as e: if ( self.on_bad_lines == self.BadLineHandleMethod.ERROR @@ -763,7 +787,7 @@ def _next_iter_line(self, row_num): self._alert_malformed(msg, row_num) return None - def _check_comments(self, lines): + def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.comment is None: return lines ret = [] @@ -784,19 +808,19 @@ def _check_comments(self, lines): ret.append(rl) return ret - def _remove_empty_lines(self, lines): + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: """ Iterate through the lines and remove any that are either empty or contain only one whitespace value Parameters ---------- - lines : array-like + lines : list of list of Scalars The array of lines that we are to filter. Returns ------- - filtered_lines : array-like + filtered_lines : list of list of Scalars The same array of lines with the "empty" ones removed. 
""" ret = [] @@ -810,7 +834,7 @@ def _remove_empty_lines(self, lines): ret.append(line) return ret - def _check_thousands(self, lines): + def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.thousands is None: return lines @@ -818,7 +842,9 @@ def _check_thousands(self, lines): lines=lines, search=self.thousands, replace="" ) - def _search_replace_num_columns(self, lines, search, replace): + def _search_replace_num_columns( + self, lines: list[list[Scalar]], search: str, replace: str + ) -> list[list[Scalar]]: ret = [] for line in lines: rl = [] @@ -835,7 +861,7 @@ def _search_replace_num_columns(self, lines, search, replace): ret.append(rl) return ret - def _check_decimal(self, lines): + def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.decimal == parser_defaults["decimal"]: return lines @@ -843,12 +869,12 @@ def _check_decimal(self, lines): lines=lines, search=self.decimal, replace="." ) - def _clear_buffer(self): + def _clear_buffer(self) -> None: self.buf = [] _implicit_index = False - def _get_index_name(self, columns): + def _get_index_name(self, columns: list[Hashable]): """ Try several cases to get lines: @@ -863,6 +889,7 @@ def _get_index_name(self, columns): orig_names = list(columns) columns = list(columns) + line: list[Scalar] | None if self._header_line is not None: line = self._header_line else: @@ -871,6 +898,7 @@ def _get_index_name(self, columns): except StopIteration: line = None + next_line: list[Scalar] | None try: next_line = self._next_line() except StopIteration: @@ -917,7 +945,7 @@ def _get_index_name(self, columns): return index_name, orig_names, columns - def _rows_to_cols(self, content): + def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: col_len = self.num_original_columns if self._implicit_index: @@ -1000,7 +1028,7 @@ def _rows_to_cols(self, content): ] return zipped_content - def _get_lines(self, rows=None): + def _get_lines(self, rows: int | None = 
None): lines = self.buf new_rows = None
@simonjayhawkins I am wondering about mypy with overloads. The overload for ``_do_date_conversion`` could be more specific, e.g. ``` @overload def _do_date_conversions( self, names: list[Scalar | tuple], data: dict[Scalar | tuple, ArrayLike] | dict[Scalar | tuple, np.ndarray], ) -> tuple[ list[Scalar | tuple], dict[Scalar | tuple, ArrayLike] | dict[Scalar | tuple, np.ndarray], ]: ... ``` could be transformed to ``` @overload def _do_date_conversions( self, names: list[Scalar | tuple], data: dict[Scalar | tuple, ArrayLike], ) -> tuple[ list[Scalar | tuple], dict[Scalar | tuple, ArrayLike], ]: ... @overload def _do_date_conversions( self, names: list[Scalar | tuple], data: dict[Scalar | tuple, np.ndarray], ) -> tuple[ list[Scalar | tuple], dict[Scalar | tuple, np.ndarray], ]: ... ``` But in this case mypy complains about: `` error: Overloaded function signature 3 will never be matched: signature 2's parameter type(s) are the same or broader [misc]`` On the other side, if typing this only with ``` @overload def _do_date_conversions( self, names: list[Scalar | tuple], data: dict[Scalar | tuple, ArrayLike], ) -> tuple[ list[Scalar | tuple], dict[Scalar | tuple, ArrayLike], ]: ... ``` and passing a ``dict[Scalar | tuple, np.ndarray]`` mypy complains with ``` pandas/io/parsers/python_parser.py:283: error: Argument 2 to "_do_date_conversions" of "ParserBase" has incompatible type "Dict[Union[Union[Union[str, int, float, bool], Union[Period, Timestamp, Timedelta, Any]], Tuple[Any, ...]], ndarray[Any, Any]]"; expected "Dict[Union[Union[Union[str, int, float, bool], Union[Period, Timestamp, Timedelta, Any]], Tuple[Any, ...]], Union[ExtensionArray, ndarray[Any, Any]]]" [arg-type] pandas/io/parsers/python_parser.py:283: note: "Dict" is invariant -- see https://mypy.readthedocs.io/en/stable/common_issues.html#variance pandas/io/parsers/python_parser.py:283: note: Consider using "Mapping" instead, which is covariant in the value type ``` This looks inconsistent. 
I think the overload should accept the distinction between ``ArrayLike`` and ``np.ndarray`` with dicts or lists? Technically we could use ``Mapping`` probably, but we would loose some strictness in this case and the object is alwyas a ``dict`` and we know if passing only np.ndarray we will get them in return. On another topic: Should we use an alias for ``Scalar | tuple``? We need this throughout the code to indicate column names
https://api.github.com/repos/pandas-dev/pandas/pulls/44406
2021-11-12T13:06:14Z
2021-11-28T23:52:59Z
2021-11-28T23:52:59Z
2021-11-29T11:38:45Z
BUG: .get_indexer_non_unique() must return an array of ints (#44084)
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index d1e209adb1b8f..2d70f361ba9cd 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -515,7 +515,7 @@ Strings Interval ^^^^^^^^ -- +- Bug in :meth:`IntervalIndex.get_indexer_non_unique` returning boolean mask instead of array of integers for a non unique and non monotonic index (:issue:`44084`) - Indexing diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 5791f89828ca3..885c922d1ee0f 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -727,6 +727,8 @@ def _get_indexer_pointwise( if isinstance(locs, slice): # Only needed for get_indexer_non_unique locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") + elif not self.is_unique and not self.is_monotonic: + locs = np.where(locs)[0] locs = np.array(locs, ndmin=1) except KeyError: missing.append(i) diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 8df8eef69e9c9..75f7c69ce5300 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -8,8 +8,10 @@ from pandas import ( NA, CategoricalIndex, + Index, Interval, IntervalIndex, + MultiIndex, NaT, Timedelta, date_range, @@ -373,6 +375,31 @@ def test_get_indexer_with_nans(self): expected = np.array([0, 1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) + def test_get_index_non_unique_non_monotonic(self): + # GH#44084 (root cause) + index = IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0), (0.0, 1.0), (1.0, 2.0)] + ) + + result, _ = index.get_indexer_non_unique([Interval(1.0, 2.0)]) + expected = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_multiindex_with_intervals(self): + # GH#44084 (MultiIndex case as reported) + interval_index = IntervalIndex.from_tuples( + [(2.0, 3.0), (0.0, 1.0), (1.0, 
2.0)], name="interval" + ) + foo_index = Index([1, 2, 3], name="foo") + + multi_index = MultiIndex.from_product([foo_index, interval_index]) + + result = multi_index.get_level_values("interval").get_indexer_for( + [Interval(0.0, 1.0)] + ) + expected = np.array([1, 4, 7], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + class TestSliceLocs: def test_slice_locs_with_interval(self):
GH#44084 boils down to the following. According to the docs `.get_indexer_non_unique()` is supposed to return "integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values". However, for an index that is non unique and non monotonic it returns a boolean mask. That is because it uses `.get_loc()` which for non unique, non monotonic indexes returns a boolean mask. This patch catches that case and converts the boolean mask from `.get_loc()` into the corresponding array of integers if the index is not unique and not monotonic. - [x] closes #44084 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44404
2021-11-12T09:32:22Z
2021-11-14T02:26:44Z
2021-11-14T02:26:43Z
2021-11-16T12:53:04Z
Backport PR #44356 on branch 1.3.x (Fixed regression in Series.duplicated for categorical dtype with bool categories)
diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst index 589092c0dd7e3..951b05b65c81b 100644 --- a/doc/source/whatsnew/v1.3.5.rst +++ b/doc/source/whatsnew/v1.3.5.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`) - Fixed performance regression in :func:`read_csv` (:issue:`44106`) +- Fixed regression in :meth:`Series.duplicated` and :meth:`Series.drop_duplicates` when Series has :class:`Categorical` dtype with boolean categories (:issue:`44351`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 3ab0350f23c5a..eb8a1dc5f0e73 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -139,7 +139,7 @@ def _ensure_data(values: ArrayLike) -> tuple[np.ndarray, DtypeObj]: # i.e. all-bool Categorical, BooleanArray try: return np.asarray(values).astype("uint8", copy=False), values.dtype - except TypeError: + except (TypeError, ValueError): # GH#42107 we have pd.NAs present return np.asarray(values), values.dtype diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index 7eb51f8037792..f72d85337df8e 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -2,6 +2,7 @@ import pytest from pandas import ( + NA, Categorical, Series, ) @@ -224,6 +225,20 @@ def test_drop_duplicates_categorical_bool(self, ordered): assert return_value is None tm.assert_series_equal(sc, tc[~expected]) + def test_drop_duplicates_categorical_bool_na(self): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, NA], categories=[True, False], ordered=True + ) + ) + result = ser.drop_duplicates() + expected = Series( + Categorical([True, False, np.nan], categories=[True, False], ordered=True), + 
index=[0, 1, 4], + ) + tm.assert_series_equal(result, expected) + def test_drop_duplicates_pos_args_deprecation(): # GH#41485 diff --git a/pandas/tests/series/methods/test_duplicated.py b/pandas/tests/series/methods/test_duplicated.py index 5cc297913e851..c61492168da63 100644 --- a/pandas/tests/series/methods/test_duplicated.py +++ b/pandas/tests/series/methods/test_duplicated.py @@ -1,7 +1,11 @@ import numpy as np import pytest -from pandas import Series +from pandas import ( + NA, + Categorical, + Series, +) import pandas._testing as tm @@ -33,3 +37,15 @@ def test_duplicated_nan_none(keep, expected): result = ser.duplicated(keep=keep) tm.assert_series_equal(result, expected) + + +def test_duplicated_categorical_bool_na(): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, NA], categories=[True, False], ordered=True + ) + ) + result = ser.duplicated() + expected = Series([False, False, True, True, False]) + tm.assert_series_equal(result, expected)
Backport PR #44356
https://api.github.com/repos/pandas-dev/pandas/pulls/44402
2021-11-12T08:33:01Z
2021-11-12T09:46:04Z
2021-11-12T09:46:04Z
2021-11-12T09:51:40Z
BUG: DataFrame.stack with EA columns
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 8732e1c397ce5..d1e209adb1b8f 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -623,6 +623,8 @@ Reshaping - Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`) - Bug in :meth:`DataFrame.append` failing to retain ``index.name`` when appending a list of :class:`Series` objects (:issue:`44109`) - Fixed metadata propagation in :meth:`Dataframe.apply` method, consequently fixing the same issue for :meth:`Dataframe.transform`, :meth:`Dataframe.nunique` and :meth:`Dataframe.mode` (:issue:`28283`) +- Bug in :meth:`DataFrame.stack` with ``ExtensionDtype`` columns incorrectly raising (:issue:`43561`) +- Sparse ^^^^^^ diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 9c7107ab40644..6c6b14653df75 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -745,13 +745,15 @@ def _convert_level_number(level_num, columns): if frame._is_homogeneous_type and is_extension_array_dtype( frame.dtypes.iloc[0] ): + # TODO(EA2D): won't need special case, can go through .values + # paths below (might change to ._values) dtype = this[this.columns[loc]].dtypes.iloc[0] subset = this[this.columns[loc]] value_slice = dtype.construct_array_type()._concat_same_type( [x._values for _, x in subset.items()] ) - N, K = this.shape + N, K = subset.shape idx = np.arange(N * K).reshape(K, N).T.ravel() value_slice = value_slice.take(idx) diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 404baecdfecac..62512249dabfc 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -2099,3 +2099,27 @@ def test_stack_unsorted(self): result = DF.stack(["VAR", "TYP"]).sort_index() expected = DF.sort_index(axis=1).stack(["VAR", "TYP"]).sort_index() tm.assert_series_equal(result, expected) + + def 
test_stack_nullable_dtype(self): + # GH#43561 + columns = MultiIndex.from_product( + [["54511", "54515"], ["r", "t_mean"]], names=["station", "element"] + ) + index = Index([1, 2, 3], name="time") + + arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]]) + df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype()) + + result = df.stack("station") + + expected = df.astype(np.int64).stack("station").astype(pd.Int64Dtype()) + tm.assert_frame_equal(result, expected) + + # non-homogeneous case + df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype()) + result = df.stack("station") + + # TODO(EA2D): we get object dtype because DataFrame.values can't + # be an EA + expected = df.astype(object).stack("station") + tm.assert_frame_equal(result, expected)
- [x] closes #43561 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44401
2021-11-12T00:00:02Z
2021-11-12T03:11:32Z
2021-11-12T03:11:32Z
2021-11-12T17:19:01Z
CLN: Refactor extract multiindex header call
diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 8cdcc05f60266..339585810bec1 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -314,14 +314,14 @@ def _should_parse_dates(self, i: int) -> bool: @final def _extract_multi_indexer_columns( - self, header, index_names, col_names, passed_names: bool = False + self, header, index_names, passed_names: bool = False ): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """ if len(header) < 2: - return header[0], index_names, col_names, passed_names + return header[0], index_names, None, passed_names # the names are the tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 32ca3aaeba6cc..352dd998dda0f 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -78,25 +78,18 @@ def __init__(self, src: FilePathOrBuffer, **kwds): if self._reader.header is None: self.names = None else: - if len(self._reader.header) > 1: - # we have a multi index in the columns - # error: Cannot determine type of 'names' - # error: Cannot determine type of 'index_names' - # error: Cannot determine type of 'col_names' - ( - self.names, # type: ignore[has-type] - self.index_names, - self.col_names, - passed_names, - ) = self._extract_multi_indexer_columns( - self._reader.header, - self.index_names, # type: ignore[has-type] - self.col_names, # type: ignore[has-type] - passed_names, - ) - else: - # error: Cannot determine type of 'names' - self.names = list(self._reader.header[0]) # type: ignore[has-type] + # error: Cannot determine type of 'names' + # error: Cannot determine type of 'index_names' + ( + self.names, # type: ignore[has-type] + self.index_names, + self.col_names, + passed_names, + ) = 
self._extract_multi_indexer_columns( + self._reader.header, + self.index_names, # type: ignore[has-type] + passed_names, + ) # error: Cannot determine type of 'names' if self.names is None: # type: ignore[has-type] diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index af253fc062632..b0e868b260369 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -117,24 +117,16 @@ def __init__(self, f: FilePathOrBuffer | list, **kwds): # Now self.columns has the set of columns that we will process. # The original set is stored in self.original_columns. - if len(self.columns) > 1: - # we are processing a multi index column - # error: Cannot determine type of 'index_names' - # error: Cannot determine type of 'col_names' - ( - self.columns, - self.index_names, - self.col_names, - _, - ) = self._extract_multi_indexer_columns( - self.columns, - self.index_names, # type: ignore[has-type] - self.col_names, # type: ignore[has-type] - ) - # Update list of original names to include all indices. - self.num_original_columns = len(self.columns) - else: - self.columns = self.columns[0] + # error: Cannot determine type of 'index_names' + ( + self.columns, + self.index_names, + self.col_names, + _, + ) = self._extract_multi_indexer_columns( + self.columns, + self.index_names, # type: ignore[has-type] + ) # get popped off for index self.orig_names: list[int | str | tuple] = list(self.columns)
I am in the process of typing parts of the parser modules. I stumbled across this function. The check I have removed is performed inside the function and self.col_names is always None when inserting, so no need to pass it at all
https://api.github.com/repos/pandas-dev/pandas/pulls/44399
2021-11-11T21:13:29Z
2021-11-12T03:10:37Z
2021-11-12T03:10:37Z
2021-11-12T08:34:12Z
DEPR: PeriodIndex.astype(dt64)
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 99a66c7e5454b..8a8c1208b7b89 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -400,6 +400,8 @@ Other Deprecations - Deprecated casting behavior when setting timezone-aware value(s) into a timezone-aware :class:`Series` or :class:`DataFrame` column when the timezones do not match. Previously this cast to object dtype. In a future version, the values being inserted will be converted to the series or column's existing timezone (:issue:`37605`) - Deprecated casting behavior when passing an item with mismatched-timezone to :meth:`DatetimeIndex.insert`, :meth:`DatetimeIndex.putmask`, :meth:`DatetimeIndex.where` :meth:`DatetimeIndex.fillna`, :meth:`Series.mask`, :meth:`Series.where`, :meth:`Series.fillna`, :meth:`Series.shift`, :meth:`Series.replace`, :meth:`Series.reindex` (and :class:`DataFrame` column analogues). In the past this has cast to object dtype. In a future version, these will cast the passed item to the index or series's timezone (:issue:`37605`) - Deprecated the 'errors' keyword argument in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, and meth:`DataFrame.mask`; in a future version the argument will be removed (:issue:`44294`) +- Deprecated :meth:`PeriodIndex.astype` to ``datetime64[ns]`` or ``DatetimeTZDtype``, use ``obj.to_timestamp(how).tz_localize(dtype.tz)`` instead (:issue:`44398`) +- .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fd5b5bb7396af..1db476065a5c8 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -25,6 +25,7 @@ DtypeObj, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64_any_dtype, @@ -353,6 +354,14 @@ def astype(self, dtype, copy: bool = True, how=lib.no_default): if is_datetime64_any_dtype(dtype): # 'how' is index-specific, isn't part of the EA interface. + # GH#44398 deprecate astype(dt64), matching Series behavior + warnings.warn( + f"Converting {type(self).__name__} to DatetimeIndex with " + "'astype' is deprecated and will raise in a future version. " + "Use `obj.to_timestamp(how).tz_localize(dtype.tz)` instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) tz = getattr(dtype, "tz", None) return self.to_timestamp(how=how).tz_localize(tz) diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py index e2340a2db02f7..c44f2efed1fcc 100644 --- a/pandas/tests/indexes/period/methods/test_astype.py +++ b/pandas/tests/indexes/period/methods/test_astype.py @@ -164,7 +164,10 @@ def test_period_astype_to_timestamp(self): assert res.freq == exp.freq exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern") - res = pi.astype("datetime64[ns, US/Eastern]") + msg = "Use `obj.to_timestamp" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#44398 + res = pi.astype("datetime64[ns, US/Eastern]") tm.assert_index_equal(res, exp) assert res.freq == exp.freq diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index ed9243a5ba8d0..28be474b28de1 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -332,6 +332,9 @@ def 
test_astype_preserves_name(self, index, dtype): ): # This astype is deprecated in favor of tz_localize warn = FutureWarning + elif isinstance(index, PeriodIndex) and dtype == "datetime64[ns]": + # Deprecated in favor of to_timestamp GH#44398 + warn = FutureWarning try: # Some of these conversions cannot succeed so we use a try / except with tm.assert_produces_warning(warn):
- [ ] closes #xxxx - [x] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Match the Series/EA behavior
https://api.github.com/repos/pandas-dev/pandas/pulls/44398
2021-11-11T21:07:33Z
2021-11-14T03:20:32Z
2021-11-14T03:20:32Z
2021-12-24T17:09:40Z
ENH: Support timespec argument in Timestamp.isoformat()
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 8732e1c397ce5..a6751c486f25b 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -184,6 +184,7 @@ Other enhancements - :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`) - :meth:`read_excel` now accepts a ``decimal`` argument that allow the user to specify the decimal point when parsing string columns to numeric (:issue:`14403`) - :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`) +- :meth:`Timestamp.isoformat`, now handles the ``timespec`` argument from the base :class:``datetime`` class (:issue:`26131`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 2aebf75ba35d4..09bfc4527a428 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -295,7 +295,7 @@ cdef class _NaT(datetime): def __str__(self) -> str: return "NaT" - def isoformat(self, sep="T") -> str: + def isoformat(self, sep: str = "T", timespec: str = "auto") -> str: # This allows Timestamp(ts.isoformat()) to always correctly roundtrip. return "NaT" diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 613da5a691736..28b8158548ca8 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -737,9 +737,42 @@ cdef class _Timestamp(ABCTimestamp): # ----------------------------------------------------------------- # Rendering Methods - def isoformat(self, sep: str = "T") -> str: - base = super(_Timestamp, self).isoformat(sep=sep) - if self.nanosecond == 0: + def isoformat(self, sep: str = "T", timespec: str = "auto") -> str: + """ + Return the time formatted according to ISO. + + The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn'. 
+ By default, the fractional part is omitted if self.microsecond == 0 + and self.nanosecond == 0. + + If self.tzinfo is not None, the UTC offset is also attached, giving + giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn+HH:MM'. + + Parameters + ---------- + sep : str, default 'T' + String used as the separator between the date and time. + + timespec : str, default 'auto' + Specifies the number of additional terms of the time to include. + The valid values are 'auto', 'hours', 'minutes', 'seconds', + 'milliseconds', 'microseconds', and 'nanoseconds'. + + Returns + ------- + str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.isoformat() + '2020-03-14T15:32:52.192548651' + >>> ts.isoformat(timespec='microseconds') + '2020-03-14T15:32:52.192548' + """ + base_ts = "microseconds" if timespec == "nanoseconds" else timespec + base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts) + if self.nanosecond == 0 and timespec != "nanoseconds": return base if self.tzinfo is not None: @@ -747,10 +780,11 @@ cdef class _Timestamp(ABCTimestamp): else: base1, base2 = base, "" - if self.microsecond != 0: - base1 += f"{self.nanosecond:03d}" - else: - base1 += f".{self.nanosecond:09d}" + if timespec == "nanoseconds" or (timespec == "auto" and self.nanosecond): + if self.microsecond: + base1 += f"{self.nanosecond:03d}" + else: + base1 += f".{self.nanosecond:09d}" return base1 + base2 diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 21ed57813b60d..b9718249b38c8 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -182,6 +182,7 @@ def test_nat_methods_nat(method): def test_nat_iso_format(get_nat): # see gh-12300 assert get_nat("NaT").isoformat() == "NaT" + assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT" @pytest.mark.parametrize( @@ -325,6 +326,10 @@ def test_nat_doc_strings(compare): klass, method = compare klass_doc = getattr(klass, 
method).__doc__ + # Ignore differences with Timestamp.isoformat() as they're intentional + if klass == Timestamp and method == "isoformat": + return + nat_doc = getattr(NaT, method).__doc__ assert klass_doc == nat_doc diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py new file mode 100644 index 0000000000000..71dbf3539bdb2 --- /dev/null +++ b/pandas/tests/scalar/timestamp/test_formats.py @@ -0,0 +1,71 @@ +import pytest + +from pandas import Timestamp + +ts_no_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, +) +ts_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, +) +ts_ns_tz = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, + tz="UTC", +) +ts_no_us = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=0, + nanosecond=123, +) + + +@pytest.mark.parametrize( + "ts, timespec, expected_iso", + [ + (ts_no_ns, "auto", "2019-05-18T15:17:08.132263"), + (ts_no_ns, "seconds", "2019-05-18T15:17:08"), + (ts_no_ns, "nanoseconds", "2019-05-18T15:17:08.132263000"), + (ts_ns, "auto", "2019-05-18T15:17:08.132263123"), + (ts_ns, "hours", "2019-05-18T15"), + (ts_ns, "minutes", "2019-05-18T15:17"), + (ts_ns, "seconds", "2019-05-18T15:17:08"), + (ts_ns, "milliseconds", "2019-05-18T15:17:08.132"), + (ts_ns, "microseconds", "2019-05-18T15:17:08.132263"), + (ts_ns, "nanoseconds", "2019-05-18T15:17:08.132263123"), + (ts_ns_tz, "auto", "2019-05-18T15:17:08.132263123+00:00"), + (ts_ns_tz, "hours", "2019-05-18T15+00:00"), + (ts_ns_tz, "minutes", "2019-05-18T15:17+00:00"), + (ts_ns_tz, "seconds", "2019-05-18T15:17:08+00:00"), + (ts_ns_tz, "milliseconds", "2019-05-18T15:17:08.132+00:00"), + (ts_ns_tz, "microseconds", "2019-05-18T15:17:08.132263+00:00"), + (ts_ns_tz, "nanoseconds", 
"2019-05-18T15:17:08.132263123+00:00"), + (ts_no_us, "auto", "2019-05-18T15:17:08.000000123"), + ], +) +def test_isoformat(ts, timespec, expected_iso): + assert ts.isoformat(timespec=timespec) == expected_iso
- [x] closes #26131 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry This is an update of PR #38550. I added support for "nanoseconds" as an argument, expanded the test cases, and addressed most of the comments in the original PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/44397
2021-11-11T20:30:25Z
2021-11-14T03:19:08Z
2021-11-14T03:19:08Z
2021-11-14T03:19:18Z
[ArrayManager] Array version of putmask logic
diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 1cd9fe65407ba..e318659d9f355 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -89,6 +89,7 @@ new_block, to_native_types, ) +from pandas.core.internals.methods import putmask_flexible if TYPE_CHECKING: from pandas import Float64Index @@ -190,7 +191,7 @@ def __repr__(self) -> str: def apply( self: T, f, - align_keys: list[str] | None = None, + align_keys: list[str] | None = None, # not used for ArrayManager ignore_failures: bool = False, **kwargs, ) -> T: @@ -201,7 +202,6 @@ def apply( ---------- f : str or callable Name of the Array method to apply. - align_keys: List[str] or None, default None ignore_failures: bool, default False **kwargs Keywords to pass to `f` @@ -212,32 +212,14 @@ def apply( """ assert "filter" not in kwargs - align_keys = align_keys or [] result_arrays: list[np.ndarray] = [] result_indices: list[int] = [] # fillna: Series/DataFrame is responsible for making sure value is aligned - aligned_args = {k: kwargs[k] for k in align_keys} - if f == "apply": f = kwargs.pop("func") for i, arr in enumerate(self.arrays): - - if aligned_args: - - for k, obj in aligned_args.items(): - if isinstance(obj, (ABCSeries, ABCDataFrame)): - # The caller is responsible for ensuring that - # obj.axes[-1].equals(self.items) - if obj.ndim == 1: - kwargs[k] = obj.iloc[i] - else: - kwargs[k] = obj.iloc[:, i]._values - else: - # otherwise we have an array-like - kwargs[k] = obj[i] - try: if callable(f): applied = f(arr, **kwargs) @@ -352,12 +334,28 @@ def putmask(self, mask, new, align: bool = True): align_keys = ["mask"] new = extract_array(new, extract_numpy=True) - return self.apply_with_block( - "putmask", - align_keys=align_keys, - mask=mask, - new=new, - ) + kwargs = {"mask": mask, "new": new} + aligned_kwargs = {k: kwargs[k] for k in align_keys} + + for i, arr in enumerate(self.arrays): + + for k, obj in 
aligned_kwargs.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj._values + else: + kwargs[k] = obj.iloc[:, i]._values + else: + # otherwise we have an ndarray + if self.ndim == 2: + kwargs[k] = obj[i] + + new = putmask_flexible(arr, **kwargs) + self.arrays[i] = new + + return self def diff(self: T, n: int, axis: int) -> T: if axis == 1: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 46e5b5b9c53ad..33efa8e11ac32 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -110,6 +110,7 @@ is_empty_indexer, is_scalar_indexer, ) +from pandas.core.internals.methods import putmask_flexible_ea import pandas.core.missing as missing if TYPE_CHECKING: @@ -1417,30 +1418,7 @@ def putmask(self, mask, new) -> list[Block]: """ See Block.putmask.__doc__ """ - mask = extract_bool_array(mask) - - new_values = self.values - - if mask.ndim == new_values.ndim + 1: - # TODO(EA2D): unnecessary with 2D EAs - mask = mask.reshape(new_values.shape) - - try: - # Caller is responsible for ensuring matching lengths - new_values._putmask(mask, new) - except TypeError: - if not is_interval_dtype(self.dtype): - # Discussion about what we want to support in the general - # case GH#39584 - raise - - blk = self.coerce_to_target_dtype(new) - if blk.dtype == _dtype_obj: - # For now at least, only support casting e.g. 
- # Interval[int64]->Interval[float64], - raise - return blk.putmask(mask, new) - + new_values = putmask_flexible_ea(self.values, mask, new) nb = type(self)(new_values, placement=self._mgr_locs, ndim=self.ndim) return [nb] diff --git a/pandas/core/internals/methods.py b/pandas/core/internals/methods.py new file mode 100644 index 0000000000000..e844c905accd3 --- /dev/null +++ b/pandas/core/internals/methods.py @@ -0,0 +1,118 @@ +""" +Wrappers around array_algos with internals-specific logic +""" +from __future__ import annotations + +import numpy as np + +from pandas.core.dtypes.cast import ( + can_hold_element, + find_common_type, + infer_dtype_from, +) +from pandas.core.dtypes.common import is_interval_dtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + na_value_for_dtype, +) + +from pandas.core.array_algos.putmask import ( + extract_bool_array, + putmask_smart, + putmask_without_repeat, + setitem_datetimelike_compat, + validate_putmask, +) +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + + +def putmask_flexible(array: np.ndarray | ExtensionArray, mask, new): + """ + Putmask implementation for ArrayManager.putmask. + + Flexible version that will upcast if needed. + """ + if isinstance(array, np.ndarray): + return putmask_flexible_ndarray(array, mask=mask, new=new) + else: + return putmask_flexible_ea(array, mask=mask, new=new) + + +def putmask_flexible_ndarray(array: np.ndarray, mask, new): + """ + Putmask implementation for ArrayManager putmask for ndarray. + + Flexible version that will upcast if needed. 
+ """ + mask, noop = validate_putmask(array, mask) + assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) + + # if we are passed a scalar None, convert it here + if not array.dtype == "object" and is_valid_na_for_dtype(new, array.dtype): + new = na_value_for_dtype(array.dtype, compat=False) + + if can_hold_element(array, new): + putmask_without_repeat(array, mask, new) + return array + + elif noop: + return array + + dtype, _ = infer_dtype_from(new) + if dtype.kind in ["m", "M"]: + array = array.astype(object) + # convert to list to avoid numpy coercing datetimelikes to integers + new = setitem_datetimelike_compat(array, mask.sum(), new) + # putmask_smart below converts it back to array + np.putmask(array, mask, new) + return array + + new_values = putmask_smart(array, mask, new) + return new_values + + +def _coerce_to_target_dtype(array, new): + dtype, _ = infer_dtype_from(new, pandas_dtype=True) + new_dtype = find_common_type([array.dtype, dtype]) + return array.astype(new_dtype, copy=False) + + +def putmask_flexible_ea(array: ExtensionArray, mask, new): + """ + Putmask implementation for ArrayManager putmask for EA. + + Flexible version that will upcast if needed. + """ + mask = extract_bool_array(mask) + + if mask.ndim == array.ndim + 1: + # TODO(EA2D): unnecessary with 2D EAs + mask = mask.reshape(array.shape) + + if isinstance(array, NDArrayBackedExtensionArray): + if not can_hold_element(array, new): + array = _coerce_to_target_dtype(array, new) + return putmask_flexible(array, mask, new) + + try: + array._putmask(mask, new) + except TypeError: + if not is_interval_dtype(array.dtype): + # Discussion about what we want to support in the general + # case GH#39584 + raise + + array = _coerce_to_target_dtype(array, new) + if array.dtype == np.dtype("object"): + # For now at least, only support casting e.g. 
+ # Interval[int64]->Interval[float64], + raise + return putmask_flexible_ea(array, mask, new) + + return array diff --git a/pandas/tests/internals/test_api.py b/pandas/tests/internals/test_api.py index c759cc163106d..3afcf8917cea8 100644 --- a/pandas/tests/internals/test_api.py +++ b/pandas/tests/internals/test_api.py @@ -19,6 +19,7 @@ def test_namespace(): "blocks", "concat", "managers", + "methods", "construction", "array_manager", "base",
xref #39146 Trying to remove the usage of `apply_with_block` for putmask. (I also need to this for the Copy-on-Write branch to be able to perform the copy on write in putmask)
https://api.github.com/repos/pandas-dev/pandas/pulls/44396
2021-11-11T20:21:45Z
2023-02-22T12:59:22Z
null
2023-02-22T12:59:23Z
TST: parametrize arithmetic tests
diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index af70cdfe538bb..f3173e8f0eb57 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -11,7 +11,26 @@ array, ) import pandas._testing as tm -from pandas.core.arrays import PandasArray +from pandas.core.arrays import ( + BooleanArray, + PandasArray, +) + + +def assert_cannot_add(left, right, msg="cannot add"): + """ + Helper to assert that left and right cannot be added. + + Parameters + ---------- + left : object + right : object + msg : str, default "cannot add" + """ + with pytest.raises(TypeError, match=msg): + left + right + with pytest.raises(TypeError, match=msg): + right + left def assert_invalid_addsub_type(left, right, msg=None): @@ -79,21 +98,29 @@ def xbox2(x): # just exclude PandasArray[bool] if isinstance(x, PandasArray): return x._ndarray + if isinstance(x, BooleanArray): + # NB: we are assuming no pd.NAs for now + return x.astype(bool) return x + # rev_box: box to use for reversed comparisons + rev_box = xbox + if isinstance(right, Index) and isinstance(left, Series): + rev_box = np.array + result = xbox2(left == right) expected = xbox(np.zeros(result.shape, dtype=np.bool_)) tm.assert_equal(result, expected) result = xbox2(right == left) - tm.assert_equal(result, expected) + tm.assert_equal(result, rev_box(expected)) result = xbox2(left != right) tm.assert_equal(result, ~expected) result = xbox2(right != left) - tm.assert_equal(result, ~expected) + tm.assert_equal(result, rev_box(~expected)) msg = "|".join( [ diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index bff461dbc7038..87bbdfb3c808f 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -41,6 +41,7 @@ ) from pandas.core.ops import roperator from pandas.tests.arithmetic.common import ( + assert_cannot_add, assert_invalid_addsub_type, assert_invalid_comparison, 
get_upcast_box, @@ -99,6 +100,7 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_arra @pytest.mark.parametrize( "other", [ + # GH#4968 invalid date/int comparisons list(range(10)), np.arange(10), np.arange(10).astype(np.float32), @@ -111,13 +113,14 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_arra pd.period_range("1971-01-01", freq="D", periods=10).astype(object), ], ) - def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture): - # We don't parametrize this over box_with_array because listlike - # other plays poorly with assert_invalid_comparison reversed checks + def test_dt64arr_cmp_arraylike_invalid( + self, other, tz_naive_fixture, box_with_array + ): tz = tz_naive_fixture dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data - assert_invalid_comparison(dta, other, tm.to_array) + obj = tm.box_expected(dta, box_with_array) + assert_invalid_comparison(obj, other, box_with_array) def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture): tz = tz_naive_fixture @@ -215,18 +218,6 @@ def test_nat_comparisons( tm.assert_series_equal(result, expected) - def test_comparison_invalid(self, tz_naive_fixture, box_with_array): - # GH#4968 - # invalid date/int comparisons - tz = tz_naive_fixture - ser = Series(range(5)) - ser2 = Series(date_range("20010101", periods=5, tz=tz)) - - ser = tm.box_expected(ser, box_with_array) - ser2 = tm.box_expected(ser2, box_with_array) - - assert_invalid_comparison(ser, ser2, box_with_array) - @pytest.mark.parametrize( "data", [ @@ -315,8 +306,8 @@ def test_timestamp_compare_series(self, left, right): tm.assert_series_equal(result, expected) # Compare to NaT with series containing NaT - expected = left_f(s_nat, Timestamp("nat")) - result = right_f(Timestamp("nat"), s_nat) + expected = left_f(s_nat, NaT) + result = right_f(NaT, s_nat) tm.assert_series_equal(result, expected) def test_dt64arr_timestamp_equality(self, box_with_array): @@ -832,17 +823,6 @@ 
def test_dt64arr_add_timedeltalike_scalar( result = rng + two_hours tm.assert_equal(result, expected) - def test_dt64arr_iadd_timedeltalike_scalar( - self, tz_naive_fixture, two_hours, box_with_array - ): - tz = tz_naive_fixture - - rng = date_range("2000-01-01", "2000-02-01", tz=tz) - expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz) - - rng = tm.box_expected(rng, box_with_array) - expected = tm.box_expected(expected, box_with_array) - rng += two_hours tm.assert_equal(rng, expected) @@ -860,17 +840,6 @@ def test_dt64arr_sub_timedeltalike_scalar( result = rng - two_hours tm.assert_equal(result, expected) - def test_dt64arr_isub_timedeltalike_scalar( - self, tz_naive_fixture, two_hours, box_with_array - ): - tz = tz_naive_fixture - - rng = date_range("2000-01-01", "2000-02-01", tz=tz) - expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz) - - rng = tm.box_expected(rng, box_with_array) - expected = tm.box_expected(expected, box_with_array) - rng -= two_hours tm.assert_equal(rng, expected) @@ -1071,21 +1040,14 @@ def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array): dt64vals = dti.values dtarr = tm.box_expected(dti, box_with_array) - msg = "cannot add" - with pytest.raises(TypeError, match=msg): - dtarr + dt64vals - with pytest.raises(TypeError, match=msg): - dt64vals + dtarr + assert_cannot_add(dtarr, dt64vals) def test_dt64arr_add_timestamp_raises(self, box_with_array): # GH#22163 ensure DataFrame doesn't cast Timestamp to i8 idx = DatetimeIndex(["2011-01-01", "2011-01-02"]) + ts = idx[0] idx = tm.box_expected(idx, box_with_array) - msg = "cannot add" - with pytest.raises(TypeError, match=msg): - idx + Timestamp("2011-01-01") - with pytest.raises(TypeError, match=msg): - Timestamp("2011-01-01") + idx + assert_cannot_add(idx, ts) # ------------------------------------------------------------- # Other Invalid Addition/Subtraction @@ -1267,13 +1229,12 @@ def test_dti_add_tick_tzaware(self, tz_aware_fixture, 
box_with_array): dates = tm.box_expected(dates, box_with_array) expected = tm.box_expected(expected, box_with_array) - # TODO: parametrize over the scalar being added? radd? sub? - offset = dates + pd.offsets.Hour(5) - tm.assert_equal(offset, expected) - offset = dates + np.timedelta64(5, "h") - tm.assert_equal(offset, expected) - offset = dates + timedelta(hours=5) - tm.assert_equal(offset, expected) + # TODO: sub? + for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]: + offset = dates + scalar + tm.assert_equal(offset, expected) + offset = scalar + dates + tm.assert_equal(offset, expected) # ------------------------------------------------------------- # RelativeDelta DateOffsets @@ -1941,8 +1902,7 @@ def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): one / dt64_series # TODO: parametrize over box - @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"]) - def test_dt64_series_add_intlike(self, tz_naive_fixture, op): + def test_dt64_series_add_intlike(self, tz_naive_fixture): # GH#19123 tz = tz_naive_fixture dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz) @@ -1950,21 +1910,16 @@ def test_dt64_series_add_intlike(self, tz_naive_fixture, op): other = Series([20, 30, 40], dtype="uint8") - method = getattr(ser, op) msg = "|".join( [ "Addition/subtraction of integers and integer-arrays", "cannot subtract .* from ndarray", ] ) - with pytest.raises(TypeError, match=msg): - method(1) - with pytest.raises(TypeError, match=msg): - method(other) - with pytest.raises(TypeError, match=msg): - method(np.array(other)) - with pytest.raises(TypeError, match=msg): - method(pd.Index(other)) + assert_invalid_addsub_type(ser, 1, msg) + assert_invalid_addsub_type(ser, other, msg) + assert_invalid_addsub_type(ser, np.array(other), msg) + assert_invalid_addsub_type(ser, pd.Index(other), msg) # ------------------------------------------------------------- # Timezone-Centric Tests @@ -2062,7 +2017,9 @@ def 
test_dti_add_intarray_tick(self, int_holder, freq): dti = date_range("2016-01-01", periods=2, freq=freq) other = int_holder([4, -1]) - msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from" + msg = "|".join( + ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"] + ) assert_invalid_addsub_type(dti, other, msg) @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"]) @@ -2072,7 +2029,9 @@ def test_dti_add_intarray_non_tick(self, int_holder, freq): dti = date_range("2016-01-01", periods=2, freq=freq) other = int_holder([4, -1]) - msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from" + msg = "|".join( + ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"] + ) assert_invalid_addsub_type(dti, other, msg) @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) @@ -2222,10 +2181,7 @@ def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz): dtarr = tm.box_expected(dti, box_with_array) msg = "cannot add DatetimeArray and" - with pytest.raises(TypeError, match=msg): - dtarr + addend - with pytest.raises(TypeError, match=msg): - addend + dtarr + assert_cannot_add(dtarr, addend, msg) # ------------------------------------------------------------- diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 9932adccdbaf2..3bf5fdb257c2a 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -29,6 +29,7 @@ UInt64Index, ) from pandas.core.computation import expressions as expr +from pandas.tests.arithmetic.common import assert_invalid_comparison @pytest.fixture(params=[Index, Series, tm.to_array]) @@ -84,25 +85,13 @@ def test_operator_series_comparison_zerorank(self): expected = 0.0 > Series([1, 2, 3]) tm.assert_series_equal(result, expected) - def test_df_numeric_cmp_dt64_raises(self): + def test_df_numeric_cmp_dt64_raises(self, box_with_array): # GH#8932, GH#22163 ts = pd.Timestamp.now() - 
df = pd.DataFrame({"x": range(5)}) + obj = np.array(range(5)) + obj = tm.box_expected(obj, box_with_array) - msg = ( - "'[<>]' not supported between instances of 'numpy.ndarray' and 'Timestamp'" - ) - with pytest.raises(TypeError, match=msg): - df > ts - with pytest.raises(TypeError, match=msg): - df < ts - with pytest.raises(TypeError, match=msg): - ts < df - with pytest.raises(TypeError, match=msg): - ts > df - - assert not (df == ts).any().any() - assert (df != ts).all().all() + assert_invalid_comparison(obj, ts, box_with_array) def test_compare_invalid(self): # GH#8058 diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index 9a586fd553428..3069868ebb677 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -21,17 +21,15 @@ class TestObjectComparisons: - def test_comparison_object_numeric_nas(self): + def test_comparison_object_numeric_nas(self, comparison_op): ser = Series(np.random.randn(10), dtype=object) shifted = ser.shift(2) - ops = ["lt", "le", "gt", "ge", "eq", "ne"] - for op in ops: - func = getattr(operator, op) + func = comparison_op - result = func(ser, shifted) - expected = func(ser.astype(float), shifted.astype(float)) - tm.assert_series_equal(result, expected) + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) def test_object_comparisons(self): ser = Series(["a", "b", np.nan, "c", "a"]) @@ -141,11 +139,13 @@ def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): ser = Series(data, dtype=dtype) ser = tm.box_expected(ser, box_with_array) - msg = ( - "can only concatenate str|" - "did not contain a loop with signature matching types|" - "unsupported operand type|" - "must be str" + msg = "|".join( + [ + "can only concatenate str", + "did not contain a loop with signature matching types", + "unsupported operand type", + "must be str", + ] ) with pytest.raises(TypeError, 
match=msg): "foo_" + ser @@ -159,7 +159,9 @@ def test_objarr_add_invalid(self, op, box_with_array): obj_ser.name = "objects" obj_ser = tm.box_expected(obj_ser, box) - msg = "can only concatenate str|unsupported operand type|must be str" + msg = "|".join( + ["can only concatenate str", "unsupported operand type", "must be str"] + ) with pytest.raises(Exception, match=msg): op(obj_ser, 1) with pytest.raises(Exception, match=msg): diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index f8814a33292ec..f4404a3483e6f 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -26,6 +26,7 @@ from pandas.core import ops from pandas.core.arrays import TimedeltaArray from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, assert_invalid_comparison, get_upcast_box, ) @@ -39,6 +40,20 @@ class TestPeriodArrayLikeComparisons: # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison # tests will eventually end up here. 
+ @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) + def test_eq_scalar(self, other, box_with_array): + + idx = PeriodIndex(["2017", "2017", "2018"], freq="D") + idx = tm.box_expected(idx, box_with_array) + xbox = get_upcast_box(idx, other, True) + + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + + result = idx == other + + tm.assert_equal(result, expected) + def test_compare_zerodim(self, box_with_array): # GH#26689 make sure we unbox zero-dimensional arrays @@ -54,9 +69,20 @@ def test_compare_zerodim(self, box_with_array): tm.assert_equal(result, expected) @pytest.mark.parametrize( - "scalar", ["foo", Timestamp.now(), Timedelta(days=4), 9, 9.5] + "scalar", + [ + "foo", + Timestamp.now(), + Timedelta(days=4), + 9, + 9.5, + 2000, # specifically don't consider 2000 to match Period("2000", "D") + False, + None, + ], ) def test_compare_invalid_scalar(self, box_with_array, scalar): + # GH#28980 # comparison with scalar that cannot be interpreted as a Period pi = period_range("2000", periods=4) parr = tm.box_expected(pi, box_with_array) @@ -70,6 +96,11 @@ def test_compare_invalid_scalar(self, box_with_array, scalar): np.arange(4), np.arange(4).astype(np.float64), list(range(4)), + # match Period semantics by not treating integers as Periods + [2000, 2001, 2002, 2003], + np.arange(2000, 2004), + np.arange(2000, 2004).astype(object), + pd.Index([2000, 2001, 2002, 2003]), ], ) def test_compare_invalid_listlike(self, box_with_array, other): @@ -138,68 +169,27 @@ def test_compare_object_dtype(self, box_with_array, other_box): class TestPeriodIndexComparisons: # TODO: parameterize over boxes - @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) - def test_eq(self, other): - idx = PeriodIndex(["2017", "2017", "2018"], freq="D") - expected = np.array([True, True, False]) - result = idx == other - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize( - "other", - [ - 2017, - 
[2017, 2017, 2017], - np.array([2017, 2017, 2017]), - np.array([2017, 2017, 2017], dtype=object), - pd.Index([2017, 2017, 2017]), - ], - ) - def test_eq_integer_disallowed(self, other): - # match Period semantics by not treating integers as Periods - - idx = PeriodIndex(["2017", "2017", "2018"], freq="D") - expected = np.array([False, False, False]) - result = idx == other - - tm.assert_numpy_array_equal(result, expected) - msg = "|".join( - [ - "not supported between instances of 'Period' and 'int'", - r"Invalid comparison between dtype=period\[D\] and ", - ] - ) - with pytest.raises(TypeError, match=msg): - idx < other - with pytest.raises(TypeError, match=msg): - idx > other - with pytest.raises(TypeError, match=msg): - idx <= other - with pytest.raises(TypeError, match=msg): - idx >= other - def test_pi_cmp_period(self): idx = period_range("2007-01", periods=20, freq="M") + per = idx[10] - result = idx < idx[10] + result = idx < per exp = idx.values < idx.values[10] tm.assert_numpy_array_equal(result, exp) # Tests Period.__richcmp__ against ndarray[object, ndim=2] - result = idx.values.reshape(10, 2) < idx[10] + result = idx.values.reshape(10, 2) < per tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) # Tests Period.__richcmp__ against ndarray[object, ndim=0] - result = idx < np.array(idx[10]) + result = idx < np.array(per) tm.assert_numpy_array_equal(result, exp) # TODO: moved from test_datetime64; de-duplicate with version below def test_parr_cmp_period_scalar2(self, box_with_array): pi = period_range("2000-01-01", periods=10, freq="D") - val = Period("2000-01-04", freq="D") - + val = pi[3] expected = [x > val for x in pi] ser = tm.box_expected(pi, box_with_array) @@ -326,23 +316,24 @@ def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_pi_cmp_nat(self, freq): idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + per = idx1[1] - result = idx1 > 
Period("2011-02", freq=freq) + result = idx1 > per exp = np.array([False, False, False, True]) tm.assert_numpy_array_equal(result, exp) - result = Period("2011-02", freq=freq) < idx1 + result = per < idx1 tm.assert_numpy_array_equal(result, exp) - result = idx1 == Period("NaT", freq=freq) + result = idx1 == pd.NaT exp = np.array([False, False, False, False]) tm.assert_numpy_array_equal(result, exp) - result = Period("NaT", freq=freq) == idx1 + result = pd.NaT == idx1 tm.assert_numpy_array_equal(result, exp) - result = idx1 != Period("NaT", freq=freq) + result = idx1 != pd.NaT exp = np.array([True, True, True, True]) tm.assert_numpy_array_equal(result, exp) - result = Period("NaT", freq=freq) != idx1 + result = pd.NaT != idx1 tm.assert_numpy_array_equal(result, exp) idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq) @@ -475,28 +466,29 @@ def test_pi_comp_period(self): idx = PeriodIndex( ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" ) + per = idx[2] - f = lambda x: x == Period("2011-03", freq="M") + f = lambda x: x == per exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") == x + f = lambda x: per == x self._check(idx, f, exp) - f = lambda x: x != Period("2011-03", freq="M") + f = lambda x: x != per exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") != x + f = lambda x: per != x self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: x > Period("2011-03", freq="M") + f = lambda x: x > per exp = np.array([False, False, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) @@ -504,11 
+496,12 @@ def test_pi_comp_period_nat(self): idx = PeriodIndex( ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" ) + per = idx[2] - f = lambda x: x == Period("2011-03", freq="M") + f = lambda x: x == per exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") == x + f = lambda x: per == x self._check(idx, f, exp) f = lambda x: x == pd.NaT @@ -517,10 +510,10 @@ def test_pi_comp_period_nat(self): f = lambda x: pd.NaT == x self._check(idx, f, exp) - f = lambda x: x != Period("2011-03", freq="M") + f = lambda x: x != per exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") != x + f = lambda x: per != x self._check(idx, f, exp) f = lambda x: x != pd.NaT @@ -529,11 +522,11 @@ def test_pi_comp_period_nat(self): f = lambda x: pd.NaT != x self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: x < Period("2011-03", freq="M") + f = lambda x: x < per exp = np.array([True, False, False, False], dtype=np.bool_) self._check(idx, f, exp) @@ -696,20 +689,6 @@ def test_sub_n_gt_1_offsets(self, offset, kwd_name, n): # ------------------------------------------------------------- # Invalid Operations - @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])]) - @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) - def test_parr_add_sub_float_raises(self, op, other, box_with_array): - dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D") - pi = dti.to_period("D") - pi = tm.box_expected(pi, box_with_array) - msg = ( - r"unsupported operand type\(s\) for [+-]: .* and .*|" - "Concatenation operation is not implemented for NumPy arrays" - ) - - with pytest.raises(TypeError, match=msg): - op(pi, other) - @pytest.mark.parametrize( "other", [ @@ 
-723,6 +702,8 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array): pd.date_range("2016-01-01", periods=3, freq="S")._data, pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data, # Miscellaneous invalid types + 3.14, + np.array([2.0, 3.0, 4.0]), ], ) def test_parr_add_sub_invalid(self, other, box_with_array): @@ -730,11 +711,15 @@ def test_parr_add_sub_invalid(self, other, box_with_array): rng = period_range("1/1/2000", freq="D", periods=3) rng = tm.box_expected(rng, box_with_array) - msg = ( - r"(:?cannot add PeriodArray and .*)" - r"|(:?cannot subtract .* from (:?a\s)?.*)" - r"|(:?unsupported operand type\(s\) for \+: .* and .*)" + msg = "|".join( + [ + r"(:?cannot add PeriodArray and .*)", + r"(:?cannot subtract .* from (:?a\s)?.*)", + r"(:?unsupported operand type\(s\) for \+: .* and .*)", + r"unsupported operand type\(s\) for [+-]: .* and .*", + ] ) + assert_invalid_addsub_type(rng, other, msg) with pytest.raises(TypeError, match=msg): rng + other with pytest.raises(TypeError, match=msg): @@ -1034,9 +1019,11 @@ def test_pi_add_timedeltalike_minute_gt1(self, three_days): result = rng - other tm.assert_index_equal(result, expected) - msg = ( - r"(:?bad operand type for unary -: 'PeriodArray')" - r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])" + msg = "|".join( + [ + r"(:?bad operand type for unary -: 'PeriodArray')", + r"(:?cannot subtract PeriodArray from timedelta64\[[hD]\])", + ] ) with pytest.raises(TypeError, match=msg): other - rng @@ -1261,7 +1248,7 @@ def test_parr_add_sub_object_array(self): class TestPeriodSeriesArithmetic: - def test_ops_series_timedelta(self): + def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array): # GH#13043 ser = Series( [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], @@ -1270,21 +1257,18 @@ def test_ops_series_timedelta(self): assert ser.dtype == "Period[D]" expected = Series( - [Period("2015-01-02", freq="D"), Period("2015-01-03", freq="D")], + 
[Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")], name="xxx", ) - result = ser + Timedelta("1 days") - tm.assert_series_equal(result, expected) - - result = Timedelta("1 days") + ser - tm.assert_series_equal(result, expected) + obj = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) - result = ser + pd.tseries.offsets.Day() - tm.assert_series_equal(result, expected) + result = obj + three_days + tm.assert_equal(result, expected) - result = pd.tseries.offsets.Day() + ser - tm.assert_series_equal(result, expected) + result = three_days + obj + tm.assert_equal(result, expected) def test_ops_series_period(self): # GH#13043 @@ -1368,9 +1352,13 @@ def test_parr_ops_errors(self, ng, func, box_with_array): ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" ) obj = tm.box_expected(idx, box_with_array) - msg = ( - r"unsupported operand type\(s\)|can only concatenate|" - r"must be str|object to str implicitly" + msg = "|".join( + [ + r"unsupported operand type\(s\)", + "can only concatenate", + r"must be str", + "object to str implicitly", + ] ) with pytest.raises(TypeError, match=msg): @@ -1544,11 +1532,3 @@ def test_pi_sub_period_nat(self): exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") tm.assert_index_equal(idx - Period("NaT", freq="M"), exp) tm.assert_index_equal(Period("NaT", freq="M") - idx, exp) - - @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) - def test_comparison_operations(self, scalars): - # GH 28980 - expected = Series([False, False]) - s = Series([Period("2019"), Period("2020")], dtype="period[A-DEC]") - result = s == scalars - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 86980ad42766e..8078e8c90a2bf 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -84,11 +84,6 @@ def 
test_compare_timedelta64_zerodim(self, box_with_array): expected = tm.box_expected(expected, xbox) tm.assert_equal(res, expected) - msg = "Invalid comparison between dtype" - with pytest.raises(TypeError, match=msg): - # zero-dim of wrong dtype should still raise - tdi >= np.array(4) - @pytest.mark.parametrize( "td_scalar", [ @@ -120,6 +115,7 @@ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar): Timestamp.now().to_datetime64(), Timestamp.now().to_pydatetime(), Timestamp.now().date(), + np.array(4), # zero-dim mismatched dtype ], ) def test_td64_comparisons_invalid(self, box_with_array, invalid): @@ -146,17 +142,18 @@ def test_td64_comparisons_invalid(self, box_with_array, invalid): pd.period_range("1971-01-01", freq="D", periods=10).astype(object), ], ) - def test_td64arr_cmp_arraylike_invalid(self, other): + def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array): # We don't parametrize this over box_with_array because listlike # other plays poorly with assert_invalid_comparison reversed checks rng = timedelta_range("1 days", periods=10)._data - assert_invalid_comparison(rng, other, tm.to_array) + rng = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(rng, other, box_with_array) def test_td64arr_cmp_mixed_invalid(self): rng = timedelta_range("1 days", periods=5)._data - other = np.array([0, 1, 2, rng[3], Timestamp.now()]) + result = rng == other expected = np.array([False, False, False, True, False]) tm.assert_numpy_array_equal(result, expected) @@ -1623,10 +1620,7 @@ def test_td64arr_div_td64_scalar(self, m, unit, box_with_array): box = box_with_array xbox = np.ndarray if box is pd.array else box - startdate = Series(pd.date_range("2013-01-01", "2013-01-03")) - enddate = Series(pd.date_range("2013-03-01", "2013-03-03")) - - ser = enddate - startdate + ser = Series([Timedelta(days=59)] * 3) ser[2] = np.nan flat = ser ser = tm.box_expected(ser, box)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44395
2021-11-11T18:15:36Z
2021-11-11T21:02:19Z
2021-11-11T21:02:18Z
2021-11-11T21:08:49Z
fix documentation on options in read_csv
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 49c2b28207ed5..6d3cc84a31d05 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -86,7 +86,7 @@ delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``. delimiter : str, default ``None`` Alias for sep. -header : int, list of int, default 'infer' +header : int, list of int, None, default 'infer' Row number(s) to use as the column names, and the start of the data. Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column
Noticed this due to pylance (typing) showing header=None was wrong. Don't know how it will propagate to the typings. - [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44391
2021-11-11T13:09:51Z
2021-11-11T14:15:28Z
2021-11-11T14:15:28Z
2021-11-11T14:15:32Z
BUG: handle NaNs in FloatingArray.equals
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 8db9be21ca4ef..466c8b21e89bf 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -692,6 +692,7 @@ Other - Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`) - Bug in :meth:`Series.to_frame` and :meth:`Index.to_frame` ignoring the ``name`` argument when ``name=None`` is explicitly passed (:issue:`44212`) - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` with ``value=None`` and ExtensionDtypes (:issue:`44270`) +- Bug in :meth:`FloatingArray.equals` failing to consider two arrays equal if they contain ``np.nan`` values (:issue:`44382`) - .. ***DO NOT USE THIS SECTION*** diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index b11b11ded2f22..1797f1aff4235 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -627,6 +627,21 @@ def value_counts(self, dropna: bool = True) -> Series: return Series(counts, index=index) + @doc(ExtensionArray.equals) + def equals(self, other) -> bool: + if type(self) != type(other): + return False + if other.dtype != self.dtype: + return False + + # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT + # equal. 
+ return np.array_equal(self._mask, other._mask) and np.array_equal( + self._data[~self._mask], + other._data[~other._mask], + equal_nan=True, + ) + def _reduce(self, name: str, *, skipna: bool = True, **kwargs): if name in {"any", "all"}: return getattr(self, name)(skipna=skipna, **kwargs) diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py index c4163c25ae74d..a429649f1ce1d 100644 --- a/pandas/tests/arrays/floating/test_comparison.py +++ b/pandas/tests/arrays/floating/test_comparison.py @@ -1,7 +1,9 @@ +import numpy as np import pytest import pandas as pd import pandas._testing as tm +from pandas.core.arrays import FloatingArray from pandas.tests.arrays.masked_shared import ( ComparisonOps, NumericOps, @@ -34,3 +36,30 @@ def test_equals(): a1 = pd.array([1, 2, None], dtype="Float64") a2 = pd.array([1, 2, None], dtype="Float32") assert a1.equals(a2) is False + + +def test_equals_nan_vs_na(): + # GH#44382 + + mask = np.zeros(3, dtype=bool) + data = np.array([1.0, np.nan, 3.0], dtype=np.float64) + + left = FloatingArray(data, mask) + assert left.equals(left) + tm.assert_extension_array_equal(left, left) + + assert left.equals(left.copy()) + assert left.equals(FloatingArray(data.copy(), mask.copy())) + + mask2 = np.array([False, True, False], dtype=bool) + data2 = np.array([1.0, 2.0, 3.0], dtype=np.float64) + right = FloatingArray(data2, mask2) + assert right.equals(right) + tm.assert_extension_array_equal(right, right) + + assert not left.equals(right) + + # with mask[1] = True, the only difference is data[1], which should + # not matter for equals + mask[1] = True + assert left.equals(right)
- [x] closes #44382 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44390
2021-11-11T03:13:17Z
2021-11-13T17:21:41Z
2021-11-13T17:21:40Z
2021-11-13T19:24:15Z
Backport PR #44388 on branch 1.3.x (CI: Use conda-forge to create Python 3.10 env)
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 78506e3cb61ce..d167397fd09f9 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -53,6 +53,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pandas-sdist + channels: conda-forge python-version: '${{ matrix.python-version }}' - name: Install pandas from sdist
Backport PR #44388: CI: Use conda-forge to create Python 3.10 env
https://api.github.com/repos/pandas-dev/pandas/pulls/44389
2021-11-11T02:48:36Z
2021-11-11T03:50:32Z
2021-11-11T03:50:32Z
2021-11-11T03:50:32Z
CI: Use conda-forge to create Python 3.10 env
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 7692dc522522f..92a9f2a5fb97c 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -53,6 +53,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pandas-sdist + channels: conda-forge python-version: '${{ matrix.python-version }}' - name: Install pandas from sdist
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Anaconda messed up their recipe :(. Fixes the sdist job.
https://api.github.com/repos/pandas-dev/pandas/pulls/44388
2021-11-11T01:40:10Z
2021-11-11T02:48:10Z
2021-11-11T02:48:10Z
2021-11-11T03:01:13Z
ENH: implement EA._putmask
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 8deeb44f65188..674379f6d65f8 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -310,7 +310,7 @@ def _wrap_reduction_result(self, axis: int | None, result): # ------------------------------------------------------------------------ # __array_function__ methods - def putmask(self, mask: np.ndarray, value) -> None: + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: """ Analogue to np.putmask(self, mask, value) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 70841197761a9..a64aef64ab49f 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1409,6 +1409,33 @@ def insert(self: ExtensionArrayT, loc: int, item) -> ExtensionArrayT: return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]]) + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + If listlike, must be arraylike with same length as self. + + Returns + ------- + None + + Notes + ----- + Unlike np.putmask, we do not repeat listlike values with mismatched length. + 'value' should either be a scalar or an arraylike with the same length + as self. 
+ """ + if is_list_like(value): + val = value[mask] + else: + val = value + + self[mask] = val + def _where( self: ExtensionArrayT, mask: npt.NDArray[np.bool_], value ) -> ExtensionArrayT: diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d5718d59bf8b0..01bf5ec0633b5 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -36,6 +36,7 @@ PositionalIndexer, ScalarIndexer, SequenceIndexer, + npt, ) from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -1482,15 +1483,15 @@ def to_tuples(self, na_tuple=True) -> np.ndarray: # --------------------------------------------------------------------- - def putmask(self, mask: np.ndarray, value) -> None: + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: value_left, value_right = self._validate_setitem_value(value) if isinstance(self._left, np.ndarray): np.putmask(self._left, mask, value_left) np.putmask(self._right, mask, value_right) else: - self._left.putmask(mask, value_left) - self._right.putmask(mask, value_right) + self._left._putmask(mask, value_left) + self._right._putmask(mask, value_right) def insert(self: IntervalArrayT, loc: int, item: Interval) -> IntervalArrayT: """ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ba7dde7d2a4d8..2514702b036dd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4444,8 +4444,7 @@ def _join_non_unique( if isinstance(join_array, np.ndarray): np.putmask(join_array, mask, right) else: - # error: "ExtensionArray" has no attribute "putmask" - join_array.putmask(mask, right) # type: ignore[attr-defined] + join_array._putmask(mask, right) join_index = self._wrap_joined_index(join_array, other) @@ -5051,8 +5050,7 @@ def putmask(self, mask, value) -> Index: else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent - # error: "ExtensionArray" has no attribute "putmask" - 
values.putmask(mask, value) # type: ignore[attr-defined] + values._putmask(mask, value) return self._shallow_copy(values) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2589015e0f0b1..66a40b962e183 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1415,15 +1415,13 @@ def putmask(self, mask, new) -> list[Block]: new_values = self.values - if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): - new = new[mask] - if mask.ndim == new_values.ndim + 1: # TODO(EA2D): unnecessary with 2D EAs mask = mask.reshape(new_values.shape) try: - new_values[mask] = new + # Caller is responsible for ensuring matching lengths + new_values._putmask(mask, new) except TypeError: if not is_interval_dtype(self.dtype): # Discussion about what we want to support in the general @@ -1704,7 +1702,7 @@ def putmask(self, mask, new) -> list[Block]: return self.coerce_to_target_dtype(new).putmask(mask, new) arr = self.values - arr.T.putmask(mask, new) + arr.T._putmask(mask, new) return [self] def where(self, other, cond) -> list[Block]:
Broken off from #43930
https://api.github.com/repos/pandas-dev/pandas/pulls/44387
2021-11-11T00:42:15Z
2021-11-11T17:50:22Z
2021-11-11T17:50:22Z
2021-11-11T17:53:41Z
TST: make get_upcast_box more flexible
diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index e8283a222d86a..c2c55a4060f7a 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -259,7 +259,7 @@ def box_expected(expected, box_cls, transpose=True): expected = DatetimeArray(expected) elif box_cls is TimedeltaArray: expected = TimedeltaArray(expected) - elif box_cls is np.ndarray: + elif box_cls is np.ndarray or box_cls is np.array: expected = np.array(expected) elif box_cls is to_array: expected = to_array(expected) diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index 6f4e35ad4dfb2..af70cdfe538bb 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -34,26 +34,29 @@ def assert_invalid_addsub_type(left, right, msg=None): right - left -def get_expected_box(box): +def get_upcast_box(left, right, is_cmp: bool = False): """ - Get the box to use for 'expected' in a comparison operation. - """ - if box in [Index, array]: - return np.ndarray - return box - + Get the box to use for 'expected' in an arithmetic or comparison operation. -def get_upcast_box(box, vector): - """ - Given two box-types, find the one that takes priority. + Parameters + left : Any + right : Any + is_cmp : bool, default False + Whether the operation is a comparison method. 
""" - if box is DataFrame or isinstance(vector, DataFrame): + + if isinstance(left, DataFrame) or isinstance(right, DataFrame): return DataFrame - if box is Series or isinstance(vector, Series): + if isinstance(left, Series) or isinstance(right, Series): + if is_cmp and isinstance(left, Index): + # Index does not defer for comparisons + return np.array return Series - if box is Index or isinstance(vector, Index): + if isinstance(left, Index) or isinstance(right, Index): + if is_cmp: + return np.array return Index - return box + return tm.to_array def assert_invalid_comparison(left, right, box): diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 82f1e60f0aea5..44a70d3933b66 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -43,7 +43,6 @@ from pandas.tests.arithmetic.common import ( assert_invalid_addsub_type, assert_invalid_comparison, - get_expected_box, get_upcast_box, ) @@ -60,12 +59,12 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array): # Test comparison with zero-dimensional array is unboxed tz = tz_naive_fixture box = box_with_array - xbox = get_expected_box(box) dti = date_range("20130101", periods=3, tz=tz) other = np.array(dti.to_numpy()[0]) dtarr = tm.box_expected(dti, box) + xbox = get_upcast_box(dtarr, other, True) result = dtarr <= other expected = np.array([True, False, False]) expected = tm.box_expected(expected, xbox) @@ -147,12 +146,12 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly tz = tz_naive_fixture box = box_with_array - xbox = get_expected_box(box) ts = Timestamp.now(tz) ser = Series([ts, NaT]) obj = tm.box_expected(ser, box) + xbox = get_upcast_box(obj, ts, True) expected = Series([True, False], dtype=np.bool_) expected = tm.box_expected(expected, xbox) @@ -244,10 +243,9 @@ def test_nat_comparisons_scalar(self, dtype, 
data, box_with_array): # on older numpys (since they check object identity) return - xbox = get_expected_box(box) - left = Series(data, dtype=dtype) left = tm.box_expected(left, box) + xbox = get_upcast_box(left, NaT, True) expected = [False, False, False] expected = tm.box_expected(expected, xbox) @@ -323,10 +321,10 @@ def test_timestamp_compare_series(self, left, right): def test_dt64arr_timestamp_equality(self, box_with_array): # GH#11034 - xbox = get_expected_box(box_with_array) ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) ser = tm.box_expected(ser, box_with_array) + xbox = get_upcast_box(ser, ser, True) result = ser != ser expected = tm.box_expected([False, False, True], xbox) @@ -417,13 +415,12 @@ def test_dti_cmp_nat(self, dtype, box_with_array): # on older numpys (since they check object identity) return - xbox = get_expected_box(box_with_array) - left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")]) right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")]) left = tm.box_expected(left, box_with_array) right = tm.box_expected(right, box_with_array) + xbox = get_upcast_box(left, right, True) lhs, rhs = left, right if dtype is object: @@ -642,12 +639,11 @@ def test_scalar_comparison_tzawareness( self, comparison_op, other, tz_aware_fixture, box_with_array ): op = comparison_op - box = box_with_array tz = tz_aware_fixture dti = date_range("2016-01-01", periods=2, tz=tz) - xbox = get_expected_box(box) dtarr = tm.box_expected(dti, box_with_array) + xbox = get_upcast_box(dtarr, other, True) if op in [operator.eq, operator.ne]: exbool = op is operator.ne expected = np.array([exbool, exbool], dtype=bool) @@ -2421,14 +2417,13 @@ def test_dti_addsub_offset_arraylike( self, tz_naive_fixture, names, op, index_or_series ): # GH#18849, GH#19744 - box = pd.Index other_box = index_or_series tz = tz_naive_fixture dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0]) other = other_box([pd.offsets.MonthEnd(), 
pd.offsets.Day(n=2)], name=names[1]) - xbox = get_upcast_box(box, other) + xbox = get_upcast_box(dti, other) with tm.assert_produces_warning(PerformanceWarning): res = op(dti, other) @@ -2448,7 +2443,7 @@ def test_dti_addsub_object_arraylike( dti = date_range("2017-01-01", periods=2, tz=tz) dtarr = tm.box_expected(dti, box_with_array) other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)]) - xbox = get_upcast_box(box_with_array, other) + xbox = get_upcast_box(dtarr, other) expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) expected = tm.box_expected(expected, xbox) diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 41c2cb2cc4f1e..f8814a33292ec 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -27,7 +27,7 @@ from pandas.core.arrays import TimedeltaArray from pandas.tests.arithmetic.common import ( assert_invalid_comparison, - get_expected_box, + get_upcast_box, ) # ------------------------------------------------------------------ @@ -41,12 +41,13 @@ class TestPeriodArrayLikeComparisons: def test_compare_zerodim(self, box_with_array): # GH#26689 make sure we unbox zero-dimensional arrays - xbox = get_expected_box(box_with_array) pi = period_range("2000", periods=4) other = np.array(pi.to_numpy()[0]) pi = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(pi, other, True) + result = pi <= other expected = np.array([True, False, False, False]) expected = tm.box_expected(expected, xbox) @@ -78,11 +79,11 @@ def test_compare_invalid_listlike(self, box_with_array, other): @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)]) def test_compare_object_dtype(self, box_with_array, other_box): - xbox = get_expected_box(box_with_array) pi = period_range("2000", periods=5) parr = tm.box_expected(pi, box_with_array) other = other_box(pi) + xbox = get_upcast_box(parr, other, True) expected = np.array([True, True, 
True, True, True]) expected = tm.box_expected(expected, xbox) @@ -195,14 +196,15 @@ def test_pi_cmp_period(self): # TODO: moved from test_datetime64; de-duplicate with version below def test_parr_cmp_period_scalar2(self, box_with_array): - xbox = get_expected_box(box_with_array) - pi = period_range("2000-01-01", periods=10, freq="D") val = Period("2000-01-04", freq="D") + expected = [x > val for x in pi] ser = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(ser, val, True) + expected = tm.box_expected(expected, xbox) result = ser > val tm.assert_equal(result, expected) @@ -216,11 +218,10 @@ def test_parr_cmp_period_scalar2(self, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_period_scalar(self, freq, box_with_array): # GH#13200 - xbox = get_expected_box(box_with_array) - base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) per = Period("2011-02", freq=freq) + xbox = get_upcast_box(base, per, True) exp = np.array([False, True, False, False]) exp = tm.box_expected(exp, xbox) @@ -255,14 +256,14 @@ def test_parr_cmp_period_scalar(self, freq, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_pi(self, freq, box_with_array): # GH#13200 - xbox = get_expected_box(box_with_array) - base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) # TODO: could also box idx? 
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq) + xbox = get_upcast_box(base, idx, True) + exp = np.array([False, False, True, False]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base == idx, exp) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index b8fa6c79b1b93..86980ad42766e 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1542,13 +1542,13 @@ def test_tdi_mul_float_series(self, box_with_array): ) def test_tdi_rmul_arraylike(self, other, box_with_array): box = box_with_array - xbox = get_upcast_box(box, other) tdi = TimedeltaIndex(["1 Day"] * 10) - expected = timedelta_range("1 days", "10 days") - expected._data.freq = None + expected = timedelta_range("1 days", "10 days")._with_freq(None) tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, other) + expected = tm.box_expected(expected, xbox) result = other * tdi @@ -2000,7 +2000,6 @@ def test_td64arr_rmul_numeric_array( ): # GH#4521 # divide/multiply by integers - xbox = get_upcast_box(box_with_array, vector) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) @@ -2008,6 +2007,8 @@ def test_td64arr_rmul_numeric_array( expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + expected = tm.box_expected(expected, xbox) result = tdser * vector @@ -2026,7 +2027,6 @@ def test_td64arr_div_numeric_array( ): # GH#4521 # divide/multiply by integers - xbox = get_upcast_box(box_with_array, vector) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) @@ -2034,6 +2034,7 @@ def test_td64arr_div_numeric_array( expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) + xbox = 
get_upcast_box(tdser, vector) expected = tm.box_expected(expected, xbox) result = tdser / vector @@ -2085,7 +2086,7 @@ def test_td64arr_mul_int_series(self, box_with_array, names): ) tdi = tm.box_expected(tdi, box) - xbox = get_upcast_box(box, ser) + xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) @@ -2117,9 +2118,8 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names): name=xname, ) - xbox = get_upcast_box(box, ser) - tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) result = ser.__rtruediv__(tdi)
Will make it easier to parametrize these tests
https://api.github.com/repos/pandas-dev/pandas/pulls/44385
2021-11-10T20:23:09Z
2021-11-11T00:39:48Z
2021-11-11T00:39:48Z
2021-11-11T00:41:29Z
TST/COMPAT: update csv test to infer time with pyarrow>=6.0
diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 9bf7139769baa..f9b9409317774 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -12,9 +12,11 @@ pa_version_under3p0 = _palv < Version("3.0.0") pa_version_under4p0 = _palv < Version("4.0.0") pa_version_under5p0 = _palv < Version("5.0.0") + pa_version_under6p0 = _palv < Version("6.0.0") except ImportError: pa_version_under1p0 = True pa_version_under2p0 = True pa_version_under3p0 = True pa_version_under4p0 = True pa_version_under5p0 = True + pa_version_under6p0 = True diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 17c107814995c..c8bea9592e82a 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -26,6 +26,7 @@ is_platform_windows, np_array_datetime64_compat, ) +from pandas.compat.pyarrow import pa_version_under6p0 import pandas as pd from pandas import ( @@ -431,6 +432,11 @@ def test_date_col_as_index_col(all_parsers): columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index, ) + if parser.engine == "pyarrow" and not pa_version_under6p0: + # https://github.com/pandas-dev/pandas/issues/44231 + # pyarrow 6.0 starts to infer time type + expected["X2"] = pd.to_datetime("1970-01-01" + expected["X2"]).dt.time + tm.assert_frame_equal(result, expected)
Closes #44231
https://api.github.com/repos/pandas-dev/pandas/pulls/44381
2021-11-10T08:47:32Z
2021-11-10T19:25:11Z
2021-11-10T19:25:10Z
2021-11-10T19:25:49Z
REF/TST: collect index tests
diff --git a/pandas/tests/indexes/datetimelike_/test_is_monotonic.py b/pandas/tests/indexes/datetimelike_/test_is_monotonic.py new file mode 100644 index 0000000000000..22247c982edbc --- /dev/null +++ b/pandas/tests/indexes/datetimelike_/test_is_monotonic.py @@ -0,0 +1,46 @@ +from pandas import ( + Index, + NaT, + date_range, +) + + +def test_is_monotonic_with_nat(): + # GH#31437 + # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex, + # in particular never be monotonic when we have NaT + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + tdi = Index(dti.view("timedelta64[ns]")) + + for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert obj.is_monotonic + assert obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti1 = dti.insert(0, NaT) + pi1 = dti1.to_period("D") + tdi1 = Index(dti1.view("timedelta64[ns]")) + + for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert not obj.is_monotonic + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti2 = dti.insert(3, NaT) + pi2 = dti2.to_period("H") + tdi2 = Index(dti2.view("timedelta64[ns]")) + + for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]: + if isinstance(obj, Index): + # i.e. 
not Engines + assert not obj.is_monotonic + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique diff --git a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py new file mode 100644 index 0000000000000..128a8b3e10eb3 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py @@ -0,0 +1,20 @@ +from pandas import ( + DataFrame, + DatetimeIndex, +) +import pandas._testing as tm + + +def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): + # GH#6538: Check that DatetimeIndex and its TimeStamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + result = dates.isocalendar() + expected_data_frame = DataFrame( + [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], + columns=["year", "week", "day"], + index=dates, + dtype="UInt32", + ) + tm.assert_frame_equal(result, expected_data_frame) diff --git a/pandas/tests/indexes/datetimes/test_asof.py b/pandas/tests/indexes/datetimes/test_asof.py index c794aefc6a48b..7adc400302cb9 100644 --- a/pandas/tests/indexes/datetimes/test_asof.py +++ b/pandas/tests/indexes/datetimes/test_asof.py @@ -1,8 +1,12 @@ +from datetime import timedelta + from pandas import ( Index, Timestamp, date_range, + isna, ) +import pandas._testing as tm class TestAsOf: @@ -12,3 +16,16 @@ def test_asof_partial(self): result = index.asof("2010-02") assert result == expected assert not isinstance(result, Index) + + def test_asof(self): + index = tm.makeDateIndex(100) + + dt = index[0] + assert index.asof(dt) == dt + assert isna(index.asof(dt - timedelta(1))) + + dt = index[-1] + assert index.asof(dt + timedelta(1)) == dt + + dt = index[0].to_pydatetime() + assert isinstance(index.asof(dt), Timestamp) diff --git a/pandas/tests/indexes/datetimes/test_freq_attr.py 
b/pandas/tests/indexes/datetimes/test_freq_attr.py new file mode 100644 index 0000000000000..f5821a316358d --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_freq_attr.py @@ -0,0 +1,61 @@ +import pytest + +from pandas import ( + DatetimeIndex, + date_range, +) + +from pandas.tseries.offsets import ( + BDay, + DateOffset, + Day, + Hour, +) + + +class TestFreq: + def test_freq_setter_errors(self): + # GH#20678 + idx = DatetimeIndex(["20180101", "20180103", "20180105"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with non-freq string + with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_freq_setter(self, values, freq, tz): + # GH#20678 + idx = DatetimeIndex(values, tz=tz) + + # can set to an offset, converting from string if necessary + idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert idx.freq is None + + def test_freq_view_safe(self): + # Setting the freq for one DatetimeIndex shouldn't alter the freq + # for another that views the same data + + dti = date_range("2016-01-01", periods=5) + dta = dti._data + + dti2 = DatetimeIndex(dta)._with_freq(None) + assert dti2.freq is None + + # Original was not altered + assert dti.freq == "D" + assert dta.freq == "D" diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index f0757d0ba555e..44c353315562a 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -297,21 +297,6 @@ def 
test_week_and_weekofyear_are_deprecated(): idx.weekofyear -def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): - # GH 6538: Check that DatetimeIndex and its TimeStamp elements - # return the same weekofyear accessor close to new year w/ tz - dates = ["2013/12/29", "2013/12/30", "2013/12/31"] - dates = DatetimeIndex(dates, tz="Europe/Brussels") - result = dates.isocalendar() - expected_data_frame = pd.DataFrame( - [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], - columns=["year", "week", "day"], - index=dates, - dtype="UInt32", - ) - tm.assert_frame_equal(result, expected_data_frame) - - def test_add_timedelta_preserves_freq(): # GH#37295 should hold for any DTI with freq=None or Tick freq tz = "Canada/Eastern" diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 7df94b5820e5d..d6ef4198fad2e 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -6,43 +6,17 @@ from pandas.compat import IS64 from pandas import ( - DateOffset, DatetimeIndex, Index, - Series, bdate_range, date_range, ) import pandas._testing as tm -from pandas.tseries.offsets import ( - BDay, - Day, - Hour, -) - START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestDatetimeIndexOps: - def test_ops_properties_basic(self, datetime_series): - - # sanity check that the behavior didn't change - # GH#7206 - for op in ["year", "day", "second", "weekday"]: - msg = f"'Series' object has no attribute '{op}'" - with pytest.raises(AttributeError, match=msg): - getattr(datetime_series, op) - - # attribute access should still work! 
- s = Series({"year": 2000, "month": 1, "day": 10}) - assert s.year == 2000 - assert s.month == 1 - assert s.day == 10 - msg = "'Series' object has no attribute 'weekday'" - with pytest.raises(AttributeError, match=msg): - s.weekday - @pytest.mark.parametrize( "freq,expected", [ @@ -74,72 +48,28 @@ def test_infer_freq(self, freq_sample): tm.assert_index_equal(idx, result) assert result.freq == freq_sample - @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) - @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) - @pytest.mark.parametrize("tz", [None, "US/Eastern"]) - def test_freq_setter(self, values, freq, tz): - # GH 20678 - idx = DatetimeIndex(values, tz=tz) - - # can set to an offset, converting from string if necessary - idx._data.freq = freq - assert idx.freq == freq - assert isinstance(idx.freq, DateOffset) - - # can reset to None - idx._data.freq = None - assert idx.freq is None - - def test_freq_setter_errors(self): - # GH 20678 - idx = DatetimeIndex(["20180101", "20180103", "20180105"]) - - # setting with an incompatible freq - msg = ( - "Inferred frequency 2D from passed values does not conform to " - "passed frequency 5D" - ) - with pytest.raises(ValueError, match=msg): - idx._data.freq = "5D" - - # setting with non-freq string - with pytest.raises(ValueError, match="Invalid frequency"): - idx._data.freq = "foo" - - def test_freq_view_safe(self): - # Setting the freq for one DatetimeIndex shouldn't alter the freq - # for another that views the same data - - dti = date_range("2016-01-01", periods=5) - dta = dti._data - - dti2 = DatetimeIndex(dta)._with_freq(None) - assert dti2.freq is None - - # Original was not altered - assert dti.freq == "D" - assert dta.freq == "D" - +@pytest.mark.parametrize("freq", ["B", "C"]) class TestBusinessDatetimeIndex: - def setup_method(self, method): - self.rng = bdate_range(START, END) + @pytest.fixture + def rng(self, freq): + return bdate_range(START, END, 
freq=freq) - def test_comparison(self): - d = self.rng[10] + def test_comparison(self, rng): + d = rng[10] - comp = self.rng > d + comp = rng > d assert comp[11] assert not comp[9] - def test_copy(self): - cp = self.rng.copy() + def test_copy(self, rng): + cp = rng.copy() repr(cp) - tm.assert_index_equal(cp, self.rng) + tm.assert_index_equal(cp, rng) - def test_identical(self): - t1 = self.rng.copy() - t2 = self.rng.copy() + def test_identical(self, rng): + t1 = rng.copy() + t2 = rng.copy() assert t1.identical(t2) # name @@ -153,20 +83,3 @@ def test_identical(self): t2v = Index(t2.values) assert t1.equals(t2v) assert not t1.identical(t2v) - - -class TestCustomDatetimeIndex: - def setup_method(self, method): - self.rng = bdate_range(START, END, freq="C") - - def test_comparison(self): - d = self.rng[10] - - comp = self.rng > d - assert comp[11] - assert not comp[9] - - def test_copy(self): - cp = self.rng.copy() - repr(cp) - tm.assert_index_equal(cp, self.rng) diff --git a/pandas/tests/indexes/period/test_freq_attr.py b/pandas/tests/indexes/period/test_freq_attr.py new file mode 100644 index 0000000000000..3bf3e700e5e72 --- /dev/null +++ b/pandas/tests/indexes/period/test_freq_attr.py @@ -0,0 +1,21 @@ +import pytest + +from pandas import ( + offsets, + period_range, +) +import pandas._testing as tm + + +class TestFreq: + def test_freq_setter_deprecated(self): + # GH#20678 + idx = period_range("2018Q1", periods=4, freq="Q") + + # no warning for getter + with tm.assert_produces_warning(None): + idx.freq + + # warning for setter + with pytest.raises(AttributeError, match="can't set attribute"): + idx.freq = offsets.Day() diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index a7dad4e7f352c..f07107e9d3277 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -38,12 +38,6 @@ def index(self, request): def test_pickle_compat_construction(self): 
super().test_pickle_compat_construction() - @pytest.mark.parametrize("freq", ["D", "M", "A"]) - def test_pickle_round_trip(self, freq): - idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) - def test_where(self): # This is handled in test_indexing pass @@ -307,13 +301,6 @@ def test_with_multi_index(self): assert isinstance(s.index.values[0][0], Period) - def test_pickle_freq(self): - # GH2891 - prng = period_range("1/1/2011", "1/1/2012", freq="M") - new_prng = tm.round_trip_pickle(prng) - assert new_prng.freq == offsets.MonthEnd() - assert new_prng.freqstr == "M" - def test_map(self): # test_map_dictlike generally tests @@ -341,47 +328,6 @@ def test_maybe_convert_timedelta(): pi._maybe_convert_timedelta(offset) -def test_is_monotonic_with_nat(): - # GH#31437 - # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex, - # in particular never be monotonic when we have NaT - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - tdi = Index(dti.view("timedelta64[ns]")) - - for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]: - if isinstance(obj, Index): - # i.e. not Engines - assert obj.is_monotonic - assert obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - dti1 = dti.insert(0, NaT) - pi1 = dti1.to_period("D") - tdi1 = Index(dti1.view("timedelta64[ns]")) - - for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]: - if isinstance(obj, Index): - # i.e. not Engines - assert not obj.is_monotonic - assert not obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - dti2 = dti.insert(3, NaT) - pi2 = dti2.to_period("H") - tdi2 = Index(dti2.view("timedelta64[ns]")) - - for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]: - if isinstance(obj, Index): - # i.e. 
not Engines - assert not obj.is_monotonic - assert not obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - @pytest.mark.parametrize("array", [True, False]) def test_dunder_array(array): obj = PeriodIndex(["2000-01-01", "2001-01-01"], freq="D") diff --git a/pandas/tests/indexes/period/test_pickle.py b/pandas/tests/indexes/period/test_pickle.py new file mode 100644 index 0000000000000..82f906d1e361f --- /dev/null +++ b/pandas/tests/indexes/period/test_pickle.py @@ -0,0 +1,26 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + PeriodIndex, + period_range, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestPickle: + @pytest.mark.parametrize("freq", ["D", "M", "A"]) + def test_pickle_round_trip(self, freq): + idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + + def test_pickle_freq(self): + # GH#2891 + prng = period_range("1/1/2011", "1/1/2012", freq="M") + new_prng = tm.round_trip_pickle(prng) + assert new_prng.freq == offsets.MonthEnd() + assert new_prng.freqstr == "M" diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_resolution.py similarity index 56% rename from pandas/tests/indexes/period/test_ops.py rename to pandas/tests/indexes/period/test_resolution.py index 9ebe44fb16c8d..7ecbde75cfa47 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_resolution.py @@ -1,10 +1,9 @@ import pytest import pandas as pd -import pandas._testing as tm -class TestPeriodIndexOps: +class TestResolution: @pytest.mark.parametrize( "freq,expected", [ @@ -22,15 +21,3 @@ class TestPeriodIndexOps: def test_resolution(self, freq, expected): idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) assert idx.resolution == expected - - def test_freq_setter_deprecated(self): - # GH 20678 - idx = pd.period_range("2018Q1", 
periods=4, freq="Q") - - # no warning for getter - with tm.assert_produces_warning(None): - idx.freq - - # warning for setter - with pytest.raises(AttributeError, match="can't set attribute"): - idx.freq = pd.offsets.Day() diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index 39a1ddcbc8a6a..f7dafd78a801f 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -84,6 +84,13 @@ def test_is_type_compatible_deprecation(index): index.is_type_compatible(index.inferred_type) +def test_is_mixed_deprecated(index): + # GH#32922 + msg = "Index.is_mixed is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index.is_mixed() + + class TestConversion: def test_to_series(self, index): # assert that we are creating a copy of the index diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 50be69fb93d7c..7f9a5c0b50595 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -29,7 +29,6 @@ TimedeltaIndex, Timestamp, date_range, - isna, period_range, ) import pandas._testing as tm @@ -395,15 +394,6 @@ def test_constructor_empty_special(self, empty, klass): assert isinstance(empty, klass) assert not len(empty) - def test_constructor_overflow_int64(self): - # see gh-15832 - msg = ( - "The elements provided in the data cannot " - "all be casted to the dtype int64" - ) - with pytest.raises(OverflowError, match=msg): - Index([np.iinfo(np.uint64).max - 1], dtype="int64") - @pytest.mark.parametrize( "index", [ @@ -502,18 +492,6 @@ def test_is_(self): ind2 = Index(arr, copy=False) assert not ind1.is_(ind2) - @pytest.mark.parametrize("index", ["datetime"], indirect=True) - def test_asof(self, index): - d = index[0] - assert index.asof(d) == d - assert isna(index.asof(d - timedelta(1))) - - d = index[-1] - assert index.asof(d + timedelta(1)) == d - - d = index[0].to_pydatetime() - assert isinstance(index.asof(d), 
Timestamp) - def test_asof_numeric_vs_bool_raises(self): left = Index([1, 2, 3]) right = Index([True, False]) @@ -699,12 +677,6 @@ def test_append_empty_preserve_name(self, name, expected): result = left.append(right) assert result.name == expected - def test_is_mixed_deprecated(self, simple_index): - # GH#32922 - index = simple_index - with tm.assert_produces_warning(FutureWarning): - index.is_mixed() - @pytest.mark.parametrize( "index, expected", [ diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 293aa6dd57124..5c5ec7219d2d7 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -272,3 +272,14 @@ def __array__(self, dtype=None) -> np.ndarray: expected = Index(array) result = Index(ArrayLike(array)) tm.assert_index_equal(result, expected) + + +class TestIndexConstructionErrors: + def test_constructor_overflow_int64(self): + # see GH#15832 + msg = ( + "The elements provided in the data cannot " + "all be casted to the dtype int64" + ) + with pytest.raises(OverflowError, match=msg): + Index([np.iinfo(np.uint64).max - 1], dtype="int64") diff --git a/pandas/tests/indexes/timedeltas/test_freq_attr.py b/pandas/tests/indexes/timedeltas/test_freq_attr.py new file mode 100644 index 0000000000000..39b9c11aa833c --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_freq_attr.py @@ -0,0 +1,61 @@ +import pytest + +from pandas import TimedeltaIndex + +from pandas.tseries.offsets import ( + DateOffset, + Day, + Hour, +) + + +class TestFreq: + @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) + def test_freq_setter(self, values, freq): + # GH#20678 + idx = TimedeltaIndex(values) + + # can set to an offset, converting from string if necessary + idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert 
idx.freq is None + + def test_freq_setter_errors(self): + # GH#20678 + idx = TimedeltaIndex(["0 days", "2 days", "4 days"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with a non-fixed frequency + msg = r"<2 \* BusinessDays> is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + idx._data.freq = "2B" + + # setting with non-freq string + with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + def test_freq_view_safe(self): + # Setting the freq for one TimedeltaIndex shouldn't alter the freq + # for another that views the same data + + tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D") + tda = tdi._data + + tdi2 = TimedeltaIndex(tda)._with_freq(None) + assert tdi2.freq is None + + # Original was not altered + assert tdi.freq == "2D" + assert tda.freq == "2D" diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index fc8abb83ed302..66fdaa2778600 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -340,3 +340,17 @@ def test_slice_invalid_str_with_timedeltaindex( indexer_sl(obj)[:"foo"] with pytest.raises(TypeError, match=msg): indexer_sl(obj)[tdi[0] : "foo"] + + +class TestContains: + def test_contains_nonunique(self): + # GH#9512 + for vals in ( + [0, 1, 0], + [0, 0, -1], + [0, -1, -1], + ["00:01:00", "00:01:00", "00:02:00"], + ["00:01:00", "00:01:00", "00:00:01"], + ): + idx = TimedeltaIndex(vals) + assert idx[0] in idx diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index f5d601bcfbcd1..f6013baf86edc 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,86 +1,14 @@ -import pytest - from 
pandas import ( TimedeltaIndex, timedelta_range, ) import pandas._testing as tm -from pandas.tseries.offsets import ( - DateOffset, - Day, - Hour, -) - class TestTimedeltaIndexOps: - def test_nonunique_contains(self): - # GH 9512 - for idx in map( - TimedeltaIndex, - ( - [0, 1, 0], - [0, 0, -1], - [0, -1, -1], - ["00:01:00", "00:01:00", "00:02:00"], - ["00:01:00", "00:01:00", "00:00:01"], - ), - ): - assert idx[0] in idx - def test_infer_freq(self, freq_sample): # GH#11018 idx = timedelta_range("1", freq=freq_sample, periods=10) result = TimedeltaIndex(idx.asi8, freq="infer") tm.assert_index_equal(idx, result) assert result.freq == freq_sample - - @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) - @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) - def test_freq_setter(self, values, freq): - # GH 20678 - idx = TimedeltaIndex(values) - - # can set to an offset, converting from string if necessary - idx._data.freq = freq - assert idx.freq == freq - assert isinstance(idx.freq, DateOffset) - - # can reset to None - idx._data.freq = None - assert idx.freq is None - - def test_freq_setter_errors(self): - # GH 20678 - idx = TimedeltaIndex(["0 days", "2 days", "4 days"]) - - # setting with an incompatible freq - msg = ( - "Inferred frequency 2D from passed values does not conform to " - "passed frequency 5D" - ) - with pytest.raises(ValueError, match=msg): - idx._data.freq = "5D" - - # setting with a non-fixed frequency - msg = r"<2 \* BusinessDays> is a non-fixed frequency" - with pytest.raises(ValueError, match=msg): - idx._data.freq = "2B" - - # setting with non-freq string - with pytest.raises(ValueError, match="Invalid frequency"): - idx._data.freq = "foo" - - def test_freq_view_safe(self): - # Setting the freq for one TimedeltaIndex shouldn't alter the freq - # for another that views the same data - - tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D") - tda = tdi._data - - tdi2 = 
TimedeltaIndex(tda)._with_freq(None) - assert tdi2.freq is None - - # Original was not altered - assert tdi.freq == "2D" - assert tda.freq == "2D" diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index aaf98e46f2f09..4e4eb89328540 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -191,3 +191,20 @@ def test_unknown_attribute(self): msg = "'Series' object has no attribute 'foo'" with pytest.raises(AttributeError, match=msg): ser.foo + + def test_datetime_series_no_datelike_attrs(self, datetime_series): + # GH#7206 + for op in ["year", "day", "second", "weekday"]: + msg = f"'Series' object has no attribute '{op}'" + with pytest.raises(AttributeError, match=msg): + getattr(datetime_series, op) + + def test_series_datetimelike_attribute_access(self): + # attribute access should still work! + ser = Series({"year": 2000, "month": 1, "day": 10}) + assert ser.year == 2000 + assert ser.month == 1 + assert ser.day == 10 + msg = "'Series' object has no attribute 'weekday'" + with pytest.raises(AttributeError, match=msg): + ser.weekday
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44377
2021-11-09T23:05:55Z
2021-11-11T23:42:16Z
2021-11-11T23:42:16Z
2021-11-11T23:51:23Z
CLN: misplaced indexing tests
diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 44a70d3933b66..bff461dbc7038 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -359,6 +359,39 @@ def test_dt64arr_timestamp_equality(self, box_with_array): expected = tm.box_expected([False, False, False], xbox) tm.assert_equal(result, expected) + @pytest.mark.parametrize( + "datetimelike", + [ + Timestamp("20130101"), + datetime(2013, 1, 1), + np.datetime64("2013-01-01T00:00", "ns"), + ], + ) + @pytest.mark.parametrize( + "op,expected", + [ + (operator.lt, [True, False, False, False]), + (operator.le, [True, True, False, False]), + (operator.eq, [False, True, False, False]), + (operator.gt, [False, False, False, True]), + ], + ) + def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected): + # GH#17965, test for ability to compare datetime64[ns] columns + # to datetimelike + ser = Series( + [ + Timestamp("20120101"), + Timestamp("20130101"), + np.nan, + Timestamp("20130103"), + ], + name="A", + ) + result = op(ser, datetimelike) + expected = Series(expected, name="A") + tm.assert_series_equal(result, expected) + class TestDatetimeIndexComparisons: diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 896c43db5e356..2f32f9e18311d 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -1,7 +1,6 @@ """ test partial slicing on Series/Frame """ from datetime import datetime -import operator import numpy as np import pytest @@ -412,40 +411,6 @@ def test_loc_datetime_length_one(self): result = df.loc["2016-10-01T00:00:00":] tm.assert_frame_equal(result, df) - @pytest.mark.parametrize( - "datetimelike", - [ - Timestamp("20130101"), - datetime(2013, 1, 1), - np.datetime64("2013-01-01T00:00", "ns"), - ], - ) - @pytest.mark.parametrize( - 
"op,expected", - [ - (operator.lt, [True, False, False, False]), - (operator.le, [True, True, False, False]), - (operator.eq, [False, True, False, False]), - (operator.gt, [False, False, False, True]), - ], - ) - def test_selection_by_datetimelike(self, datetimelike, op, expected): - # GH issue #17965, test for ability to compare datetime64[ns] columns - # to datetimelike - df = DataFrame( - { - "A": [ - Timestamp("20120101"), - Timestamp("20130101"), - np.nan, - Timestamp("20130103"), - ] - } - ) - result = op(df.A, datetimelike) - expected = Series(expected, name="A") - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize( "start", [ diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index dfa750bf933a0..1b5e64bca03a0 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -205,6 +205,7 @@ def test_getitem_seconds(self): # GH7116 # these show deprecations as we are trying # to slice with non-integer indexers + # FIXME: don't leave commented-out # with pytest.raises(IndexError): # idx[v] continue @@ -814,12 +815,6 @@ def test_get_value(self): result2 = idx2.get_value(input2, p1) tm.assert_series_equal(result2, expected2) - def test_loc_str(self): - # https://github.com/pandas-dev/pandas/issues/33964 - index = period_range(start="2000", periods=20, freq="B") - series = Series(range(20), index=index) - assert series.loc["2000-01-14"] == 9 - @pytest.mark.parametrize("freq", ["H", "D"]) def test_get_value_datetime_hourly(self, freq): # get_loc and get_value should treat datetime objects symmetrically diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index e6c31d22e626f..a7dad4e7f352c 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -211,7 +211,7 @@ def _check_all_fields(self, periodindex): ] periods = list(periodindex) - s = 
Series(periodindex) + ser = Series(periodindex) for field in fields: field_idx = getattr(periodindex, field) @@ -219,10 +219,10 @@ def _check_all_fields(self, periodindex): for x, val in zip(periods, field_idx): assert getattr(x, field) == val - if len(s) == 0: + if len(ser) == 0: continue - field_s = getattr(s.dt, field) + field_s = getattr(ser.dt, field) assert len(periodindex) == len(field_s) for x, val in zip(periods, field_s): assert getattr(x, field) == val diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 2a5051b2982bb..f5d601bcfbcd1 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,8 +1,6 @@ -import numpy as np import pytest from pandas import ( - Series, TimedeltaIndex, timedelta_range, ) @@ -30,15 +28,6 @@ def test_nonunique_contains(self): ): assert idx[0] in idx - def test_unknown_attribute(self): - # see gh-9680 - tdi = timedelta_range(start=0, periods=10, freq="1s") - ts = Series(np.random.normal(size=10), index=tdi) - assert "foo" not in ts.__dict__.keys() - msg = "'Series' object has no attribute 'foo'" - with pytest.raises(AttributeError, match=msg): - ts.foo - def test_infer_freq(self, freq_sample): # GH#11018 idx = timedelta_range("1", freq=freq_sample, periods=10) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index b0aa05371271b..ed9b5cc0850b9 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -2941,3 +2941,9 @@ def test_loc_set_multiple_items_in_multiple_new_columns(self): ) tm.assert_frame_equal(df, expected) + + def test_getitem_loc_str_periodindex(self): + # GH#33964 + index = pd.period_range(start="2000", periods=20, freq="B") + series = Series(range(20), index=index) + assert series.loc["2000-01-14"] == 9 diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index d77f831bee8bc..6c3587c7eeada 
100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -377,17 +377,3 @@ def test_frozenset_index(): assert s[idx1] == 2 s[idx1] = 3 assert s[idx1] == 3 - - -def test_boolean_index(): - # GH18579 - s1 = Series([1, 2, 3], index=[4, 5, 6]) - s2 = Series([1, 3, 2], index=s1 == 2) - tm.assert_series_equal(Series([1, 3, 2], [False, True, False]), s2) - - -def test_index_ndim_gt_1_raises(): - # GH18579 - df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) - with pytest.raises(ValueError, match="Index data must be 1-dimensional"): - Series([1, 3, 2], index=df) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index b49c209a59a06..aaf98e46f2f09 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -182,3 +182,12 @@ def test_inspect_getmembers(self): ser = Series(dtype=object) with tm.assert_produces_warning(None): inspect.getmembers(ser) + + def test_unknown_attribute(self): + # GH#9680 + tdi = pd.timedelta_range(start=0, periods=10, freq="1s") + ser = Series(np.random.normal(size=10), index=tdi) + assert "foo" not in ser.__dict__.keys() + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + ser.foo diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 2c33284df18c5..1b488b4cf0b77 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -154,6 +154,12 @@ def test_constructor(self, datetime_series): with pytest.raises(NotImplementedError, match=msg): Series(m) + def test_constructor_index_ndim_gt_1_raises(self): + # GH#18579 + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) + with pytest.raises(ValueError, match="Index data must be 1-dimensional"): + Series([1, 3, 2], index=df) + @pytest.mark.parametrize("input_class", [list, dict, OrderedDict]) def test_constructor_empty(self, input_class): with 
tm.assert_produces_warning(FutureWarning): @@ -276,6 +282,15 @@ def test_constructor_list_like(self): result = Series(obj, index=[0, 1, 2]) tm.assert_series_equal(result, expected) + def test_constructor_boolean_index(self): + # GH#18579 + s1 = Series([1, 2, 3], index=[4, 5, 6]) + + index = s1 == 2 + result = Series([1, 3, 2], index=index) + expected = Series([1, 3, 2], index=[False, True, False]) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"]) def test_constructor_index_dtype(self, dtype): # GH 17088
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44375
2021-11-09T22:17:05Z
2021-11-11T13:40:52Z
2021-11-11T13:40:52Z
2021-11-11T15:12:14Z
TST: Make tests for groupby median/mean more strict on dtype
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 3c402480ea2ec..e5870a206f419 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -394,8 +394,7 @@ def test_median_empty_bins(observed): result = df.groupby(bins, observed=observed).median() expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) - # TODO: GH 41137 - tm.assert_frame_equal(result, expected, check_dtype=False) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 8436c2db445ee..34e8e2ac3e84a 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1692,8 +1692,6 @@ def f(data, add_arg): df = DataFrame({"A": 1, "B": 2}, index=date_range("2017", periods=10)) result = df.groupby("A").resample("D").agg(f, multiplier).astype(float) expected = df.groupby("A").resample("D").mean().multiply(multiplier) - # TODO: GH 41137 - expected = expected.astype("float64") tm.assert_frame_equal(result, expected)
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them Followup to #41139.
https://api.github.com/repos/pandas-dev/pandas/pulls/44374
2021-11-09T22:06:34Z
2021-11-11T00:17:10Z
2021-11-11T00:17:09Z
2021-11-11T22:27:28Z
TYP: changed variable cat_array to cat_array_list in dtypes.py
diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 7692dc522522f..92a9f2a5fb97c 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -53,6 +53,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: pandas-sdist + channels: conda-forge python-version: '${{ matrix.python-version }}' - name: Install pandas from sdist diff --git a/MANIFEST.in b/MANIFEST.in index f616fad6b1557..c6ddc79eaa83c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -33,6 +33,7 @@ global-exclude *.xlsb global-exclude *.xlsm global-exclude *.xlsx global-exclude *.xpt +global-exclude *.cpt global-exclude *.xz global-exclude *.zip global-exclude *~ diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 155dd6f8e13a0..d8578ed604ae3 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -152,6 +152,18 @@ def time_value_counts(self, N, dtype): self.s.value_counts() +class ValueCountsObjectDropNAFalse: + + params = [10 ** 3, 10 ** 4, 10 ** 5] + param_names = ["N"] + + def setup(self, N): + self.s = Series(np.random.randint(0, N, size=10 * N)).astype("object") + + def time_value_counts(self, N): + self.s.value_counts(dropna=False) + + class Mode: params = [[10 ** 3, 10 ** 4, 10 ** 5], ["int", "uint", "float", "object"]] @@ -164,6 +176,18 @@ def time_mode(self, N, dtype): self.s.mode() +class ModeObjectDropNAFalse: + + params = [10 ** 3, 10 ** 4, 10 ** 5] + param_names = ["N"] + + def setup(self, N): + self.s = Series(np.random.randint(0, N, size=10 * N)).astype("object") + + def time_mode(self, N): + self.s.mode(dropna=False) + + class Dir: def setup(self): self.s = Series(index=tm.makeStringIndex(10000)) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index c2ca3df5ca23d..e2f8ac09d8873 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -102,7 +102,7 @@ header : int or list of ints, default 
``'infer'`` names : array-like, default ``None`` List of column names to use. If file contains no header row, then you should explicitly pass ``header=None``. Duplicates in this list are not allowed. -index_col : int, str, sequence of int / str, or False, default ``None`` +index_col : int, str, sequence of int / str, or False, optional, default ``None`` Column(s) to use as the row labels of the ``DataFrame``, either given as string name or column index. If a sequence of int / str is given, a MultiIndex is used. @@ -120,7 +120,8 @@ usecols : list-like or callable, default ``None`` Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in ``names`` or - inferred from the document header row(s). For example, a valid list-like + inferred from the document header row(s). If ``names`` are given, the document + header row(s) are not taken into account. For example, a valid list-like ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To @@ -348,7 +349,7 @@ dialect : str or :class:`python:csv.Dialect` instance, default ``None`` Error handling ++++++++++++++ -error_bad_lines : boolean, default ``None`` +error_bad_lines : boolean, optional, default ``None`` Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no ``DataFrame`` will be returned. If ``False``, then these "bad lines" will dropped from the @@ -358,7 +359,7 @@ error_bad_lines : boolean, default ``None`` .. deprecated:: 1.3.0 The ``on_bad_lines`` parameter should be used instead to specify behavior upon encountering a bad line instead. 
-warn_bad_lines : boolean, default ``None`` +warn_bad_lines : boolean, optional, default ``None`` If error_bad_lines is ``False``, and warn_bad_lines is ``True``, a warning for each "bad line" will be output. diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst index 589092c0dd7e3..951b05b65c81b 100644 --- a/doc/source/whatsnew/v1.3.5.rst +++ b/doc/source/whatsnew/v1.3.5.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`) - Fixed performance regression in :func:`read_csv` (:issue:`44106`) +- Fixed regression in :meth:`Series.duplicated` and :meth:`Series.drop_duplicates` when Series has :class:`Categorical` dtype with boolean categories (:issue:`44351`) - .. --------------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 99a66c7e5454b..a593a03de5c25 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -15,6 +15,31 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +.. _whatsnew_140.enhancements.warning_lineno: + +Improved warning messages +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously, warning messages may have pointed to lines within the pandas library. Running the script ``setting_with_copy_warning.py`` + +.. code-block:: python + + import pandas as pd + + df = pd.DataFrame({'a': [1, 2, 3]}) + df[:2].loc[:, 'a'] = 5 + +with pandas 1.3 resulted in:: + + .../site-packages/pandas/core/indexing.py:1951: SettingWithCopyWarning: + A value is trying to be set on a copy of a slice from a DataFrame. + +This made it difficult to determine where the warning was being generated from. Now pandas will inspect the call stack, reporting the first line outside of the pandas library that gave rise to the warning. 
The output of the above script is now:: + + setting_with_copy_warning.py:4: SettingWithCopyWarning: + A value is trying to be set on a copy of a slice from a DataFrame. + + .. _whatsnew_140.enhancements.numeric_index: More flexible numeric dtypes for indexes @@ -184,6 +209,7 @@ Other enhancements - :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`) - :meth:`read_excel` now accepts a ``decimal`` argument that allow the user to specify the decimal point when parsing string columns to numeric (:issue:`14403`) - :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`) +- :meth:`Timestamp.isoformat`, now handles the ``timespec`` argument from the base :class:``datetime`` class (:issue:`26131`) .. --------------------------------------------------------------------------- @@ -240,6 +266,38 @@ Now the float-dtype is respected. Since the common dtype for these DataFrames is *New behavior*: +.. ipython:: python + + res + +.. _whatsnew_140.notable_bug_fixes.value_counts_and_mode_do_not_coerse_to_nan: + +Null-values are no longer coerced to NaN-value in value_counts and mode +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`Series.value_counts` and :meth:`Series.mode` no longer coerce ``None``, ``NaT`` and other null-values to a NaN-value for ``np.object``-dtype. This behavior is now consistent with ``unique``, ``isin`` and others (:issue:`42688`). + +.. ipython:: python + + s = pd.Series([True, None, pd.NaT, None, pd.NaT, None]) + res = s.value_counts(dropna=False) + +Previously, all null-values were replaced by a NaN-value. + +*Previous behavior*: + +.. code-block:: ipython + + In [3]: res + Out[3]: + NaN 5 + True 1 + dtype: int64 + +Now null-values are no longer mangled. + +*New behavior*: + .. 
ipython:: python res @@ -400,6 +458,8 @@ Other Deprecations - Deprecated casting behavior when setting timezone-aware value(s) into a timezone-aware :class:`Series` or :class:`DataFrame` column when the timezones do not match. Previously this cast to object dtype. In a future version, the values being inserted will be converted to the series or column's existing timezone (:issue:`37605`) - Deprecated casting behavior when passing an item with mismatched-timezone to :meth:`DatetimeIndex.insert`, :meth:`DatetimeIndex.putmask`, :meth:`DatetimeIndex.where` :meth:`DatetimeIndex.fillna`, :meth:`Series.mask`, :meth:`Series.where`, :meth:`Series.fillna`, :meth:`Series.shift`, :meth:`Series.replace`, :meth:`Series.reindex` (and :class:`DataFrame` column analogues). In the past this has cast to object dtype. In a future version, these will cast the passed item to the index or series's timezone (:issue:`37605`) - Deprecated the 'errors' keyword argument in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, and meth:`DataFrame.mask`; in a future version the argument will be removed (:issue:`44294`) +- Deprecated :meth:`PeriodIndex.astype` to ``datetime64[ns]`` or ``DatetimeTZDtype``, use ``obj.to_timestamp(how).tz_localize(dtype.tz)`` instead (:issue:`44398`) +- .. 
--------------------------------------------------------------------------- @@ -506,6 +566,7 @@ Conversion - Bug in :class:`Series` constructor returning 0 for missing values with dtype ``int64`` and ``False`` for dtype ``bool`` (:issue:`43017`, :issue:`43018`) - Bug in :class:`IntegerDtype` not allowing coercion from string dtype (:issue:`25472`) - Bug in :func:`to_datetime` with ``arg:xr.DataArray`` and ``unit="ns"`` specified raises TypeError (:issue:`44053`) +- Bug in :meth:`DataFrame.convert_dtypes` not returning the correct type when a subclass does not overload :meth:`_constructor_sliced` (:issue:`43201`) - Strings @@ -515,7 +576,7 @@ Strings Interval ^^^^^^^^ -- +- Bug in :meth:`IntervalIndex.get_indexer_non_unique` returning boolean mask instead of array of integers for a non unique and non monotonic index (:issue:`44084`) - Indexing @@ -543,12 +604,14 @@ Indexing - Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`) - Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`) - Bug in :meth:`Series.__setitem__` with a boolean mask indexer setting a listlike value of length 1 incorrectly broadcasting that value (:issue:`44265`) +- Bug in :meth:`DataFrame.loc.__setitem__` and :meth:`DataFrame.iloc.__setitem__` with mixed dtypes sometimes failing to operate in-place (:issue:`44345`) - Missing ^^^^^^^ - Bug in :meth:`DataFrame.fillna` with limit and no method ignores axis='columns' or ``axis = 1`` (:issue:`40989`) - Bug in :meth:`DataFrame.fillna` not replacing missing values when using a dict-like ``value`` and duplicate column names (:issue:`43476`) +- Bug in constructing a :class:`DataFrame` with a dictionary ``np.datetime64`` as a value and 
``dtype='timedelta64[ns]'``, or vice-versa, incorrectly casting instead of raising (:issue:`??`) - MultiIndex @@ -572,18 +635,21 @@ I/O - Bug in :func:`read_csv`, changed exception class when expecting a file path name or file-like object from ``OSError`` to ``TypeError`` (:issue:`43366`) - Bug in :func:`read_json` not handling non-numpy dtypes correctly (especially ``category``) (:issue:`21892`, :issue:`33205`) - Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`) +- Bug in :func:`json_normalize` where reading data with missing multi-level metadata would not respect errors="ignore" (:issue:`44312`) - Bug in :func:`read_csv` with :code:`float_precision="round_trip"` which did not skip initial/trailing whitespace (:issue:`43713`) - Bug in dumping/loading a :class:`DataFrame` with ``yaml.dump(frame)`` (:issue:`42748`) +- Bug in :func:`read_csv` raising ``ValueError`` when ``parse_dates`` was used with ``MultiIndex`` columns (:issue:`8991`) - Period ^^^^^^ - Bug in adding a :class:`Period` object to a ``np.timedelta64`` object incorrectly raising ``TypeError`` (:issue:`44182`) +- Bug in :meth:`PeriodIndex.to_timestamp` when the index has ``freq="B"`` inferring ``freq="D"`` for its result instead of ``freq="B"`` (:issue:`44105`) - Plotting ^^^^^^^^ -- +- When given non-numeric data, :meth:`DataFrame.boxplot` now raises a ``ValueError`` rather than a cryptic ``KeyError`` or ``ZeroDivsionError``, in line with other plotting functions like :meth:`DataFrame.hist`. 
(:issue:`43480`) - Groupby/resample/rolling @@ -622,6 +688,8 @@ Reshaping - Bug in :func:`crosstab` would fail when inputs are lists or tuples (:issue:`44076`) - Bug in :meth:`DataFrame.append` failing to retain ``index.name`` when appending a list of :class:`Series` objects (:issue:`44109`) - Fixed metadata propagation in :meth:`Dataframe.apply` method, consequently fixing the same issue for :meth:`Dataframe.transform`, :meth:`Dataframe.nunique` and :meth:`Dataframe.mode` (:issue:`28283`) +- Bug in :meth:`DataFrame.stack` with ``ExtensionDtype`` columns incorrectly raising (:issue:`43561`) +- Sparse ^^^^^^ @@ -652,11 +720,13 @@ Styler Other ^^^^^ +- Bug in :meth:`DataFrame.astype` with non-unique columns and a :class:`Series` ``dtype`` argument (:issue:`44417`) - Bug in :meth:`CustomBusinessMonthBegin.__add__` (:meth:`CustomBusinessMonthEnd.__add__`) not applying the extra ``offset`` parameter when beginning (end) of the target month is already a business day (:issue:`41356`) - Bug in :meth:`RangeIndex.union` with another ``RangeIndex`` with matching (even) ``step`` and starts differing by strictly less than ``step / 2`` (:issue:`44019`) - Bug in :meth:`RangeIndex.difference` with ``sort=None`` and ``step<0`` failing to sort (:issue:`44085`) - Bug in :meth:`Series.to_frame` and :meth:`Index.to_frame` ignoring the ``name`` argument when ``name=None`` is explicitly passed (:issue:`44212`) - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` with ``value=None`` and ExtensionDtypes (:issue:`44270`) +- Bug in :meth:`FloatingArray.equals` failing to consider two arrays equal if they contain ``np.nan`` values (:issue:`44382`) - .. 
***DO NOT USE THIS SECTION*** diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index fb8ce79a924a4..e5e64f8dc7b5f 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -31,7 +31,7 @@ dtypes = [('Complex128', 'complex128', 'complex128', @cython.wraparound(False) @cython.boundscheck(False) {{if dtype == 'object'}} -cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna, navalue=np.NaN): +cdef value_count_{{dtype}}(ndarray[{{dtype}}] values, bint dropna): {{else}} cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): {{endif}} @@ -42,7 +42,6 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): # Don't use Py_ssize_t, since table.n_buckets is unsigned khiter_t k - bint is_null {{c_type}} val @@ -61,11 +60,7 @@ cdef value_count_{{dtype}}(const {{dtype}}_t[:] values, bint dropna): for i in range(n): val = values[i] - is_null = checknull(val) - if not is_null or not dropna: - # all nas become the same representative: - if is_null: - val = navalue + if not dropna or not checknull(val): k = kh_get_{{ttype}}(table, <PyObject*>val) if k != table.n_buckets: table.vals[k] += 1 diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 2aebf75ba35d4..09bfc4527a428 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -295,7 +295,7 @@ cdef class _NaT(datetime): def __str__(self) -> str: return "NaT" - def isoformat(self, sep="T") -> str: + def isoformat(self, sep: str = "T", timespec: str = "auto") -> str: # This allows Timestamp(ts.isoformat()) to always correctly roundtrip. 
return "NaT" diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 613da5a691736..28b8158548ca8 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -737,9 +737,42 @@ cdef class _Timestamp(ABCTimestamp): # ----------------------------------------------------------------- # Rendering Methods - def isoformat(self, sep: str = "T") -> str: - base = super(_Timestamp, self).isoformat(sep=sep) - if self.nanosecond == 0: + def isoformat(self, sep: str = "T", timespec: str = "auto") -> str: + """ + Return the time formatted according to ISO. + + The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn'. + By default, the fractional part is omitted if self.microsecond == 0 + and self.nanosecond == 0. + + If self.tzinfo is not None, the UTC offset is also attached, giving + giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmmnnn+HH:MM'. + + Parameters + ---------- + sep : str, default 'T' + String used as the separator between the date and time. + + timespec : str, default 'auto' + Specifies the number of additional terms of the time to include. + The valid values are 'auto', 'hours', 'minutes', 'seconds', + 'milliseconds', 'microseconds', and 'nanoseconds'. 
+ + Returns + ------- + str + + Examples + -------- + >>> ts = pd.Timestamp('2020-03-14T15:32:52.192548651') + >>> ts.isoformat() + '2020-03-14T15:32:52.192548651' + >>> ts.isoformat(timespec='microseconds') + '2020-03-14T15:32:52.192548' + """ + base_ts = "microseconds" if timespec == "nanoseconds" else timespec + base = super(_Timestamp, self).isoformat(sep=sep, timespec=base_ts) + if self.nanosecond == 0 and timespec != "nanoseconds": return base if self.tzinfo is not None: @@ -747,10 +780,11 @@ cdef class _Timestamp(ABCTimestamp): else: base1, base2 = base, "" - if self.microsecond != 0: - base1 += f"{self.nanosecond:03d}" - else: - base1 += f".{self.nanosecond:09d}" + if timespec == "nanoseconds" or (timespec == "auto" and self.nanosecond): + if self.microsecond: + base1 += f"{self.nanosecond:03d}" + else: + base1 += f".{self.nanosecond:09d}" return base1 + base2 diff --git a/pandas/_testing/__init__.py b/pandas/_testing/__init__.py index e8283a222d86a..4f9ef2c3c3ffa 100644 --- a/pandas/_testing/__init__.py +++ b/pandas/_testing/__init__.py @@ -82,6 +82,7 @@ assert_extension_array_equal, assert_frame_equal, assert_index_equal, + assert_indexing_slices_equivalent, assert_interval_array_equal, assert_is_sorted, assert_is_valid_plot_return_object, @@ -259,7 +260,7 @@ def box_expected(expected, box_cls, transpose=True): expected = DatetimeArray(expected) elif box_cls is TimedeltaArray: expected = TimedeltaArray(expected) - elif box_cls is np.ndarray: + elif box_cls is np.ndarray or box_cls is np.array: expected = np.array(expected) elif box_cls is to_array: expected = to_array(expected) diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index c9f7fd43c1050..54f74bd1ae107 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -11,6 +11,7 @@ ) from pandas._libs.missing import is_matching_na import pandas._libs.testing as _testing +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import 
( is_bool, @@ -106,7 +107,7 @@ def assert_almost_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # https://github.com/python/mypy/issues/7642 # error: Argument 1 to "_get_tol_from_less_precise" has incompatible @@ -340,7 +341,7 @@ def _get_ilevel_values(index, level): "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # https://github.com/python/mypy/issues/7642 # error: Argument 1 to "_get_tol_from_less_precise" has incompatible @@ -818,7 +819,7 @@ def assert_extension_array_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -964,7 +965,7 @@ def assert_series_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -1247,7 +1248,7 @@ def assert_frame_equal( "is deprecated and will be removed in a future version. 
" "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -1444,3 +1445,17 @@ def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> Related to issue #37609 """ return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice): + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not ser.index.is_integer(): + # For integer indices, .loc and plain getitem are position-based. + assert_series_equal(ser[l_slc], expected) diff --git a/pandas/compat/pyarrow.py b/pandas/compat/pyarrow.py index 9bf7139769baa..f9b9409317774 100644 --- a/pandas/compat/pyarrow.py +++ b/pandas/compat/pyarrow.py @@ -12,9 +12,11 @@ pa_version_under3p0 = _palv < Version("3.0.0") pa_version_under4p0 = _palv < Version("4.0.0") pa_version_under5p0 = _palv < Version("5.0.0") + pa_version_under6p0 = _palv < Version("6.0.0") except ImportError: pa_version_under1p0 = True pa_version_under2p0 = True pa_version_under3p0 = True pa_version_under4p0 = True pa_version_under5p0 = True + pa_version_under6p0 = True diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c31368f179ef0..07fa5799fe371 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -9,6 +9,7 @@ import warnings from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level class DirNamesMixin: @@ -267,7 +268,7 @@ def decorator(accessor): f"{repr(name)} for type {repr(cls)} is overriding a preexisting " f"attribute with the same name.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) setattr(cls, name, CachedAccessor(name, accessor)) cls._accessors.add(name) diff --git 
a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c1b587ce3a6b2..acc66ae9deca7 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -35,6 +35,7 @@ npt, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, @@ -148,7 +149,7 @@ def _ensure_data(values: ArrayLike) -> np.ndarray: # i.e. all-bool Categorical, BooleanArray try: return np.asarray(values).astype("uint8", copy=False) - except TypeError: + except (TypeError, ValueError): # GH#42107 we have pd.NAs present return np.asarray(values) @@ -1550,7 +1551,7 @@ def searchsorted( _diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"} -def diff(arr, n: int, axis: int = 0, stacklevel: int = 3): +def diff(arr, n: int, axis: int = 0): """ difference of n between self, analogous to s-s.shift(n) @@ -1596,7 +1597,7 @@ def diff(arr, n: int, axis: int = 0, stacklevel: int = 3): "dtype lost in 'diff()'. In the future this will raise a " "TypeError. 
Convert to a suitable dtype prior to calling 'diff'.", FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) arr = np.asarray(arr) dtype = arr.dtype diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 54324bf721945..1f37e0e5d249a 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -4,7 +4,6 @@ from __future__ import annotations from typing import Any -import warnings import numpy as np @@ -15,16 +14,12 @@ ) from pandas.core.dtypes.cast import ( + can_hold_element, convert_scalar_for_putitemlike, find_common_type, infer_dtype_from, ) -from pandas.core.dtypes.common import ( - is_float_dtype, - is_integer_dtype, - is_list_like, -) -from pandas.core.dtypes.missing import isna_compat +from pandas.core.dtypes.common import is_list_like from pandas.core.arrays import ExtensionArray @@ -75,7 +70,7 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd `values`, updated in-place. mask : np.ndarray[bool] Applies to both sides (array like). 
- new : `new values` either scalar or an array like aligned with `values` + new : listlike `new values` aligned with `values` Returns ------- @@ -89,9 +84,6 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings - if not is_list_like(new): - new = np.broadcast_to(new, mask.shape) - # see if we are only masking values that if putted # will work in the current dtype try: @@ -100,33 +92,19 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # TypeError: only integer scalar arrays can be converted to a scalar index pass else: - # make sure that we have a nullable type if we have nulls - if not isna_compat(values, nn[0]): - pass - elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)): - # only compare integers/floats - pass - elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)): - # only compare integers/floats - pass - else: - - # we ignore ComplexWarning here - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", np.ComplexWarning) - nn_at = nn.astype(values.dtype) - - comp = nn == nn_at - if is_list_like(comp) and comp.all(): - nv = values.copy() - nv[mask] = nn_at - return nv + # We only get to putmask_smart when we cannot hold 'new' in values. + # The "smart" part of putmask_smart is checking if we can hold new[mask] + # in values, in which case we can still avoid the need to cast. 
+ if can_hold_element(values, nn): + values[mask] = nn + return values new = np.asarray(new) if values.dtype.kind == new.dtype.kind: # preserves dtype if possible - return _putmask_preserve(values, new, mask) + np.putmask(values, mask, new) + return values dtype = find_common_type([values.dtype, new.dtype]) # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type @@ -135,15 +113,8 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # List[Any], _DTypeDict, Tuple[Any, Any]]]" values = values.astype(dtype) # type: ignore[arg-type] - return _putmask_preserve(values, new, mask) - - -def _putmask_preserve(new_values: np.ndarray, new, mask: npt.NDArray[np.bool_]): - try: - new_values[mask] = new[mask] - except (IndexError, ValueError): - new_values[mask] = new - return new_values + np.putmask(values, mask, new) + return values def putmask_without_repeat( diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index fe09a044566f8..d91404ff05157 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -11,6 +11,7 @@ import numpy as np from pandas._libs import lib +from pandas.util._exceptions import find_stack_level from pandas.core.construction import extract_array from pandas.core.ops import ( @@ -210,7 +211,7 @@ def _maybe_fallback(ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): "or align manually (eg 'df1, df2 = df1.align(df2)') before passing to " "the ufunc to obtain the future behaviour and silence this warning.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # keep the first dataframe of the inputs, other DataFrame/Series is @@ -336,7 +337,9 @@ def reconstruct(result): "Consider explicitly converting the DataFrame " "to an array with '.to_numpy()' first." 
) - warnings.warn(msg.format(ufunc), FutureWarning, stacklevel=4) + warnings.warn( + msg.format(ufunc), FutureWarning, stacklevel=find_stack_level() + ) return result raise NotImplementedError return result diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 8deeb44f65188..674379f6d65f8 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -310,7 +310,7 @@ def _wrap_reduction_result(self, axis: int | None, result): # ------------------------------------------------------------------------ # __array_function__ methods - def putmask(self, mask: np.ndarray, value) -> None: + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: """ Analogue to np.putmask(self, mask, value) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 70841197761a9..a64aef64ab49f 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1409,6 +1409,33 @@ def insert(self: ExtensionArrayT, loc: int, item) -> ExtensionArrayT: return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]]) + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: + """ + Analogue to np.putmask(self, mask, value) + + Parameters + ---------- + mask : np.ndarray[bool] + value : scalar or listlike + If listlike, must be arraylike with same length as self. + + Returns + ------- + None + + Notes + ----- + Unlike np.putmask, we do not repeat listlike values with mismatched length. + 'value' should either be a scalar or an arraylike with the same length + as self. 
+ """ + if is_list_like(value): + val = value[mask] + else: + val = value + + self[mask] = val + def _where( self: ExtensionArrayT, mask: npt.NDArray[np.bool_], value ) -> ExtensionArrayT: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c7f587b35f557..f205773d1b03d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -6,7 +6,6 @@ from shutil import get_terminal_size from typing import ( TYPE_CHECKING, - Any, Hashable, Sequence, TypeVar, @@ -38,10 +37,6 @@ Dtype, NpDtype, Ordered, - PositionalIndexer2D, - PositionalIndexerTuple, - ScalarIndexer, - SequenceIndexer, Shape, npt, type_t, @@ -102,7 +97,10 @@ take_nd, unique1d, ) -from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.arrays._mixins import ( + NDArrayBackedExtensionArray, + ravel_compat, +) from pandas.core.base import ( ExtensionArray, NoNewAttributesMixin, @@ -113,7 +111,6 @@ extract_array, sanitize_array, ) -from pandas.core.indexers import deprecate_ndim_indexing from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.sorting import nargsort from pandas.core.strings.object_array import ObjectStringArrayMixin @@ -393,7 +390,7 @@ def __init__( "Allowing scalars in the Categorical constructor is deprecated " "and will raise in a future version. 
Use `[value]` instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) values = [values] @@ -424,13 +421,8 @@ def __init__( if null_mask.any(): # We remove null values here, then below will re-insert # them, grep "full_codes" - - # error: Incompatible types in assignment (expression has type - # "List[Any]", variable has type "ExtensionArray") - arr = [ # type: ignore[assignment] - values[idx] for idx in np.where(~null_mask)[0] - ] - arr = sanitize_array(arr, None) + arr_lst = [values[idx] for idx in np.where(~null_mask)[0]] + arr = sanitize_array(arr_lst, None) values = arr if dtype.categories is None: @@ -948,7 +940,7 @@ def set_categories( "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1048,7 +1040,7 @@ def rename_categories(self, new_categories, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1180,7 +1172,7 @@ def add_categories(self, new_categories, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1255,7 +1247,7 @@ def remove_categories(self, removals, inplace=no_default): "a future version. 
Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1330,7 +1322,7 @@ def remove_unused_categories(self, inplace=no_default): "remove_unused_categories is deprecated and " "will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1484,6 +1476,7 @@ def _validate_scalar(self, fill_value): # ------------------------------------------------------------- + @ravel_compat def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: """ The numpy array interface. @@ -1886,7 +1879,7 @@ def to_dense(self) -> np.ndarray: "Categorical.to_dense is deprecated and will be removed in " "a future version. Use np.asarray(cat) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return np.asarray(self) @@ -1903,7 +1896,7 @@ def _codes(self, value: np.ndarray): "Setting the codes on a Categorical is deprecated and will raise in " "a future version. Create a new Categorical object instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # GH#40606 NDArrayBacked.__init__(self, value, self.dtype) @@ -1926,7 +1919,7 @@ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None): warn( "Categorical.take_nd is deprecated, use Categorical.take instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value) @@ -1934,7 +1927,10 @@ def __iter__(self): """ Returns an Iterator over the values of this Categorical. 
""" - return iter(self._internal_get_values().tolist()) + if self.ndim == 1: + return iter(self._internal_get_values().tolist()) + else: + return (self[n] for n in range(len(self))) def __contains__(self, key) -> bool: """ @@ -2053,27 +2049,6 @@ def __repr__(self) -> str: # ------------------------------------------------------------------ - @overload - def __getitem__(self, key: ScalarIndexer) -> Any: - ... - - @overload - def __getitem__( - self: CategoricalT, - key: SequenceIndexer | PositionalIndexerTuple, - ) -> CategoricalT: - ... - - def __getitem__(self: CategoricalT, key: PositionalIndexer2D) -> CategoricalT | Any: - """ - Return an item. - """ - result = super().__getitem__(key) - if getattr(result, "ndim", 0) > 1: - result = result._ndarray - deprecate_ndim_indexing(result) - return result - def _validate_listlike(self, value): # NB: here we assume scalar-like tuples have already been excluded value = extract_array(value, extract_numpy=True) @@ -2311,7 +2286,19 @@ def _concat_same_type( ) -> CategoricalT: from pandas.core.dtypes.concat import union_categoricals - return union_categoricals(to_concat) + result = union_categoricals(to_concat) + + # in case we are concatenating along axis != 0, we need to reshape + # the result from union_categoricals + first = to_concat[0] + if axis >= first.ndim: + raise ValueError + if axis == 1: + if not all(len(x) == len(first) for x in to_concat): + raise ValueError + # TODO: Will this get contiguity wrong? 
+ result = result.reshape(-1, len(to_concat), order="F") + return result # ------------------------------------------------------------------ @@ -2352,7 +2339,7 @@ def is_dtype_equal(self, other) -> bool: "Categorical.is_dtype_equal is deprecated and will be removed " "in a future version", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) try: return self._categories_match_up_to_permutation(other) @@ -2699,6 +2686,11 @@ def _get_codes_for_values(values, categories: Index) -> np.ndarray: """ dtype_equal = is_dtype_equal(values.dtype, categories.dtype) + if values.ndim > 1: + flat = values.ravel() + codes = _get_codes_for_values(flat, categories) + return codes.reshape(values.shape) + if isinstance(categories.dtype, ExtensionDtype) and is_object_dtype(values): # Support inferring the correct extension dtype from an array of # scalar objects. e.g. diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f8aa1656c8c30..2e1ebf9d5a266 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -416,13 +416,12 @@ def astype(self, dtype, copy: bool = True): elif is_integer_dtype(dtype): # we deliberately ignore int32 vs. int64 here. # See https://github.com/pandas-dev/pandas/issues/24381 for more. - level = find_stack_level() warnings.warn( f"casting {self.dtype} values to int64 with .astype(...) is " "deprecated and will raise in a future version. " "Use .view(...) 
instead.", FutureWarning, - stacklevel=level, + stacklevel=find_stack_level(), ) values = self.asi8 diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4fecbe4be9681..a0a7ef3501d7f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1206,7 +1206,7 @@ def to_perioddelta(self, freq) -> TimedeltaArray: "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex - stacklevel=3, + stacklevel=find_stack_level(), ) from pandas.core.arrays.timedeltas import TimedeltaArray @@ -1373,7 +1373,7 @@ def weekofyear(self): "weekofyear and return an Index, you may call " "pd.Int64Index(idx.isocalendar().week)", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) week_series = self.isocalendar().week if week_series.hasnans: diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d5718d59bf8b0..01bf5ec0633b5 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -36,6 +36,7 @@ PositionalIndexer, ScalarIndexer, SequenceIndexer, + npt, ) from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -1482,15 +1483,15 @@ def to_tuples(self, na_tuple=True) -> np.ndarray: # --------------------------------------------------------------------- - def putmask(self, mask: np.ndarray, value) -> None: + def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: value_left, value_right = self._validate_setitem_value(value) if isinstance(self._left, np.ndarray): np.putmask(self._left, mask, value_left) np.putmask(self._right, mask, value_right) else: - self._left.putmask(mask, value_left) - self._right.putmask(mask, value_right) + self._left._putmask(mask, value_left) + self._right._putmask(mask, value_right) def insert(self: IntervalArrayT, loc: int, item: Interval) -> IntervalArrayT: """ diff --git a/pandas/core/arrays/masked.py 
b/pandas/core/arrays/masked.py index b11b11ded2f22..568f3484e78e4 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -47,6 +47,7 @@ ) from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import ( + array_equivalent, isna, notna, ) @@ -627,6 +628,22 @@ def value_counts(self, dropna: bool = True) -> Series: return Series(counts, index=index) + @doc(ExtensionArray.equals) + def equals(self, other) -> bool: + if type(self) != type(other): + return False + if other.dtype != self.dtype: + return False + + # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT + # equal. + if not np.array_equal(self._mask, other._mask): + return False + + left = self._data[~self._mask] + right = other._data[~other._mask] + return array_equivalent(left, right, dtype_equal=True) + def _reduce(self, name: str, *, skipna: bool = True, **kwargs): if name in {"any", "all"}: return getattr(self, name)(skipna=skipna, **kwargs) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 2f36b72229225..01018c7263f32 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -12,6 +12,7 @@ import numpy as np +from pandas._libs import algos as libalgos from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import ( BaseOffset, @@ -506,7 +507,22 @@ def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) - return DatetimeArray(new_data)._with_freq("infer") + dta = DatetimeArray(new_data) + + if self.freq.name == "B": + # See if we can retain BDay instead of Day in cases where + # len(self) is too small for infer_freq to distinguish between them + diffs = libalgos.unique_deltas(self.asi8) + if len(diffs) == 1: + diff = diffs[0] + if diff == self.freq.n: + dta._freq = self.freq + elif diff == 1: + dta._freq = self.freq.base + # TODO: other cases? 
+ return dta + else: + return dta._with_freq("infer") # -------------------------------------------------------------------- diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 960544a2f89ea..c054710a01f75 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -467,7 +467,7 @@ def __init__( "loses timezone information. Cast to object before " "sparse to retain timezone information.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) data = np.asarray(data, dtype="datetime64[ns]") if fill_value is NaT: @@ -1089,7 +1089,7 @@ def searchsorted( ) -> npt.NDArray[np.intp] | np.intp: msg = "searchsorted requires high memory usage." - warnings.warn(msg, PerformanceWarning, stacklevel=2) + warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) if not is_scalar(v): v = np.asarray(v) v = np.asarray(v) diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 915e13bc3bbb2..d23e217e605c7 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -16,6 +16,7 @@ type_t, ) from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, @@ -389,7 +390,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: f"values: '{fill_values}'. 
Picking the first and " "converting the rest.", PerformanceWarning, - stacklevel=6, + stacklevel=find_stack_level(), ) np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] diff --git a/pandas/core/common.py b/pandas/core/common.py index 2bf925466e176..590296c4b12f5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -36,6 +36,7 @@ Scalar, T, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -175,7 +176,7 @@ def cast_scalar_indexer(val, warn_float: bool = False): "Indexing with a float is deprecated, and will raise an IndexError " "in pandas 2.0. You can manually convert to an integer key instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return int(val) return val diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index a4bd0270f9451..f14882227ddd9 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -16,6 +16,7 @@ import numpy as np from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -126,7 +127,9 @@ def _align_core(terms): f"than an order of magnitude on term {repr(terms[i].name)}, " f"by more than {ordm:.4g}; performance may suffer." 
) - warnings.warn(w, category=PerformanceWarning, stacklevel=6) + warnings.warn( + w, category=PerformanceWarning, stacklevel=find_stack_level() + ) f = partial(ti.reindex, reindexer, axis=axis, copy=False) diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 26748eadb4c85..d82cc37b90ad4 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -7,6 +7,7 @@ import warnings from pandas._libs.lib import no_default +from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.computation.engines import ENGINES @@ -308,7 +309,7 @@ def eval( "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) exprs: list[str | BinOp] diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 0081f8cd074b6..31c2ec8f0cbf9 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -25,6 +25,8 @@ is_text, ) +from pandas.util._exceptions import find_stack_level + # compute use_bottleneck_doc = """ @@ -373,7 +375,7 @@ def _deprecate_negative_int_max_colwidth(key): "will not be supported in future version. Instead, use None " "to not limit the column width.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) cf.register_option( diff --git a/pandas/core/construction.py b/pandas/core/construction.py index c6f131a9daba6..e3b41f2c7b8c2 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -25,6 +25,7 @@ DtypeObj, ) from pandas.errors import IntCastingNaNError +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, @@ -538,7 +539,7 @@ def sanitize_array( "if they cannot be cast losslessly (matching Series behavior). 
" "To retain the old behavior, use DataFrame(data).astype(dtype)", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # GH#40110 until the deprecation is enforced, we _dont_ # ignore the dtype for DataFrame, and _do_ cast even though @@ -777,7 +778,7 @@ def _try_cast( "passed to 'DataFrame', either all columns will be cast to that " "dtype, or a TypeError will be raised.", FutureWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) subarr = np.array(arr, dtype=object, copy=copy) return subarr diff --git a/pandas/core/describe.py b/pandas/core/describe.py index 2c4a340e8c8ea..8d88ce280d5c8 100644 --- a/pandas/core/describe.py +++ b/pandas/core/describe.py @@ -23,6 +23,7 @@ from pandas._libs.tslibs import Timestamp from pandas._typing import NDFrameT +from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( @@ -377,7 +378,7 @@ def select_describe_func( "version of pandas. Specify `datetime_is_numeric=True` to " "silence this warning and adopt the future behavior now.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) return describe_timestamp_as_categorical_1d elif is_timedelta64_dtype(data.dtype): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 432074a8dd699..9cd67ad293f63 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -563,7 +563,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): "dtype is deprecated. In a future version, this will be cast " "to object dtype. 
Pass `fill_value=Timestamp(date_obj)` instead.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) return dtype, fv elif isinstance(fill_value, str): @@ -969,13 +969,12 @@ def astype_dt64_to_dt64tz( # this should be the only copy values = values.copy() - level = find_stack_level() warnings.warn( "Using .astype to convert from timezone-naive dtype to " "timezone-aware dtype is deprecated and will raise in a " "future version. Use ser.dt.tz_localize instead.", FutureWarning, - stacklevel=level, + stacklevel=find_stack_level(), ) # GH#33401 this doesn't match DatetimeArray.astype, which @@ -986,13 +985,12 @@ def astype_dt64_to_dt64tz( # DatetimeArray/DatetimeIndex.astype behavior if values.tz is None and aware: dtype = cast(DatetimeTZDtype, dtype) - level = find_stack_level() warnings.warn( "Using .astype to convert from timezone-naive dtype to " "timezone-aware dtype is deprecated and will raise in a " "future version. Use obj.tz_localize instead.", FutureWarning, - stacklevel=level, + stacklevel=find_stack_level(), ) return values.tz_localize(dtype.tz) @@ -1006,14 +1004,13 @@ def astype_dt64_to_dt64tz( return result elif values.tz is not None: - level = find_stack_level() warnings.warn( "Using .astype to convert from timezone-aware dtype to " "timezone-naive dtype is deprecated and will raise in a " "future version. Use obj.tz_localize(None) or " "obj.tz_convert('UTC').tz_localize(None) instead", FutureWarning, - stacklevel=level, + stacklevel=find_stack_level(), ) result = values.tz_convert("UTC").tz_localize(None) @@ -1133,7 +1130,7 @@ def astype_nansafe( "Use .view(...) instead.", FutureWarning, # stacklevel chosen to be correct when reached via Series.astype - stacklevel=7, + stacklevel=find_stack_level(), ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") @@ -1155,7 +1152,7 @@ def astype_nansafe( "Use .view(...) 
instead.", FutureWarning, # stacklevel chosen to be correct when reached via Series.astype - stacklevel=7, + stacklevel=find_stack_level(), ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") @@ -1651,7 +1648,7 @@ def maybe_cast_to_datetime( "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) # equiv: dta.view(dtype) # Note: NOT equivalent to dta.astype(dtype) @@ -1691,7 +1688,7 @@ def maybe_cast_to_datetime( ".tz_localize('UTC').tz_convert(dtype.tz) " "or pd.Series(data.view('int64'), dtype=dtype)", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) value = dta.tz_localize("UTC").tz_convert(dtype.tz) @@ -1859,7 +1856,7 @@ def construct_2d_arraylike_from_scalar( shape = (length, width) if dtype.kind in ["m", "M"]: - value = maybe_unbox_datetimelike_tz_deprecation(value, dtype, stacklevel=4) + value = maybe_unbox_datetimelike_tz_deprecation(value, dtype) # error: Non-overlapping equality check (left operand type: "dtype[Any]", right # operand type: "Type[object]") elif dtype == object: # type: ignore[comparison-overlap] @@ -1932,9 +1929,7 @@ def construct_1d_arraylike_from_scalar( return subarr -def maybe_unbox_datetimelike_tz_deprecation( - value: Scalar, dtype: DtypeObj, stacklevel: int = 5 -): +def maybe_unbox_datetimelike_tz_deprecation(value: Scalar, dtype: DtypeObj): """ Wrap maybe_unbox_datetimelike with a check for a timezone-aware Timestamp along with a timezone-naive datetime64 dtype, which is deprecated. 
@@ -1963,7 +1958,7 @@ def maybe_unbox_datetimelike_tz_deprecation( "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) new_value = value.tz_localize(None) return maybe_unbox_datetimelike(new_value, dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 815a0a2040ddb..7ac8e6c47158c 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -22,6 +22,7 @@ ArrayLike, DtypeObj, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( @@ -304,7 +305,7 @@ def is_categorical(arr) -> bool: "is_categorical is deprecated and will be removed in a future version. " "Use is_categorical_dtype instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) @@ -1378,7 +1379,7 @@ def is_extension_type(arr) -> bool: "'is_extension_type' is deprecated and will be removed in a future " "version. 
Use 'is_extension_array_dtype' instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if is_categorical_dtype(arr): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e20670893f71c..7d84d903c84b0 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -465,9 +465,11 @@ def _hash_categories(self) -> int: [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)] ) else: - # error: Incompatible types in assignment (expression has type - # "List[ndarray]", variable has type "ndarray") - cat_array = [cat_array] # type: ignore[assignment] + cat_array_list = [cat_array] + combined_hashed = combine_hash_arrays( + iter(cat_array_list), num_items=len(cat_array_list) + ) + return np.bitwise_xor.reduce(combined_hashed) combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(combined_hashed) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index c457b52cf4b0e..eea3fa37b7435 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -475,8 +475,8 @@ def array_equivalent( return np.array_equal(left, right) -def _array_equivalent_float(left, right): - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() +def _array_equivalent_float(left, right) -> bool: + return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all()) def _array_equivalent_datetimelike(left, right): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1f26b6d9ae6ae..b88c97b8e988d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -83,6 +83,7 @@ doc, rewrite_axis_style_signature, ) +from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_axis_style_args, @@ -643,7 +644,7 @@ def __init__( "removed in a future version. 
Pass " "{name: data[name] for name in data.dtype.names} instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # a masked array @@ -1043,12 +1044,14 @@ def _repr_html_(self) -> str | None: return None @Substitution( - header_type="bool or sequence", + header_type="bool or sequence of strings", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", - col_space="The minimum width of each column", + col_space="The minimum width of each column. If a list of ints is given " + "every integers corresponds with one column. If a dict is given, the key " + "references the column, while the value defines the space to use.", ) @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string( @@ -1065,11 +1068,11 @@ def to_string( index_names: bool = True, justify: str | None = None, max_rows: int | None = None, - min_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, + min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: @@ -1078,6 +1081,9 @@ def to_string( %(shared_params)s line_width : int, optional Width to wrap a line in characters. + min_rows : int, optional + The number of rows to display in the console in a truncated repr + (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. @@ -1793,7 +1799,7 @@ def to_dict(self, orient: str = "dict", into=dict): warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # GH16122 into_c = com.standardize_mapping(into) @@ -1814,7 +1820,7 @@ def to_dict(self, orient: str = "dict", into=dict): "will be used in a future version. 
Use one of the above " "to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if orient.startswith("d"): @@ -2659,7 +2665,7 @@ def to_markdown( "'showindex' is deprecated. Only 'index' will be used " "in a future version. Use 'index' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) kwargs.setdefault("headers", "keys") @@ -2836,15 +2842,14 @@ def to_html( border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. - encoding : str, default "utf-8" - Set character encoding. - - .. versionadded:: 1.0 - table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. + encoding : str, default "utf-8" + Set character encoding. + + .. versionadded:: 1.0 %(returns)s See Also -------- @@ -3218,7 +3223,7 @@ def info( warnings.warn( "null_counts is deprecated. Use show_counts instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) show_counts = null_counts info = DataFrameInfo( @@ -3591,7 +3596,7 @@ def _getitem_bool_array(self, key): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( @@ -4637,7 +4642,7 @@ def lookup( "You can use DataFrame.melt and DataFrame.loc " "as a substitute." ) - warnings.warn(msg, FutureWarning, stacklevel=2) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) n = len(row_labels) if n != len(col_labels): @@ -7754,7 +7759,7 @@ def groupby( "will be removed in a future version." 
), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: squeeze = False @@ -9154,6 +9159,11 @@ def join( * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. + * cross: creates the cartesian product from both frames, preserves the order + of the left keys. + + .. versionadded:: 1.2.0 + lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' @@ -9844,7 +9854,7 @@ def count( "deprecated and will be removed in a future version. Use groupby " "instead. df.count(level=1) should use df.groupby(level=1).count().", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._count_level(level, axis=axis, numeric_only=numeric_only) @@ -9944,7 +9954,7 @@ def _reduce( "will include datetime64 and datetime64tz columns in a " "future version.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) # Non-copy equivalent to # dt64_cols = self.dtypes.apply(is_datetime64_any_dtype) @@ -10019,7 +10029,7 @@ def _get_data() -> DataFrame: "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) return out @@ -10052,7 +10062,7 @@ def _get_data() -> DataFrame: "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) if hasattr(result, "dtype"): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 93bf70c27f8ff..fd8af2c0cedd0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -18,6 +18,7 @@ Literal, Mapping, Sequence, + Type, cast, final, overload, @@ -486,9 +487,10 @@ def _data(self): @property def _AXIS_NUMBERS(self) -> dict[str, int]: """.. 
deprecated:: 1.1.0""" - level = self.ndim + 1 warnings.warn( - "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=level + "_AXIS_NUMBERS has been deprecated.", + FutureWarning, + stacklevel=find_stack_level(), ) return {"index": 0} @@ -2001,15 +2003,15 @@ def __contains__(self, key) -> bool_t: @property def empty(self) -> bool_t: """ - Indicator whether DataFrame is empty. + Indicator whether Series/DataFrame is empty. - True if DataFrame is entirely empty (no items), meaning any of the + True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool - If DataFrame is empty, return True, if not return False. + If Series/DataFrame is empty, return True, if not return False. See Also -------- @@ -2019,7 +2021,7 @@ def empty(self) -> bool_t: Notes ----- - If DataFrame contains only NaNs, it is still not considered empty. See + If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples @@ -2045,6 +2047,16 @@ def empty(self) -> bool_t: False >>> df.dropna().empty True + + >>> ser_empty = pd.Series({'A' : []}) + >>> ser_empty + A [] + dtype: object + >>> ser_empty.empty + False + >>> ser_empty = pd.Series() + >>> ser_empty.empty + True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) @@ -3647,7 +3659,7 @@ class max_speed "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) @@ -3781,7 +3793,7 @@ class animal locomotion "Passing lists as key for xs is deprecated and will be removed in a " "future version. 
Pass key as a tuple instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if level is not None: @@ -5556,7 +5568,7 @@ def __setattr__(self, name: str, value) -> None: "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", - stacklevel=2, + stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) @@ -5825,14 +5837,22 @@ def astype( "Only a column name can be used for the " "key in a dtype mappings argument." ) + + # GH#44417 cast to Series so we can use .iat below, which will be + # robust in case we + from pandas import Series + + dtype_ser = Series(dtype, dtype=object) + dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) + results = [] - for col_name, col in self.items(): - if col_name in dtype: - results.append( - col.astype(dtype=dtype[col_name], copy=copy, errors=errors) - ) + for i, (col_name, col) in enumerate(self.items()): + cdt = dtype_ser.iat[i] + if isna(cdt): + res_col = col.copy() if copy else col else: - results.append(col.copy() if copy else col) + res_col = col.astype(dtype=cdt, copy=copy, errors=errors) + results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype @@ -6219,8 +6239,12 @@ def convert_dtypes( for col_name, col in self.items() ] if len(results) > 0: + result = concat(results, axis=1, copy=False) + cons = cast(Type["DataFrame"], self._constructor) + result = cons(result) + result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 - return cast(NDFrameT, concat(results, axis=1, copy=False)) + return cast(NDFrameT, result) else: return self.copy() @@ -7774,7 +7798,7 @@ def between_time( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) left = True if isinstance(include_start, lib.NoDefault) 
else include_start right = True if isinstance(include_end, lib.NoDefault) else include_end @@ -9190,7 +9214,7 @@ def where( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._where(cond, other, inplace, axis, level, errors=errors) @@ -9222,7 +9246,7 @@ def mask( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # see gh-21891 @@ -9415,7 +9439,7 @@ def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: "and will be removed in a future version. " "You can use DataFrame/Series.shift instead." ) - warnings.warn(msg, FutureWarning, stacklevel=2) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) if periods == 0: return self @@ -9467,7 +9491,7 @@ def tshift(self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0) -> NDFra "Please use shift instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if freq is None: @@ -10282,7 +10306,7 @@ def _logical_func( "deprecated and will be removed in a future version. Use groupby " "instead. df.any(level=1) should use df.groupby(level=1).any()", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if bool_only is not None: raise NotImplementedError( @@ -10378,7 +10402,7 @@ def _stat_function_ddof( "deprecated and will be removed in a future version. Use groupby " "instead. df.var(level=1) should use df.groupby(level=1).var().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, ddof=ddof @@ -10431,7 +10455,7 @@ def _stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. 
df.median(level=1) should use df.groupby(level=1).median().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only @@ -10498,7 +10522,7 @@ def _min_count_stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. df.sum(level=1) should use df.groupby(level=1).sum().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, @@ -10582,7 +10606,7 @@ def mad(self, axis=None, skipna=None, level=None): "deprecated and will be removed in a future version. Use groupby " "instead. df.mad(level=1) should use df.groupby(level=1).mad()", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna) @@ -10980,7 +11004,7 @@ def expanding( warnings.warn( "The `center` argument on `expanding` will be removed in the future.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: center = False diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 8a330d08bef78..b8354e800753d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -37,6 +37,7 @@ Substitution, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_int64, @@ -991,7 +992,7 @@ def _wrap_applied_output( result = self.obj._constructor( index=self.grouper.result_index, columns=data.columns ) - result = result.astype(data.dtypes.to_dict(), copy=False) + result = result.astype(data.dtypes, copy=False) return result # GH12824 @@ -1330,7 +1331,7 @@ def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: "Indexing with multiple keys (implicitly converted to a tuple " "of keys) will be deprecated, use a list instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return super().__getitem__(key) diff --git 
a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 7577b1e671d60..6cbe37c6b3838 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -21,6 +21,7 @@ ) from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import sanitize_to_nanoseconds from pandas.core.dtypes.common import ( @@ -964,8 +965,6 @@ def _check_deprecated_resample_kwargs(kwargs, origin): From where this function is being called; either Grouper or TimeGrouper. Used to determine an approximate stacklevel. """ - from pandas.core.resample import TimeGrouper - # Deprecation warning of `base` and `loffset` since v1.1.0: # we are raising the warning here to be able to set the `stacklevel` # properly since we need to raise the `base` and `loffset` deprecation @@ -975,11 +974,6 @@ def _check_deprecated_resample_kwargs(kwargs, origin): # core/groupby/grouper.py::Grouper # raising these warnings from TimeGrouper directly would fail the test: # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base - # hacky way to set the stacklevel: if cls is TimeGrouper it means - # that the call comes from a pandas internal call of resample, - # otherwise it comes from pd.Grouper - stacklevel = (5 if origin is TimeGrouper else 2) + 1 - # the + 1 is for this helper function, check_deprecated_resample_kwargs if kwargs.get("base", None) is not None: warnings.warn( @@ -989,7 +983,7 @@ def _check_deprecated_resample_kwargs(kwargs, origin): "\nbecomes:\n" '\n>>> df.resample(freq="3s", offset="2s")\n', FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) if kwargs.get("loffset", None) is not None: warnings.warn( @@ -1000,5 +994,5 @@ def _check_deprecated_resample_kwargs(kwargs, origin): '\n>>> df = df.resample(freq="3s").mean()' '\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n', FutureWarning, - 
stacklevel=stacklevel, + stacklevel=find_stack_level(), ) diff --git a/pandas/core/index.py b/pandas/core/index.py index 13a687b1c27e3..00ca6f9048a40 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,5 +1,7 @@ import warnings +from pandas.util._exceptions import find_stack_level + from pandas.core.indexes.api import ( # noqa:F401 CategoricalIndex, DatetimeIndex, @@ -26,5 +28,5 @@ "pandas.core.index is deprecated and will be removed in a future version. " "The public classes are available in the top-level namespace.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index b1824413512c5..41920727c50fd 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -399,7 +399,7 @@ def unpack_1tuple(tup): "slice is deprecated and will raise in a future " "version. Pass a tuple instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return tup[0] diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index b8f4b5f9d3423..3aad1140294e5 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -8,6 +8,8 @@ import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_dtype, @@ -286,7 +288,7 @@ def weekofyear(self): "Series.dt.weekofyear and Series.dt.week have been deprecated. " "Please use Series.dt.isocalendar().week instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) week_series = self.isocalendar().week week_series.name = self.name diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ba7dde7d2a4d8..a8896c1fde546 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -399,7 +399,7 @@ def __new__( "'tupleize_cols' is deprecated and will raise TypeError in a " "future version. 
Use the specific Index subclass directly instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.arrays import PandasArray @@ -632,7 +632,7 @@ def asi8(self): warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return None @@ -746,7 +746,7 @@ def _get_attributes_dict(self) -> dict[str_t, Any]: "The Index._get_attributes_dict method is deprecated, and will be " "removed in a future version", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return {k: getattr(self, k, None) for k in self._attributes} @@ -919,7 +919,7 @@ def ravel(self, order="C"): "Index.ravel returning ndarray is deprecated; in a future version " "this will return a view on self.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if needs_i8_conversion(self.dtype): # Item "ndarray[Any, Any]" of "Union[ExtensionArray, ndarray[Any, Any]]" @@ -1191,7 +1191,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index @@ -1371,7 +1371,7 @@ def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: "The 'to_native_types' method is deprecated and will be removed in " "a future version. Use 'astype(str)' instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) values = self if slicer is not None: @@ -2503,7 +2503,7 @@ def is_mixed(self) -> bool: "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.inferred_type in ["mixed"] @@ -2538,7 +2538,7 @@ def is_all_dates(self) -> bool: "Index.is_all_dates is deprecated, will be removed in a future version. 
" "check index.inferred_type instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._is_all_dates @@ -2905,7 +2905,7 @@ def __and__(self, other): "in the future this will be a logical operation matching " "Series.__and__. Use index.intersection(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.intersection(other) @@ -2916,7 +2916,7 @@ def __or__(self, other): "in the future this will be a logical operation matching " "Series.__or__. Use index.union(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.union(other) @@ -2927,7 +2927,7 @@ def __xor__(self, other): "in the future this will be a logical operation matching " "Series.__xor__. Use index.symmetric_difference(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.symmetric_difference(other) @@ -3073,7 +3073,7 @@ def union(self, other, sort=None): "object dtype. To retain the old behavior, " "use `index.astype(object).union(other)`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) dtype = self._find_common_type_compat(other) @@ -3524,7 +3524,7 @@ def get_loc(self, key, method=None, tolerance=None): "and will raise in a future version. Use " "index.get_indexer([item], method=...) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if is_scalar(key) and isna(key) and not self.hasnans: @@ -3958,7 +3958,7 @@ def is_int(v): "and will raise TypeError in a future version. 
" "Use .loc with labels or .iloc with positions instead.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) indexer = key else: @@ -4107,7 +4107,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and " "will raise in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) target = self._wrap_reindex_result(target, indexer, preserve_names) @@ -4444,8 +4444,7 @@ def _join_non_unique( if isinstance(join_array, np.ndarray): np.putmask(join_array, mask, right) else: - # error: "ExtensionArray" has no attribute "putmask" - join_array.putmask(mask, right) # type: ignore[attr-defined] + join_array._putmask(mask, right) join_index = self._wrap_joined_index(join_array, other) @@ -4849,7 +4848,7 @@ def is_type_compatible(self, kind: str_t) -> bool: "Index.is_type_compatible is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return kind == self.inferred_type @@ -5051,8 +5050,7 @@ def putmask(self, mask, value) -> Index: else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent - # error: "ExtensionArray" has no attribute "putmask" - values.putmask(mask, value) # type: ignore[attr-defined] + values._putmask(mask, value) return self._shallow_copy(values) @@ -5487,7 +5485,7 @@ def get_value(self, series: Series, key): "get_value is deprecated and will be removed in a future version. " "Use Series[key] instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self._check_indexing_error(key) @@ -5555,7 +5553,7 @@ def set_value(self, arr, key, value): "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) loc = self._engine.get_loc(key) validate_numeric_casting(arr.dtype, value) @@ -6683,8 +6681,6 @@ def all(self, *args, **kwargs): Examples -------- - **all** - True, because nonzero integers are considered True. 
>>> pd.Index([1, 2, 3]).all() @@ -6694,18 +6690,6 @@ def all(self, *args, **kwargs): >>> pd.Index([0, 1, 2]).all() False - - **any** - - True, because ``1`` is considered True. - - >>> pd.Index([0, 0, 1]).any() - True - - False, because ``0`` is considered False. - - >>> pd.Index([0, 0, 0]).any() - False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") @@ -7025,7 +7009,7 @@ def _maybe_cast_data_without_dtype( "In a future version, the Index constructor will not infer numeric " "dtypes when passed object-dtype sequences (matching Series behavior)", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if result.dtype.kind in ["b", "c"]: return subarr @@ -7083,6 +7067,6 @@ def _maybe_try_sort(result, sort): warnings.warn( f"{err}, sort order is undefined for incomparable objects.", RuntimeWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e2dd5ecfde5a8..f26a24c38b19f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -17,6 +17,7 @@ npt, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -218,7 +219,7 @@ def __new__( "deprecated and will raise in a future version. " "Use CategoricalIndex([], ...) 
instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) data = [] @@ -431,7 +432,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and will " "raise in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if len(self) and indexer is not None: @@ -506,7 +507,7 @@ def take_nd(self, *args, **kwargs): "CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take " "instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.take(*args, **kwargs) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index a0902a5fb32fe..104bce0369d37 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -36,6 +36,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -403,7 +404,7 @@ def is_type_compatible(self, kind: str) -> bool: f"{type(self).__name__}.is_type_compatible is deprecated and will be " "removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return kind in self._data._infer_matches diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6078da3bedd8c..e283509206344 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -495,7 +495,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "is deprecated and will be removed in a future version. 
" "You can stop passing 'keep_tz' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: warnings.warn( @@ -505,7 +505,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "can do 'idx.tz_convert(None)' before calling " "'to_series'.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: keep_tz = True @@ -752,7 +752,7 @@ def check_str_or_none(point): "with non-existing keys is deprecated and will raise a " "KeyError in a future Version.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) indexer = mask.nonzero()[0][::step] if len(indexer) == len(self): @@ -1042,7 +1042,7 @@ def date_range( warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if closed is None: inclusive = "both" diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 5791f89828ca3..885c922d1ee0f 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -727,6 +727,8 @@ def _get_indexer_pointwise( if isinstance(locs, slice): # Only needed for get_indexer_non_unique locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp") + elif not self.is_unique and not self.is_monotonic: + locs = np.where(locs)[0] locs = np.array(locs, ndmin=1) except KeyError: missing.append(i) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fe97d61be7548..128aa8e282a0d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -46,6 +46,7 @@ deprecate_nonkeyword_arguments, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( @@ -893,7 +894,7 @@ def set_levels( warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) else: inplace = False 
@@ -1054,7 +1055,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = Tr warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1166,14 +1167,14 @@ def copy( "parameter levels is deprecated and will be removed in a future " "version. Use the set_levels method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if codes is not None: warnings.warn( "parameter codes is deprecated and will be removed in a future " "version. Use the set_codes method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if deep: @@ -1202,7 +1203,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index @@ -1802,7 +1803,7 @@ def is_lexsorted(self) -> bool: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._is_lexsorted() @@ -1846,7 +1847,7 @@ def lexsort_depth(self) -> int: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._lexsort_depth @@ -2212,7 +2213,7 @@ def drop(self, codes, level=None, errors="raise"): "dropping on a non-lexsorted multi-index " "without a level parameter may impact performance.", PerformanceWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) loc = loc.nonzero()[0] inds.extend(loc) @@ -2877,7 +2878,7 @@ def _maybe_to_slice(loc): warnings.warn( "indexing past lexsort depth may impact performance.", PerformanceWarning, - stacklevel=10, + stacklevel=find_stack_level(), ) loc = 
np.arange(start, stop, dtype=np.intp) @@ -3335,7 +3336,7 @@ def _update_indexer(idxr: Index, indexer: Index) -> Index: # TODO: how to handle IntervalIndex level? # (no test cases) FutureWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) continue else: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 4d8c411478993..25b43c556b812 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -21,6 +21,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( @@ -421,7 +422,7 @@ def asi8(self) -> npt.NDArray[np.int64]: warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._values.view(self._default_dtype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fd5b5bb7396af..e3e1589d91e09 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -25,6 +25,7 @@ DtypeObj, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64_any_dtype, @@ -346,13 +347,21 @@ def astype(self, dtype, copy: bool = True, how=lib.no_default): "will be removed in a future version. " "Use index.to_timestamp(how=how) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: how = "start" if is_datetime64_any_dtype(dtype): # 'how' is index-specific, isn't part of the EA interface. + # GH#44398 deprecate astype(dt64), matching Series behavior + warnings.warn( + f"Converting {type(self).__name__} to DatetimeIndex with " + "'astype' is deprecated and will raise in a future version. 
" + "Use `obj.to_timestamp(how).tz_localize(dtype.tz)` instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) tz = getattr(dtype, "tz", None) return self.to_timestamp(how=how).tz_localize(tz) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index aed7a7a467db3..fdb1ee754a7e6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -29,6 +29,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_platform_int, @@ -256,7 +257,7 @@ def _start(self) -> int: warnings.warn( self._deprecation_message.format("_start", "start"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.start @@ -279,7 +280,7 @@ def _stop(self) -> int: warnings.warn( self._deprecation_message.format("_stop", "stop"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.stop @@ -303,7 +304,7 @@ def _step(self) -> int: warnings.warn( self._deprecation_message.format("_step", "step"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.step @@ -456,7 +457,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. 
Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 669274e034905..91f1415178471 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -16,6 +16,7 @@ InvalidIndexError, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_array_like, @@ -1381,7 +1382,7 @@ def _has_valid_setitem_indexer(self, indexer) -> bool: "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if not isinstance(indexer, tuple): @@ -1859,10 +1860,19 @@ def _setitem_single_column(self, loc: int, value, plane_indexer): # in case of slice ser = value[pi] else: - # set the item, possibly having a dtype change - ser = ser.copy() - ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value) - ser._maybe_update_cacher(clear=True, inplace=True) + # set the item, first attempting to operate inplace, then + # falling back to casting if necessary; see + # _whatsnew_130.notable_bug_fixes.setitem_column_try_inplace + + orig_values = ser._values + ser._mgr = ser._mgr.setitem((pi,), value) + + if ser._values is orig_values: + # The setitem happened inplace, so the DataFrame's values + # were modified inplace. + return + self.obj._iset_item(loc, ser, inplace=True) + return # reset the sliced object if unique self.obj._iset_item(loc, ser, inplace=True) @@ -2298,7 +2308,7 @@ def convert_to_index_sliceable(obj: DataFrame, key): "and will be removed in a future version. 
Use `frame.loc[string]` " "instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return res except (KeyError, ValueError, NotImplementedError): diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 6cbaae3fe12e0..75715bdc90003 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -44,12 +44,14 @@ def __getattr__(name: str): import warnings + from pandas.util._exceptions import find_stack_level + if name == "CategoricalBlock": warnings.warn( "CategoricalBlock is deprecated and will be removed in a future version. " "Use ExtensionBlock instead.", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.internals.blocks import CategoricalBlock diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 543b2ea26f750..1cd9fe65407ba 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -365,7 +365,7 @@ def diff(self: T, n: int, axis: int) -> T: # with axis=0 is equivalent assert n == 0 axis = 0 - return self.apply(algos.diff, n=n, axis=axis, stacklevel=5) + return self.apply(algos.diff, n=n, axis=axis) def interpolate(self: T, **kwargs) -> T: return self.apply_with_block("interpolate", swap_axis=False, **kwargs) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 2589015e0f0b1..46e5b5b9c53ad 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -190,7 +190,7 @@ def is_categorical(self) -> bool: "future version. Use isinstance(block.values, Categorical) " "instead. 
See https://github.com/pandas-dev/pandas/issues/40226", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return isinstance(self.values, Categorical) @@ -952,7 +952,8 @@ def putmask(self, mask, new) -> list[Block]: List[Block] """ orig_mask = mask - mask, noop = validate_putmask(self.values.T, mask) + values = cast(np.ndarray, self.values) + mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) # if we are passed a scalar None, convert it here @@ -960,7 +961,6 @@ def putmask(self, mask, new) -> list[Block]: new = self.fill_value if self._can_hold_element(new): - # error: Argument 1 to "putmask_without_repeat" has incompatible type # "Union[ndarray, ExtensionArray]"; expected "ndarray" putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type] @@ -979,9 +979,15 @@ def putmask(self, mask, new) -> list[Block]: elif self.ndim == 1 or self.shape[0] == 1: # no need to split columns - # error: Argument 1 to "putmask_smart" has incompatible type "Union[ndarray, - # ExtensionArray]"; expected "ndarray" - nv = putmask_smart(self.values.T, mask, new).T # type: ignore[arg-type] + if not is_list_like(new): + # putmask_smart can't save us the need to cast + return self.coerce_to_target_dtype(new).putmask(mask, new) + + # This differs from + # `self.coerce_to_target_dtype(new).putmask(mask, new)` + # because putmask_smart will check if new[mask] may be held + # by our dtype. 
+ nv = putmask_smart(values.T, mask, new).T return [self.make_block(nv)] else: @@ -1122,7 +1128,7 @@ def take_nd( def diff(self, n: int, axis: int = 1) -> list[Block]: """return block for the diff of the values""" - new_values = algos.diff(self.values, n, axis=axis, stacklevel=7) + new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> list[Block]: @@ -1415,15 +1421,13 @@ def putmask(self, mask, new) -> list[Block]: new_values = self.values - if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): - new = new[mask] - if mask.ndim == new_values.ndim + 1: # TODO(EA2D): unnecessary with 2D EAs mask = mask.reshape(new_values.shape) try: - new_values[mask] = new + # Caller is responsible for ensuring matching lengths + new_values._putmask(mask, new) except TypeError: if not is_interval_dtype(self.dtype): # Discussion about what we want to support in the general @@ -1704,7 +1708,7 @@ def putmask(self, mask, new) -> list[Block]: return self.coerce_to_target_dtype(new).putmask(mask, new) arr = self.values - arr.T.putmask(mask, new) + arr.T._putmask(mask, new) return [self] def where(self, other, cond) -> list[Block]: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 159c20382dcfb..a766f8321a641 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -23,6 +23,7 @@ DtypeObj, Manager, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, @@ -442,15 +443,18 @@ def dict_to_mgr( if missing.any() and not is_integer_dtype(dtype): nan_dtype: DtypeObj - if dtype is None or ( - isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.flexible) - ): + if dtype is not None: + # calling sanitize_array ensures we don't mix-and-match + # NA dtypes + midxs = missing.values.nonzero()[0] + for i in 
midxs: + arr = sanitize_array(arrays.iat[i], index, dtype=dtype) + arrays.iat[i] = arr + else: # GH#1783 nan_dtype = np.dtype("object") - else: - nan_dtype = dtype - val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) - arrays.loc[missing] = [val] * missing.sum() + val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) + arrays.loc[missing] = [val] * missing.sum() arrays = list(arrays) columns = ensure_index(columns) @@ -830,7 +834,7 @@ def to_arrays( "To retain the old behavior, pass as a dictionary " "DataFrame({col: categorical, ..})", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if columns is None: columns = default_index(len(data)) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b4d6e0ace4223..cb0c3e05e955f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1192,7 +1192,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: "Consider joining all columns at once using pd.concat(axis=1) " "instead. 
To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: @@ -1637,7 +1637,7 @@ def __init__( "The `fastpath` keyword is deprecated and will be removed " "in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self.axes = [axis] diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index ece5b21fa2f8e..540a557f7c7cc 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -14,6 +14,7 @@ from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401 from pandas._typing import Level from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_array_like, @@ -300,7 +301,7 @@ def to_series(right): "Do `left, right = left.align(right, axis=1, copy=False)` " "before e.g. `left == right`", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) left, right = left.align( diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 1b217a592987f..7026e470df1c0 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -10,6 +10,7 @@ Appender, deprecate_kwarg, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_extension_array_dtype, @@ -58,7 +59,7 @@ def melt( "In the future this will raise an error, please set the 'value_name' " "parameter of DataFrame.melt to a unique name.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if id_vars is not None: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index a88d1dce693f6..4dd15dd367581 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -35,6 +35,7 @@ Appender, Substitution, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import 
find_common_type from pandas.core.dtypes.common import ( @@ -676,7 +677,7 @@ def __init__( ) # stacklevel chosen to be correct when this is reached via pd.merge # (and not DataFrame.join) - warnings.warn(msg, FutureWarning, stacklevel=3) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) self._validate_specification() @@ -2297,7 +2298,7 @@ def _items_overlap_with_suffix( "unexpected results. Provide 'suffixes' as a tuple instead. In the " "future a 'TypeError' will be raised.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) to_rename = left.intersection(right) @@ -2347,7 +2348,7 @@ def renamer(x, suffix): f"Passing 'suffixes' which cause duplicate columns {set(dups)} in the " f"result is deprecated and will raise a MergeError in a future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return llabels, rlabels diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 9c7107ab40644..6c6b14653df75 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -745,13 +745,15 @@ def _convert_level_number(level_num, columns): if frame._is_homogeneous_type and is_extension_array_dtype( frame.dtypes.iloc[0] ): + # TODO(EA2D): won't need special case, can go through .values + # paths below (might change to ._values) dtype = this[this.columns[loc]].dtypes.iloc[0] subset = this[this.columns[loc]] value_slice = dtype.construct_array_type()._concat_same_type( [x._values for _, x in subset.items()] ) - N, K = this.shape + N, K = subset.shape idx = np.arange(N * K).reshape(K, N).T.ravel() value_slice = value_slice.take(idx) diff --git a/pandas/core/series.py b/pandas/core/series.py index 996af80139458..e0a63b8e35105 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,6 +54,7 @@ deprecate_nonkeyword_arguments, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, @@ 
-360,7 +361,7 @@ def __init__( "of 'float64' in a future version. Specify a dtype explicitly " "to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # uncomment the line below when removing the FutureWarning # dtype = np.dtype(object) @@ -886,7 +887,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series: "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) @@ -1011,7 +1012,7 @@ def _get_values_tuple(self, key): # mpl hackaround if com.any_none(*key): result = self._get_values(key) - deprecate_ndim_indexing(result, stacklevel=5) + deprecate_ndim_indexing(result, stacklevel=find_stack_level()) return result if not isinstance(self.index, MultiIndex): @@ -1078,7 +1079,7 @@ def __setitem__(self, key, value) -> None: "Series. Use `series.iloc[an_int] = val` to treat the " "key as positional.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(key, value) @@ -1887,7 +1888,7 @@ def groupby( "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: squeeze = False @@ -1949,7 +1950,7 @@ def count(self, level=None): "deprecated and will be removed in a future version. Use groupby " "instead. 
ser.count(level=1) should use ser.groupby(level=1).count().", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") @@ -5135,7 +5136,7 @@ def between(self, left, right, inclusive="both") -> Series: "Boolean inputs to the `inclusive` argument are deprecated in " "favour of `both` or `neither`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if inclusive: inclusive = "both" diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 1e27febab2af9..249fda9173b68 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -19,6 +19,7 @@ F, ) from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -238,7 +239,7 @@ def __iter__(self): warnings.warn( "Columnar iteration over characters will be deprecated in future releases.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) i = 0 g = self.get(i) @@ -1214,7 +1215,7 @@ def contains(self, pat, case=True, flags=0, na=None, regex=True): "This pattern has match groups. To actually get the " "groups, use str.extract.", UserWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) result = self._data.array._str_contains(pat, case, flags, na, regex) @@ -1426,7 +1427,7 @@ def replace( " In addition, single character regular expressions will " "*not* be treated as literal strings when regex=True." 
) - warnings.warn(msg, FutureWarning, stacklevel=3) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) # Check whether repl is valid (GH 13438, GH 15055) if not (isinstance(repl, str) or callable(repl)): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 669a39fcb3a74..67a6975c21fdd 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -39,6 +39,7 @@ ArrayLike, Timezone, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -1109,7 +1110,7 @@ def to_time(arg, format=None, infer_time_format=False, errors="raise"): "`to_time` has been moved, should be imported from pandas.core.tools.times. " "This alias will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.tools.times import to_time diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index c17af442fe2cc..f5f681d9de797 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -640,7 +640,7 @@ def vol(self, bias: bool = False, *args, **kwargs): "Use std instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.std(bias, *args, **kwargs) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b04aab3755b91..f7799912937b7 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -167,7 +167,7 @@ def win_type(self): "win_type will no longer return 'freq' in a future version. 
" "Check the type of self.window instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return "freq" return self._win_type @@ -177,7 +177,7 @@ def is_datetimelike(self) -> bool: warnings.warn( "is_datetimelike is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._win_freq_i8 is not None @@ -185,7 +185,7 @@ def validate(self) -> None: warnings.warn( "validate is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._validate() @@ -1763,6 +1763,7 @@ def count(self): "Specify min_periods=0 instead." ), FutureWarning, + stacklevel=find_stack_level(), ) self.min_periods = 0 result = super().count() diff --git a/pandas/io/common.py b/pandas/io/common.py index be6577e646ac3..12c7afc8ee2e4 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -49,6 +49,7 @@ import_lzma, ) from pandas.compat._optional import import_optional_dependency +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_file_like @@ -270,7 +271,7 @@ def _get_filepath_or_buffer( warnings.warn( "compression has no effect when passing a non-binary object as input.", RuntimeWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) compression_method = None diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index f079a25f69fec..ef60afa195234 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -4,6 +4,7 @@ import numpy as np from pandas._libs.tslibs import parsing +from pandas.util._exceptions import find_stack_level def parse_date_time(date_col, time_col): @@ -18,7 +19,7 @@ def parse_date_time(date_col, time_col): Use pd.to_datetime(date_col + " " + time_col).to_pydatetime() instead to get a Numpy array. 
""", # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) date_col = _maybe_cast(date_col) time_col = _maybe_cast(time_col) @@ -38,7 +39,7 @@ def parse_date_fields(year_col, month_col, day_col): np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) @@ -63,7 +64,7 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_ np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) year_col = _maybe_cast(year_col) @@ -89,7 +90,7 @@ def generic_parser(parse_func, *cols): Use pd.to_datetime instead. """, FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) N = _check_columns(cols) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index e543c9161a26e..ed79a5ad98ab9 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -519,11 +519,10 @@ def parse( if convert_float is None: convert_float = True else: - stacklevel = find_stack_level() warnings.warn( "convert_float is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) validate_header_arg(header) @@ -833,7 +832,7 @@ def __new__( warnings.warn( "Use of **kwargs is deprecated, use engine_kwargs instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # only switch class if generic(ExcelWriter) @@ -868,7 +867,7 @@ def __new__( "deprecated and will also raise a warning, it can " "be globally set and the warning suppressed.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) cls = get_writer(engine) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ba85a1b340d05..ca53bfb7d5e08 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -164,9 +164,6 
@@ * unset. max_rows : int, optional Maximum number of rows to display in the console. - min_rows : int, optional - The number of rows to display in the console in a truncated repr - (when number of rows is above `max_rows`). max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d91c0bb54f8dc..40803ff14e357 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -28,6 +28,7 @@ ) from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level import pandas as pd from pandas import ( @@ -310,7 +311,7 @@ def render( warnings.warn( "this method is deprecated in favour of `Styler.to_html()`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if sparse_index is None: sparse_index = get_option("styler.sparse.index") @@ -1675,7 +1676,7 @@ def where( warnings.warn( "this method is deprecated in favour of `Styler.applymap()`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if other is None: @@ -1707,7 +1708,7 @@ def set_precision(self, precision: int) -> StylerRenderer: warnings.warn( "this method is deprecated in favour of `Styler.format(precision=..)`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self.precision = precision return self.format(precision=precision, na_rep=self.na_rep) @@ -2217,7 +2218,7 @@ def set_na_rep(self, na_rep: str) -> StylerRenderer: warnings.warn( "this method is deprecated in favour of `Styler.format(na_rep=..)`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self.na_rep = na_rep return self.format(na_rep=na_rep, precision=self.precision) @@ -2271,7 +2272,7 @@ def hide_index( warnings.warn( "this method is deprecated in favour of `Styler.hide(axis='index')`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) 
return self.hide(axis=0, level=level, subset=subset, names=names) @@ -2324,7 +2325,7 @@ def hide_columns( warnings.warn( "this method is deprecated in favour of `Styler.hide(axis='columns')`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.hide(axis=1, level=level, subset=subset, names=names) diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 90fd5d077d031..2c2c127394fb6 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -389,6 +389,8 @@ def _pull_field( try: if isinstance(spec, list): for field in spec: + if result is None: + raise KeyError(field) result = result[field] else: result = result[spec] diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 8cdcc05f60266..043eb34e18798 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -32,6 +32,7 @@ ParserError, ParserWarning, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( @@ -259,7 +260,8 @@ def _validate_parse_dates_presence(self, columns: list[str]) -> None: # ParseDates = Union[DateGroups, List[DateGroups], # Dict[ColReference, DateGroups]] cols_needed = itertools.chain.from_iterable( - col if is_list_like(col) else [col] for col in self.parse_dates + col if is_list_like(col) and not isinstance(col, tuple) else [col] + for col in self.parse_dates ) else: cols_needed = [] @@ -314,14 +316,14 @@ def _should_parse_dates(self, i: int) -> bool: @final def _extract_multi_indexer_columns( - self, header, index_names, col_names, passed_names: bool = False + self, header, index_names, passed_names: bool = False ): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """ if len(header) < 2: - return header[0], index_names, col_names, passed_names + return header[0], index_names, None, passed_names # the names are the 
tuples of the header that are not the index cols # 0 is the name of the index, assuming index_col is a list of column @@ -558,7 +560,7 @@ def _convert_to_ndarrays( f"for column {c} - only the converter will be used." ), ParserWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) try: @@ -830,7 +832,7 @@ def _check_data_length(self, columns: list[str], data: list[ArrayLike]) -> None: "Length of header or names does not match length of data. This leads " "to a loss of data with index_col=False.", ParserWarning, - stacklevel=6, + stacklevel=find_stack_level(), ) def _evaluate_usecols(self, usecols, names): @@ -1091,7 +1093,7 @@ def _isindex(colspec): if isinstance(parse_spec, list): # list of column lists for colspec in parse_spec: - if is_scalar(colspec): + if is_scalar(colspec) or isinstance(colspec, tuple): if isinstance(colspec, int) and colspec not in data_dict: colspec = orig_names[colspec] if _isindex(colspec): @@ -1146,7 +1148,11 @@ def _try_convert_dates(parser: Callable, colspec, data_dict, columns): else: colnames.append(c) - new_name = "_".join([str(x) for x in colnames]) + new_name: tuple | str + if all(isinstance(x, tuple) for x in colnames): + new_name = tuple(map("_".join, zip(*colnames))) + else: + new_name = "_".join([str(x) for x in colnames]) to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict] new_col = parser(*to_parse) diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index 32ca3aaeba6cc..db750cded45e5 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -10,6 +10,7 @@ FilePathOrBuffer, ) from pandas.errors import DtypeWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -78,25 +79,18 @@ def __init__(self, src: FilePathOrBuffer, **kwds): if self._reader.header is None: self.names = None else: - if len(self._reader.header) > 1: - # we have a multi index in the 
columns - # error: Cannot determine type of 'names' - # error: Cannot determine type of 'index_names' - # error: Cannot determine type of 'col_names' - ( - self.names, # type: ignore[has-type] - self.index_names, - self.col_names, - passed_names, - ) = self._extract_multi_indexer_columns( - self._reader.header, - self.index_names, # type: ignore[has-type] - self.col_names, # type: ignore[has-type] - passed_names, - ) - else: - # error: Cannot determine type of 'names' - self.names = list(self._reader.header[0]) # type: ignore[has-type] + # error: Cannot determine type of 'names' + # error: Cannot determine type of 'index_names' + ( + self.names, # type: ignore[has-type] + self.index_names, + self.col_names, + passed_names, + ) = self._extract_multi_indexer_columns( + self._reader.header, + self.index_names, # type: ignore[has-type] + passed_names, + ) # error: Cannot determine type of 'names' if self.names is None: # type: ignore[has-type] @@ -394,7 +388,7 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: f"Specify dtype option on import or set low_memory=False." ] ) - warnings.warn(warning_message, DtypeWarning, stacklevel=8) + warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) return result diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index af253fc062632..4d596aa2f3fa6 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -24,6 +24,7 @@ EmptyDataError, ParserError, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import is_dict_like @@ -117,24 +118,16 @@ def __init__(self, f: FilePathOrBuffer | list, **kwds): # Now self.columns has the set of columns that we will process. # The original set is stored in self.original_columns. 
- if len(self.columns) > 1: - # we are processing a multi index column - # error: Cannot determine type of 'index_names' - # error: Cannot determine type of 'col_names' - ( - self.columns, - self.index_names, - self.col_names, - _, - ) = self._extract_multi_indexer_columns( - self.columns, - self.index_names, # type: ignore[has-type] - self.col_names, # type: ignore[has-type] - ) - # Update list of original names to include all indices. - self.num_original_columns = len(self.columns) - else: - self.columns = self.columns[0] + # error: Cannot determine type of 'index_names' + ( + self.columns, + self.index_names, + self.col_names, + _, + ) = self._extract_multi_indexer_columns( + self.columns, + self.index_names, # type: ignore[has-type] + ) # get popped off for index self.orig_names: list[int | str | tuple] = list(self.columns) @@ -563,7 +556,7 @@ def _handle_usecols( "Defining usecols with out of bounds indices is deprecated " "and will raise a ParserError in a future version.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) col_indices = self.usecols diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 49c2b28207ed5..0b57f0f5ef814 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -86,7 +86,7 @@ delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``. delimiter : str, default ``None`` Alias for sep. -header : int, list of int, default 'infer' +header : int, list of int, None, default 'infer' Row number(s) to use as the column names, and the start of the data. Default behavior is to infer the column names: if no names are passed the behavior is identical to ``header=0`` and column @@ -104,7 +104,7 @@ List of column names to use. If the file contains a header row, then you should explicitly pass ``header=0`` to override the column names. Duplicates in this list are not allowed. 
-index_col : int, str, sequence of int / str, or False, default ``None`` +index_col : int, str, sequence of int / str, or False, optional, default ``None`` Column(s) to use as the row labels of the ``DataFrame``, either given as string name or column index. If a sequence of int / str is given, a MultiIndex is used. @@ -116,7 +116,8 @@ Return a subset of the columns. If list-like, all elements must either be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or - inferred from the document header row(s). For example, a valid list-like + inferred from the document header row(s). If ``names`` are given, the document + header row(s) are not taken into account. For example, a valid list-like `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To instantiate a DataFrame from ``data`` with element order preserved use @@ -331,7 +332,7 @@ `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to override values, a ParserWarning will be issued. See csv.Dialect documentation for more details. -error_bad_lines : bool, default ``None`` +error_bad_lines : bool, optional, default ``None`` Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will be dropped from the DataFrame that is @@ -340,7 +341,7 @@ .. deprecated:: 1.3.0 The ``on_bad_lines`` parameter should be used instead to specify behavior upon encountering a bad line instead. -warn_bad_lines : bool, default ``None`` +warn_bad_lines : bool, optional, default ``None`` If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. @@ -1041,7 +1042,7 @@ def _clean_options(self, options, engine): "engine='python'." 
), ParserWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) index_col = options["index_col"] @@ -1573,7 +1574,9 @@ def _merge_with_dialect_properties( conflict_msgs.append(msg) if conflict_msgs: - warnings.warn("\n\n".join(conflict_msgs), ParserWarning, stacklevel=2) + warnings.warn( + "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + ) kwds[param] = dialect_val return kwds diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 8c8e9b9feeb80..0e886befb5f2f 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -45,6 +45,7 @@ from pandas.compat.pickle_compat import patch_pickle from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -2190,7 +2191,9 @@ def update_info(self, info): # frequency/name just warn if key in ["freq", "index_name"]: ws = attribute_conflict_doc % (key, existing_value, value) - warnings.warn(ws, AttributeConflictWarning, stacklevel=6) + warnings.warn( + ws, AttributeConflictWarning, stacklevel=find_stack_level() + ) # reset idx[key] = None @@ -3080,7 +3083,7 @@ def write_array( pass else: ws = performance_doc % (inferred_type, key, items) - warnings.warn(ws, PerformanceWarning, stacklevel=7) + warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index ec5262ee3a04c..867ce52cbde6f 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -28,6 +28,7 @@ from pandas._typing import DtypeArg from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64tz_dtype, @@ -1159,7 +1160,7 @@ def _sqlalchemy_type(self, col): "the 'timedelta' type 
is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) return BigInteger elif col_type == "floating": @@ -1886,7 +1887,7 @@ def _create_table_setup(self): pat = re.compile(r"\s+") column_names = [col_name for col_name, _, _ in column_names_and_types] if any(map(pat.search, column_names)): - warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6) + warnings.warn(_SAFE_NAMES_WARNING, stacklevel=find_stack_level()) escape = _get_valid_sqlite_name @@ -1948,7 +1949,7 @@ def _sql_type_name(self, col): "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) col_type = "integer" diff --git a/pandas/io/stata.py b/pandas/io/stata.py index f6c93e6f751c8..9803a2e4e3309 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -600,6 +600,8 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: # Cast from unsupported types to supported types is_nullable_int = isinstance(data[col].dtype, (_IntegerDtype, BooleanDtype)) orig = data[col] + # We need to find orig_missing before altering data below + orig_missing = orig.isna() if is_nullable_int: missing_loc = data[col].isna() if missing_loc.any(): @@ -650,11 +652,10 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: f"supported by Stata ({float64_max})" ) if is_nullable_int: - missing = orig.isna() - if missing.any(): + if orig_missing.any(): # Replace missing by Stata sentinel value sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name] - data.loc[missing, col] = sentinel + data.loc[orig_missing, col] = sentinel if ws: warnings.warn(ws, PossiblePrecisionLoss) diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 1308a83f61443..a2089de294e22 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ 
-391,6 +391,11 @@ def plot_group(keys, values, ax: Axes): with plt.rc_context(rc): ax = plt.gca() data = data._get_numeric_data() + naxes = len(data.columns) + if naxes == 0: + raise ValueError( + "boxplot method requires numerical columns, nothing to plot." + ) if columns is None: columns = data.columns else: diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 9679e79d8c4ba..5314a61191d78 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -13,6 +13,8 @@ import matplotlib.ticker as ticker import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -233,7 +235,7 @@ def create_subplots( "When passing multiple axes, sharex and sharey " "are ignored. These settings must be specified when creating axes.", UserWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if ax.size == naxes: fig = ax.flat[0].get_figure() @@ -256,7 +258,7 @@ def create_subplots( "To output multiple subplots, the figure containing " "the passed axes is being cleared.", UserWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) fig.clear() diff --git a/pandas/tests/arithmetic/common.py b/pandas/tests/arithmetic/common.py index 6f4e35ad4dfb2..f3173e8f0eb57 100644 --- a/pandas/tests/arithmetic/common.py +++ b/pandas/tests/arithmetic/common.py @@ -11,7 +11,26 @@ array, ) import pandas._testing as tm -from pandas.core.arrays import PandasArray +from pandas.core.arrays import ( + BooleanArray, + PandasArray, +) + + +def assert_cannot_add(left, right, msg="cannot add"): + """ + Helper to assert that left and right cannot be added. 
+ + Parameters + ---------- + left : object + right : object + msg : str, default "cannot add" + """ + with pytest.raises(TypeError, match=msg): + left + right + with pytest.raises(TypeError, match=msg): + right + left def assert_invalid_addsub_type(left, right, msg=None): @@ -34,26 +53,29 @@ def assert_invalid_addsub_type(left, right, msg=None): right - left -def get_expected_box(box): +def get_upcast_box(left, right, is_cmp: bool = False): """ - Get the box to use for 'expected' in a comparison operation. - """ - if box in [Index, array]: - return np.ndarray - return box - + Get the box to use for 'expected' in an arithmetic or comparison operation. -def get_upcast_box(box, vector): - """ - Given two box-types, find the one that takes priority. + Parameters + left : Any + right : Any + is_cmp : bool, default False + Whether the operation is a comparison method. """ - if box is DataFrame or isinstance(vector, DataFrame): + + if isinstance(left, DataFrame) or isinstance(right, DataFrame): return DataFrame - if box is Series or isinstance(vector, Series): + if isinstance(left, Series) or isinstance(right, Series): + if is_cmp and isinstance(left, Index): + # Index does not defer for comparisons + return np.array return Series - if box is Index or isinstance(vector, Index): + if isinstance(left, Index) or isinstance(right, Index): + if is_cmp: + return np.array return Index - return box + return tm.to_array def assert_invalid_comparison(left, right, box): @@ -76,21 +98,29 @@ def xbox2(x): # just exclude PandasArray[bool] if isinstance(x, PandasArray): return x._ndarray + if isinstance(x, BooleanArray): + # NB: we are assuming no pd.NAs for now + return x.astype(bool) return x + # rev_box: box to use for reversed comparisons + rev_box = xbox + if isinstance(right, Index) and isinstance(left, Series): + rev_box = np.array + result = xbox2(left == right) expected = xbox(np.zeros(result.shape, dtype=np.bool_)) tm.assert_equal(result, expected) result = xbox2(right == 
left) - tm.assert_equal(result, expected) + tm.assert_equal(result, rev_box(expected)) result = xbox2(left != right) tm.assert_equal(result, ~expected) result = xbox2(right != left) - tm.assert_equal(result, ~expected) + tm.assert_equal(result, rev_box(~expected)) msg = "|".join( [ diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 82f1e60f0aea5..87bbdfb3c808f 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -41,9 +41,9 @@ ) from pandas.core.ops import roperator from pandas.tests.arithmetic.common import ( + assert_cannot_add, assert_invalid_addsub_type, assert_invalid_comparison, - get_expected_box, get_upcast_box, ) @@ -60,12 +60,12 @@ def test_compare_zerodim(self, tz_naive_fixture, box_with_array): # Test comparison with zero-dimensional array is unboxed tz = tz_naive_fixture box = box_with_array - xbox = get_expected_box(box) dti = date_range("20130101", periods=3, tz=tz) other = np.array(dti.to_numpy()[0]) dtarr = tm.box_expected(dti, box) + xbox = get_upcast_box(dtarr, other, True) result = dtarr <= other expected = np.array([True, False, False]) expected = tm.box_expected(expected, xbox) @@ -100,6 +100,7 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_arra @pytest.mark.parametrize( "other", [ + # GH#4968 invalid date/int comparisons list(range(10)), np.arange(10), np.arange(10).astype(np.float32), @@ -112,13 +113,14 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_arra pd.period_range("1971-01-01", freq="D", periods=10).astype(object), ], ) - def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture): - # We don't parametrize this over box_with_array because listlike - # other plays poorly with assert_invalid_comparison reversed checks + def test_dt64arr_cmp_arraylike_invalid( + self, other, tz_naive_fixture, box_with_array + ): tz = tz_naive_fixture dta = 
date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data - assert_invalid_comparison(dta, other, tm.to_array) + obj = tm.box_expected(dta, box_with_array) + assert_invalid_comparison(obj, other, box_with_array) def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture): tz = tz_naive_fixture @@ -147,12 +149,12 @@ def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly tz = tz_naive_fixture box = box_with_array - xbox = get_expected_box(box) ts = Timestamp.now(tz) ser = Series([ts, NaT]) obj = tm.box_expected(ser, box) + xbox = get_upcast_box(obj, ts, True) expected = Series([True, False], dtype=np.bool_) expected = tm.box_expected(expected, xbox) @@ -216,18 +218,6 @@ def test_nat_comparisons( tm.assert_series_equal(result, expected) - def test_comparison_invalid(self, tz_naive_fixture, box_with_array): - # GH#4968 - # invalid date/int comparisons - tz = tz_naive_fixture - ser = Series(range(5)) - ser2 = Series(date_range("20010101", periods=5, tz=tz)) - - ser = tm.box_expected(ser, box_with_array) - ser2 = tm.box_expected(ser2, box_with_array) - - assert_invalid_comparison(ser, ser2, box_with_array) - @pytest.mark.parametrize( "data", [ @@ -244,10 +234,9 @@ def test_nat_comparisons_scalar(self, dtype, data, box_with_array): # on older numpys (since they check object identity) return - xbox = get_expected_box(box) - left = Series(data, dtype=dtype) left = tm.box_expected(left, box) + xbox = get_upcast_box(left, NaT, True) expected = [False, False, False] expected = tm.box_expected(expected, xbox) @@ -317,16 +306,16 @@ def test_timestamp_compare_series(self, left, right): tm.assert_series_equal(result, expected) # Compare to NaT with series containing NaT - expected = left_f(s_nat, Timestamp("nat")) - result = right_f(Timestamp("nat"), s_nat) + expected = left_f(s_nat, NaT) + result = right_f(NaT, s_nat) tm.assert_series_equal(result, expected) def 
test_dt64arr_timestamp_equality(self, box_with_array): # GH#11034 - xbox = get_expected_box(box_with_array) ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) ser = tm.box_expected(ser, box_with_array) + xbox = get_upcast_box(ser, ser, True) result = ser != ser expected = tm.box_expected([False, False, True], xbox) @@ -361,6 +350,39 @@ def test_dt64arr_timestamp_equality(self, box_with_array): expected = tm.box_expected([False, False, False], xbox) tm.assert_equal(result, expected) + @pytest.mark.parametrize( + "datetimelike", + [ + Timestamp("20130101"), + datetime(2013, 1, 1), + np.datetime64("2013-01-01T00:00", "ns"), + ], + ) + @pytest.mark.parametrize( + "op,expected", + [ + (operator.lt, [True, False, False, False]), + (operator.le, [True, True, False, False]), + (operator.eq, [False, True, False, False]), + (operator.gt, [False, False, False, True]), + ], + ) + def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected): + # GH#17965, test for ability to compare datetime64[ns] columns + # to datetimelike + ser = Series( + [ + Timestamp("20120101"), + Timestamp("20130101"), + np.nan, + Timestamp("20130103"), + ], + name="A", + ) + result = op(ser, datetimelike) + expected = Series(expected, name="A") + tm.assert_series_equal(result, expected) + class TestDatetimeIndexComparisons: @@ -417,13 +439,12 @@ def test_dti_cmp_nat(self, dtype, box_with_array): # on older numpys (since they check object identity) return - xbox = get_expected_box(box_with_array) - left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")]) right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")]) left = tm.box_expected(left, box_with_array) right = tm.box_expected(right, box_with_array) + xbox = get_upcast_box(left, right, True) lhs, rhs = left, right if dtype is object: @@ -642,12 +663,11 @@ def test_scalar_comparison_tzawareness( self, comparison_op, other, tz_aware_fixture, box_with_array ): op = comparison_op - box = 
box_with_array tz = tz_aware_fixture dti = date_range("2016-01-01", periods=2, tz=tz) - xbox = get_expected_box(box) dtarr = tm.box_expected(dti, box_with_array) + xbox = get_upcast_box(dtarr, other, True) if op in [operator.eq, operator.ne]: exbool = op is operator.ne expected = np.array([exbool, exbool], dtype=bool) @@ -803,17 +823,6 @@ def test_dt64arr_add_timedeltalike_scalar( result = rng + two_hours tm.assert_equal(result, expected) - def test_dt64arr_iadd_timedeltalike_scalar( - self, tz_naive_fixture, two_hours, box_with_array - ): - tz = tz_naive_fixture - - rng = date_range("2000-01-01", "2000-02-01", tz=tz) - expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz) - - rng = tm.box_expected(rng, box_with_array) - expected = tm.box_expected(expected, box_with_array) - rng += two_hours tm.assert_equal(rng, expected) @@ -831,17 +840,6 @@ def test_dt64arr_sub_timedeltalike_scalar( result = rng - two_hours tm.assert_equal(result, expected) - def test_dt64arr_isub_timedeltalike_scalar( - self, tz_naive_fixture, two_hours, box_with_array - ): - tz = tz_naive_fixture - - rng = date_range("2000-01-01", "2000-02-01", tz=tz) - expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz) - - rng = tm.box_expected(rng, box_with_array) - expected = tm.box_expected(expected, box_with_array) - rng -= two_hours tm.assert_equal(rng, expected) @@ -1042,21 +1040,14 @@ def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array): dt64vals = dti.values dtarr = tm.box_expected(dti, box_with_array) - msg = "cannot add" - with pytest.raises(TypeError, match=msg): - dtarr + dt64vals - with pytest.raises(TypeError, match=msg): - dt64vals + dtarr + assert_cannot_add(dtarr, dt64vals) def test_dt64arr_add_timestamp_raises(self, box_with_array): # GH#22163 ensure DataFrame doesn't cast Timestamp to i8 idx = DatetimeIndex(["2011-01-01", "2011-01-02"]) + ts = idx[0] idx = tm.box_expected(idx, box_with_array) - msg = "cannot add" - with 
pytest.raises(TypeError, match=msg): - idx + Timestamp("2011-01-01") - with pytest.raises(TypeError, match=msg): - Timestamp("2011-01-01") + idx + assert_cannot_add(idx, ts) # ------------------------------------------------------------- # Other Invalid Addition/Subtraction @@ -1238,13 +1229,12 @@ def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array): dates = tm.box_expected(dates, box_with_array) expected = tm.box_expected(expected, box_with_array) - # TODO: parametrize over the scalar being added? radd? sub? - offset = dates + pd.offsets.Hour(5) - tm.assert_equal(offset, expected) - offset = dates + np.timedelta64(5, "h") - tm.assert_equal(offset, expected) - offset = dates + timedelta(hours=5) - tm.assert_equal(offset, expected) + # TODO: sub? + for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]: + offset = dates + scalar + tm.assert_equal(offset, expected) + offset = scalar + dates + tm.assert_equal(offset, expected) # ------------------------------------------------------------- # RelativeDelta DateOffsets @@ -1912,8 +1902,7 @@ def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): one / dt64_series # TODO: parametrize over box - @pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"]) - def test_dt64_series_add_intlike(self, tz_naive_fixture, op): + def test_dt64_series_add_intlike(self, tz_naive_fixture): # GH#19123 tz = tz_naive_fixture dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz) @@ -1921,21 +1910,16 @@ def test_dt64_series_add_intlike(self, tz_naive_fixture, op): other = Series([20, 30, 40], dtype="uint8") - method = getattr(ser, op) msg = "|".join( [ "Addition/subtraction of integers and integer-arrays", "cannot subtract .* from ndarray", ] ) - with pytest.raises(TypeError, match=msg): - method(1) - with pytest.raises(TypeError, match=msg): - method(other) - with pytest.raises(TypeError, match=msg): - method(np.array(other)) - with pytest.raises(TypeError, 
match=msg): - method(pd.Index(other)) + assert_invalid_addsub_type(ser, 1, msg) + assert_invalid_addsub_type(ser, other, msg) + assert_invalid_addsub_type(ser, np.array(other), msg) + assert_invalid_addsub_type(ser, pd.Index(other), msg) # ------------------------------------------------------------- # Timezone-Centric Tests @@ -2033,7 +2017,9 @@ def test_dti_add_intarray_tick(self, int_holder, freq): dti = date_range("2016-01-01", periods=2, freq=freq) other = int_holder([4, -1]) - msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from" + msg = "|".join( + ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"] + ) assert_invalid_addsub_type(dti, other, msg) @pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"]) @@ -2043,7 +2029,9 @@ def test_dti_add_intarray_non_tick(self, int_holder, freq): dti = date_range("2016-01-01", periods=2, freq=freq) other = int_holder([4, -1]) - msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from" + msg = "|".join( + ["Addition/subtraction of integers", "cannot subtract DatetimeArray from"] + ) assert_invalid_addsub_type(dti, other, msg) @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) @@ -2193,10 +2181,7 @@ def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz): dtarr = tm.box_expected(dti, box_with_array) msg = "cannot add DatetimeArray and" - with pytest.raises(TypeError, match=msg): - dtarr + addend - with pytest.raises(TypeError, match=msg): - addend + dtarr + assert_cannot_add(dtarr, addend, msg) # ------------------------------------------------------------- @@ -2421,14 +2406,13 @@ def test_dti_addsub_offset_arraylike( self, tz_naive_fixture, names, op, index_or_series ): # GH#18849, GH#19744 - box = pd.Index other_box = index_or_series tz = tz_naive_fixture dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0]) other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1]) - xbox = get_upcast_box(box, other) + xbox 
= get_upcast_box(dti, other) with tm.assert_produces_warning(PerformanceWarning): res = op(dti, other) @@ -2448,7 +2432,7 @@ def test_dti_addsub_object_arraylike( dti = date_range("2017-01-01", periods=2, tz=tz) dtarr = tm.box_expected(dti, box_with_array) other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)]) - xbox = get_upcast_box(box_with_array, other) + xbox = get_upcast_box(dtarr, other) expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) expected = tm.box_expected(expected, xbox) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 9932adccdbaf2..3bf5fdb257c2a 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -29,6 +29,7 @@ UInt64Index, ) from pandas.core.computation import expressions as expr +from pandas.tests.arithmetic.common import assert_invalid_comparison @pytest.fixture(params=[Index, Series, tm.to_array]) @@ -84,25 +85,13 @@ def test_operator_series_comparison_zerorank(self): expected = 0.0 > Series([1, 2, 3]) tm.assert_series_equal(result, expected) - def test_df_numeric_cmp_dt64_raises(self): + def test_df_numeric_cmp_dt64_raises(self, box_with_array): # GH#8932, GH#22163 ts = pd.Timestamp.now() - df = pd.DataFrame({"x": range(5)}) + obj = np.array(range(5)) + obj = tm.box_expected(obj, box_with_array) - msg = ( - "'[<>]' not supported between instances of 'numpy.ndarray' and 'Timestamp'" - ) - with pytest.raises(TypeError, match=msg): - df > ts - with pytest.raises(TypeError, match=msg): - df < ts - with pytest.raises(TypeError, match=msg): - ts < df - with pytest.raises(TypeError, match=msg): - ts > df - - assert not (df == ts).any().any() - assert (df != ts).all().all() + assert_invalid_comparison(obj, ts, box_with_array) def test_compare_invalid(self): # GH#8058 diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index 9a586fd553428..3069868ebb677 100644 --- 
a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -21,17 +21,15 @@ class TestObjectComparisons: - def test_comparison_object_numeric_nas(self): + def test_comparison_object_numeric_nas(self, comparison_op): ser = Series(np.random.randn(10), dtype=object) shifted = ser.shift(2) - ops = ["lt", "le", "gt", "ge", "eq", "ne"] - for op in ops: - func = getattr(operator, op) + func = comparison_op - result = func(ser, shifted) - expected = func(ser.astype(float), shifted.astype(float)) - tm.assert_series_equal(result, expected) + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) def test_object_comparisons(self): ser = Series(["a", "b", np.nan, "c", "a"]) @@ -141,11 +139,13 @@ def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): ser = Series(data, dtype=dtype) ser = tm.box_expected(ser, box_with_array) - msg = ( - "can only concatenate str|" - "did not contain a loop with signature matching types|" - "unsupported operand type|" - "must be str" + msg = "|".join( + [ + "can only concatenate str", + "did not contain a loop with signature matching types", + "unsupported operand type", + "must be str", + ] ) with pytest.raises(TypeError, match=msg): "foo_" + ser @@ -159,7 +159,9 @@ def test_objarr_add_invalid(self, op, box_with_array): obj_ser.name = "objects" obj_ser = tm.box_expected(obj_ser, box) - msg = "can only concatenate str|unsupported operand type|must be str" + msg = "|".join( + ["can only concatenate str", "unsupported operand type", "must be str"] + ) with pytest.raises(Exception, match=msg): op(obj_ser, 1) with pytest.raises(Exception, match=msg): diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index 41c2cb2cc4f1e..f4404a3483e6f 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -26,8 +26,9 @@ from pandas.core import ops from 
pandas.core.arrays import TimedeltaArray from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, assert_invalid_comparison, - get_expected_box, + get_upcast_box, ) # ------------------------------------------------------------------ @@ -39,23 +40,49 @@ class TestPeriodArrayLikeComparisons: # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison # tests will eventually end up here. + @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) + def test_eq_scalar(self, other, box_with_array): + + idx = PeriodIndex(["2017", "2017", "2018"], freq="D") + idx = tm.box_expected(idx, box_with_array) + xbox = get_upcast_box(idx, other, True) + + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + + result = idx == other + + tm.assert_equal(result, expected) + def test_compare_zerodim(self, box_with_array): # GH#26689 make sure we unbox zero-dimensional arrays - xbox = get_expected_box(box_with_array) pi = period_range("2000", periods=4) other = np.array(pi.to_numpy()[0]) pi = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(pi, other, True) + result = pi <= other expected = np.array([True, False, False, False]) expected = tm.box_expected(expected, xbox) tm.assert_equal(result, expected) @pytest.mark.parametrize( - "scalar", ["foo", Timestamp.now(), Timedelta(days=4), 9, 9.5] + "scalar", + [ + "foo", + Timestamp.now(), + Timedelta(days=4), + 9, + 9.5, + 2000, # specifically don't consider 2000 to match Period("2000", "D") + False, + None, + ], ) def test_compare_invalid_scalar(self, box_with_array, scalar): + # GH#28980 # comparison with scalar that cannot be interpreted as a Period pi = period_range("2000", periods=4) parr = tm.box_expected(pi, box_with_array) @@ -69,6 +96,11 @@ def test_compare_invalid_scalar(self, box_with_array, scalar): np.arange(4), np.arange(4).astype(np.float64), list(range(4)), + # match Period semantics by not treating integers as Periods + [2000, 2001, 2002, 
2003], + np.arange(2000, 2004), + np.arange(2000, 2004).astype(object), + pd.Index([2000, 2001, 2002, 2003]), ], ) def test_compare_invalid_listlike(self, box_with_array, other): @@ -78,11 +110,11 @@ def test_compare_invalid_listlike(self, box_with_array, other): @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)]) def test_compare_object_dtype(self, box_with_array, other_box): - xbox = get_expected_box(box_with_array) pi = period_range("2000", periods=5) parr = tm.box_expected(pi, box_with_array) other = other_box(pi) + xbox = get_upcast_box(parr, other, True) expected = np.array([True, True, True, True, True]) expected = tm.box_expected(expected, xbox) @@ -137,72 +169,32 @@ def test_compare_object_dtype(self, box_with_array, other_box): class TestPeriodIndexComparisons: # TODO: parameterize over boxes - @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) - def test_eq(self, other): - idx = PeriodIndex(["2017", "2017", "2018"], freq="D") - expected = np.array([True, True, False]) - result = idx == other - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize( - "other", - [ - 2017, - [2017, 2017, 2017], - np.array([2017, 2017, 2017]), - np.array([2017, 2017, 2017], dtype=object), - pd.Index([2017, 2017, 2017]), - ], - ) - def test_eq_integer_disallowed(self, other): - # match Period semantics by not treating integers as Periods - - idx = PeriodIndex(["2017", "2017", "2018"], freq="D") - expected = np.array([False, False, False]) - result = idx == other - - tm.assert_numpy_array_equal(result, expected) - msg = "|".join( - [ - "not supported between instances of 'Period' and 'int'", - r"Invalid comparison between dtype=period\[D\] and ", - ] - ) - with pytest.raises(TypeError, match=msg): - idx < other - with pytest.raises(TypeError, match=msg): - idx > other - with pytest.raises(TypeError, match=msg): - idx <= other - with pytest.raises(TypeError, match=msg): - idx >= other - def 
test_pi_cmp_period(self): idx = period_range("2007-01", periods=20, freq="M") + per = idx[10] - result = idx < idx[10] + result = idx < per exp = idx.values < idx.values[10] tm.assert_numpy_array_equal(result, exp) # Tests Period.__richcmp__ against ndarray[object, ndim=2] - result = idx.values.reshape(10, 2) < idx[10] + result = idx.values.reshape(10, 2) < per tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) # Tests Period.__richcmp__ against ndarray[object, ndim=0] - result = idx < np.array(idx[10]) + result = idx < np.array(per) tm.assert_numpy_array_equal(result, exp) # TODO: moved from test_datetime64; de-duplicate with version below def test_parr_cmp_period_scalar2(self, box_with_array): - xbox = get_expected_box(box_with_array) - pi = period_range("2000-01-01", periods=10, freq="D") - val = Period("2000-01-04", freq="D") + val = pi[3] expected = [x > val for x in pi] ser = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(ser, val, True) + expected = tm.box_expected(expected, xbox) result = ser > val tm.assert_equal(result, expected) @@ -216,11 +208,10 @@ def test_parr_cmp_period_scalar2(self, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_period_scalar(self, freq, box_with_array): # GH#13200 - xbox = get_expected_box(box_with_array) - base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) per = Period("2011-02", freq=freq) + xbox = get_upcast_box(base, per, True) exp = np.array([False, True, False, False]) exp = tm.box_expected(exp, xbox) @@ -255,14 +246,14 @@ def test_parr_cmp_period_scalar(self, freq, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_parr_cmp_pi(self, freq, box_with_array): # GH#13200 - xbox = get_expected_box(box_with_array) - base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) base = tm.box_expected(base, box_with_array) # TODO: could also box idx? 
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq) + xbox = get_upcast_box(base, idx, True) + exp = np.array([False, False, True, False]) exp = tm.box_expected(exp, xbox) tm.assert_equal(base == idx, exp) @@ -325,23 +316,24 @@ def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) def test_pi_cmp_nat(self, freq): idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + per = idx1[1] - result = idx1 > Period("2011-02", freq=freq) + result = idx1 > per exp = np.array([False, False, False, True]) tm.assert_numpy_array_equal(result, exp) - result = Period("2011-02", freq=freq) < idx1 + result = per < idx1 tm.assert_numpy_array_equal(result, exp) - result = idx1 == Period("NaT", freq=freq) + result = idx1 == pd.NaT exp = np.array([False, False, False, False]) tm.assert_numpy_array_equal(result, exp) - result = Period("NaT", freq=freq) == idx1 + result = pd.NaT == idx1 tm.assert_numpy_array_equal(result, exp) - result = idx1 != Period("NaT", freq=freq) + result = idx1 != pd.NaT exp = np.array([True, True, True, True]) tm.assert_numpy_array_equal(result, exp) - result = Period("NaT", freq=freq) != idx1 + result = pd.NaT != idx1 tm.assert_numpy_array_equal(result, exp) idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq) @@ -474,28 +466,29 @@ def test_pi_comp_period(self): idx = PeriodIndex( ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" ) + per = idx[2] - f = lambda x: x == Period("2011-03", freq="M") + f = lambda x: x == per exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") == x + f = lambda x: per == x self._check(idx, f, exp) - f = lambda x: x != Period("2011-03", freq="M") + f = lambda x: x != per exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") != x + f = lambda x: per != 
x self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: x > Period("2011-03", freq="M") + f = lambda x: x > per exp = np.array([False, False, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, True, True, False], dtype=np.bool_) self._check(idx, f, exp) @@ -503,11 +496,12 @@ def test_pi_comp_period_nat(self): idx = PeriodIndex( ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" ) + per = idx[2] - f = lambda x: x == Period("2011-03", freq="M") + f = lambda x: x == per exp = np.array([False, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") == x + f = lambda x: per == x self._check(idx, f, exp) f = lambda x: x == pd.NaT @@ -516,10 +510,10 @@ def test_pi_comp_period_nat(self): f = lambda x: pd.NaT == x self._check(idx, f, exp) - f = lambda x: x != Period("2011-03", freq="M") + f = lambda x: x != per exp = np.array([True, True, False, True], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") != x + f = lambda x: per != x self._check(idx, f, exp) f = lambda x: x != pd.NaT @@ -528,11 +522,11 @@ def test_pi_comp_period_nat(self): f = lambda x: pd.NaT != x self._check(idx, f, exp) - f = lambda x: Period("2011-03", freq="M") >= x + f = lambda x: per >= x exp = np.array([True, False, True, False], dtype=np.bool_) self._check(idx, f, exp) - f = lambda x: x < Period("2011-03", freq="M") + f = lambda x: x < per exp = np.array([True, False, False, False], dtype=np.bool_) self._check(idx, f, exp) @@ -695,20 +689,6 @@ def test_sub_n_gt_1_offsets(self, offset, kwd_name, n): # ------------------------------------------------------------- # Invalid Operations - @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])]) - 
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) - def test_parr_add_sub_float_raises(self, op, other, box_with_array): - dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D") - pi = dti.to_period("D") - pi = tm.box_expected(pi, box_with_array) - msg = ( - r"unsupported operand type\(s\) for [+-]: .* and .*|" - "Concatenation operation is not implemented for NumPy arrays" - ) - - with pytest.raises(TypeError, match=msg): - op(pi, other) - @pytest.mark.parametrize( "other", [ @@ -722,6 +702,8 @@ def test_parr_add_sub_float_raises(self, op, other, box_with_array): pd.date_range("2016-01-01", periods=3, freq="S")._data, pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data, # Miscellaneous invalid types + 3.14, + np.array([2.0, 3.0, 4.0]), ], ) def test_parr_add_sub_invalid(self, other, box_with_array): @@ -729,11 +711,15 @@ def test_parr_add_sub_invalid(self, other, box_with_array): rng = period_range("1/1/2000", freq="D", periods=3) rng = tm.box_expected(rng, box_with_array) - msg = ( - r"(:?cannot add PeriodArray and .*)" - r"|(:?cannot subtract .* from (:?a\s)?.*)" - r"|(:?unsupported operand type\(s\) for \+: .* and .*)" + msg = "|".join( + [ + r"(:?cannot add PeriodArray and .*)", + r"(:?cannot subtract .* from (:?a\s)?.*)", + r"(:?unsupported operand type\(s\) for \+: .* and .*)", + r"unsupported operand type\(s\) for [+-]: .* and .*", + ] ) + assert_invalid_addsub_type(rng, other, msg) with pytest.raises(TypeError, match=msg): rng + other with pytest.raises(TypeError, match=msg): @@ -1033,9 +1019,11 @@ def test_pi_add_timedeltalike_minute_gt1(self, three_days): result = rng - other tm.assert_index_equal(result, expected) - msg = ( - r"(:?bad operand type for unary -: 'PeriodArray')" - r"|(:?cannot subtract PeriodArray from timedelta64\[[hD]\])" + msg = "|".join( + [ + r"(:?bad operand type for unary -: 'PeriodArray')", + r"(:?cannot subtract PeriodArray from timedelta64\[[hD]\])", + ] ) with 
pytest.raises(TypeError, match=msg): other - rng @@ -1260,7 +1248,7 @@ def test_parr_add_sub_object_array(self): class TestPeriodSeriesArithmetic: - def test_ops_series_timedelta(self): + def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array): # GH#13043 ser = Series( [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], @@ -1269,21 +1257,18 @@ def test_ops_series_timedelta(self): assert ser.dtype == "Period[D]" expected = Series( - [Period("2015-01-02", freq="D"), Period("2015-01-03", freq="D")], + [Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")], name="xxx", ) - result = ser + Timedelta("1 days") - tm.assert_series_equal(result, expected) - - result = Timedelta("1 days") + ser - tm.assert_series_equal(result, expected) + obj = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) - result = ser + pd.tseries.offsets.Day() - tm.assert_series_equal(result, expected) + result = obj + three_days + tm.assert_equal(result, expected) - result = pd.tseries.offsets.Day() + ser - tm.assert_series_equal(result, expected) + result = three_days + obj + tm.assert_equal(result, expected) def test_ops_series_period(self): # GH#13043 @@ -1367,9 +1352,13 @@ def test_parr_ops_errors(self, ng, func, box_with_array): ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" ) obj = tm.box_expected(idx, box_with_array) - msg = ( - r"unsupported operand type\(s\)|can only concatenate|" - r"must be str|object to str implicitly" + msg = "|".join( + [ + r"unsupported operand type\(s\)", + "can only concatenate", + r"must be str", + "object to str implicitly", + ] ) with pytest.raises(TypeError, match=msg): @@ -1543,11 +1532,3 @@ def test_pi_sub_period_nat(self): exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") tm.assert_index_equal(idx - Period("NaT", freq="M"), exp) tm.assert_index_equal(Period("NaT", freq="M") - idx, exp) - - @pytest.mark.parametrize("scalars", ["a", False, 1, 
1.0, None]) - def test_comparison_operations(self, scalars): - # GH 28980 - expected = Series([False, False]) - s = Series([Period("2019"), Period("2020")], dtype="period[A-DEC]") - result = s == scalars - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index b8fa6c79b1b93..8078e8c90a2bf 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -84,11 +84,6 @@ def test_compare_timedelta64_zerodim(self, box_with_array): expected = tm.box_expected(expected, xbox) tm.assert_equal(res, expected) - msg = "Invalid comparison between dtype" - with pytest.raises(TypeError, match=msg): - # zero-dim of wrong dtype should still raise - tdi >= np.array(4) - @pytest.mark.parametrize( "td_scalar", [ @@ -120,6 +115,7 @@ def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar): Timestamp.now().to_datetime64(), Timestamp.now().to_pydatetime(), Timestamp.now().date(), + np.array(4), # zero-dim mismatched dtype ], ) def test_td64_comparisons_invalid(self, box_with_array, invalid): @@ -146,17 +142,18 @@ def test_td64_comparisons_invalid(self, box_with_array, invalid): pd.period_range("1971-01-01", freq="D", periods=10).astype(object), ], ) - def test_td64arr_cmp_arraylike_invalid(self, other): + def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array): # We don't parametrize this over box_with_array because listlike # other plays poorly with assert_invalid_comparison reversed checks rng = timedelta_range("1 days", periods=10)._data - assert_invalid_comparison(rng, other, tm.to_array) + rng = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(rng, other, box_with_array) def test_td64arr_cmp_mixed_invalid(self): rng = timedelta_range("1 days", periods=5)._data - other = np.array([0, 1, 2, rng[3], Timestamp.now()]) + result = rng == other expected = np.array([False, False, False, True, False]) 
tm.assert_numpy_array_equal(result, expected) @@ -1542,13 +1539,13 @@ def test_tdi_mul_float_series(self, box_with_array): ) def test_tdi_rmul_arraylike(self, other, box_with_array): box = box_with_array - xbox = get_upcast_box(box, other) tdi = TimedeltaIndex(["1 Day"] * 10) - expected = timedelta_range("1 days", "10 days") - expected._data.freq = None + expected = timedelta_range("1 days", "10 days")._with_freq(None) tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, other) + expected = tm.box_expected(expected, xbox) result = other * tdi @@ -1623,10 +1620,7 @@ def test_td64arr_div_td64_scalar(self, m, unit, box_with_array): box = box_with_array xbox = np.ndarray if box is pd.array else box - startdate = Series(pd.date_range("2013-01-01", "2013-01-03")) - enddate = Series(pd.date_range("2013-03-01", "2013-03-03")) - - ser = enddate - startdate + ser = Series([Timedelta(days=59)] * 3) ser[2] = np.nan flat = ser ser = tm.box_expected(ser, box) @@ -2000,7 +1994,6 @@ def test_td64arr_rmul_numeric_array( ): # GH#4521 # divide/multiply by integers - xbox = get_upcast_box(box_with_array, vector) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) @@ -2008,6 +2001,8 @@ def test_td64arr_rmul_numeric_array( expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + expected = tm.box_expected(expected, xbox) result = tdser * vector @@ -2026,7 +2021,6 @@ def test_td64arr_div_numeric_array( ): # GH#4521 # divide/multiply by integers - xbox = get_upcast_box(box_with_array, vector) tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") vector = vector.astype(any_real_numpy_dtype) @@ -2034,6 +2028,7 @@ def test_td64arr_div_numeric_array( expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]") tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) 
expected = tm.box_expected(expected, xbox) result = tdser / vector @@ -2085,7 +2080,7 @@ def test_td64arr_mul_int_series(self, box_with_array, names): ) tdi = tm.box_expected(tdi, box) - xbox = get_upcast_box(box, ser) + xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) @@ -2117,9 +2112,8 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names): name=xname, ) - xbox = get_upcast_box(box, ser) - tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) expected = tm.box_expected(expected, xbox) result = ser.__rtruediv__(tdi) diff --git a/pandas/tests/arrays/floating/test_comparison.py b/pandas/tests/arrays/floating/test_comparison.py index c4163c25ae74d..a429649f1ce1d 100644 --- a/pandas/tests/arrays/floating/test_comparison.py +++ b/pandas/tests/arrays/floating/test_comparison.py @@ -1,7 +1,9 @@ +import numpy as np import pytest import pandas as pd import pandas._testing as tm +from pandas.core.arrays import FloatingArray from pandas.tests.arrays.masked_shared import ( ComparisonOps, NumericOps, @@ -34,3 +36,30 @@ def test_equals(): a1 = pd.array([1, 2, None], dtype="Float64") a2 = pd.array([1, 2, None], dtype="Float32") assert a1.equals(a2) is False + + +def test_equals_nan_vs_na(): + # GH#44382 + + mask = np.zeros(3, dtype=bool) + data = np.array([1.0, np.nan, 3.0], dtype=np.float64) + + left = FloatingArray(data, mask) + assert left.equals(left) + tm.assert_extension_array_equal(left, left) + + assert left.equals(left.copy()) + assert left.equals(FloatingArray(data.copy(), mask.copy())) + + mask2 = np.array([False, True, False], dtype=bool) + data2 = np.array([1.0, 2.0, 3.0], dtype=np.float64) + right = FloatingArray(data2, mask2) + assert right.equals(right) + tm.assert_extension_array_equal(right, right) + + assert not left.equals(right) + + # with mask[1] = True, the only difference is data[1], which should + # not matter for equals + mask[1] = True + assert left.equals(right) diff --git 
a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index c7c1ce6c04692..13fe3c2d427c5 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1114,6 +1114,25 @@ def test_to_timestamp(self, how, arr1d): # an EA-specific tm.assert_ function tm.assert_index_equal(pd.Index(result), pd.Index(expected)) + def test_to_timestamp_roundtrip_bday(self): + # Case where infer_freq inside would choose "D" instead of "B" + dta = pd.date_range("2021-10-18", periods=3, freq="B")._data + parr = dta.to_period() + result = parr.to_timestamp() + assert result.freq == "B" + tm.assert_extension_array_equal(result, dta) + + dta2 = dta[::2] + parr2 = dta2.to_period() + result2 = parr2.to_timestamp() + assert result2.freq == "2B" + tm.assert_extension_array_equal(result2, dta2) + + parr3 = dta.to_period("2B") + result3 = parr3.to_timestamp() + assert result3.freq == "B" + tm.assert_extension_array_equal(result3, dta) + def test_to_timestamp_out_of_bounds(self): # GH#19643 previously overflowed silently pi = pd.period_range("1500", freq="Y", periods=3) diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index 5431baf493260..23bb4c5d2670c 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -281,5 +281,5 @@ def test_value_counts_with_nan(dropna, index_or_series): if dropna is True: expected = Series([1], index=[True]) else: - expected = Series([2, 1], index=[pd.NA, True]) + expected = Series([1, 1, 1], index=[True, pd.NA, np.nan]) tm.assert_series_equal(res, expected) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index c52f20255eb81..1d3d736ca7ee2 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -162,13 +162,12 @@ def test_compare_array(self, data, comparison_op): other = pd.Series([data[0]] * len(data)) self._compare_other(ser, data, 
comparison_op, other) - def test_direct_arith_with_ndframe_returns_not_implemented( - self, data, frame_or_series - ): + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) + def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. other = pd.Series(data) - if frame_or_series is pd.DataFrame: + if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__eq__"): diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index e9dc63e9bd903..6a1a9512bc036 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -303,3 +303,14 @@ def test_not_equal_with_na(self, categories): class TestParsing(base.BaseParsingTests): pass + + +class Test2DCompat(base.Dim2CompatTests): + def test_repr_2d(self, data): + # Categorical __repr__ doesn't include "Categorical", so we need + # to special-case + res = repr(data.reshape(1, -1)) + assert res.count("\nCategories") == 1 + + res = repr(data.reshape(-1, 1)) + assert res.count("\nCategories") == 1 diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index d735f0dbec8a5..bb1a1bc72116d 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -44,6 +44,19 @@ class TestDataFrameSetItem: + def test_setitem_str_subclass(self): + # GH#37366 + class mystring(str): + pass + + data = ["2020-10-22 01:21:00+00:00"] + index = DatetimeIndex(data) + df = DataFrame({"a": [1]}, index=index) + df["b"] = 2 + df[mystring("c")] = 3 + expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) + tm.assert_equal(df, expected) + @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"]) def test_setitem_dtype(self, dtype, float_frame): arr = 
np.random.randn(len(float_frame)) @@ -384,7 +397,7 @@ def test_setitem_frame_length_0_str_key(self, indexer): expected["A"] = expected["A"].astype("object") tm.assert_frame_equal(df, expected) - def test_setitem_frame_duplicate_columns(self, using_array_manager): + def test_setitem_frame_duplicate_columns(self, using_array_manager, request): # GH#15695 cols = ["A", "B", "C"] * 2 df = DataFrame(index=range(3), columns=cols) @@ -407,6 +420,11 @@ def test_setitem_frame_duplicate_columns(self, using_array_manager): expected["C"] = expected["C"].astype("int64") # TODO(ArrayManager) .loc still overwrites expected["B"] = expected["B"].astype("int64") + + mark = pytest.mark.xfail( + reason="Both 'A' columns get set with 3 instead of 0 and 3" + ) + request.node.add_marker(mark) else: # set these with unique columns to be extra-unambiguous expected[2] = expected[2].astype(np.int64) @@ -995,22 +1013,37 @@ def test_setitem_always_copy(self, float_frame): float_frame["E"][5:10] = np.nan assert notna(s[5:10]).all() - def test_setitem_clear_caches(self): - # see GH#304 + @pytest.mark.parametrize("consolidate", [True, False]) + def test_setitem_partial_column_inplace(self, consolidate, using_array_manager): + # This setting should be in-place, regardless of whether frame is + # single-block or multi-block + # GH#304 this used to be incorrectly not-inplace, in which case + # we needed to ensure _item_cache was cleared. 
+ df = DataFrame( {"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3] ) df.insert(2, "z", np.nan) + if not using_array_manager: + if consolidate: + df._consolidate_inplace() + assert len(df._mgr.blocks) == 1 + else: + assert len(df._mgr.blocks) == 2 - # cache it - foo = df["z"] - df.loc[df.index[2:], "z"] = 42 + zvals = df["z"]._values - expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z") + df.loc[2:, "z"] = 42 - assert df["z"] is not foo + expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z") tm.assert_series_equal(df["z"], expected) + # check setting occurred in-place + tm.assert_numpy_array_equal(zvals, expected.values) + assert np.shares_memory(zvals, df["z"]._values) + if not consolidate: + assert df["z"]._values is zvals + def test_setitem_duplicate_columns_not_inplace(self): # GH#39510 cols = ["A", "B"] * 2 diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index d2704876c31c5..c6938abb57d64 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -366,12 +366,7 @@ def test_xs_droplevel_false_view(self, using_array_manager): assert np.shares_memory(result.iloc[:, 0]._values, df.iloc[:, 0]._values) # modifying original df also modifies result when having a single block df.iloc[0, 0] = 2 - if not using_array_manager: - expected = DataFrame({"a": [2]}) - else: - # TODO(ArrayManager) iloc does not update the array inplace using - # "split" path - expected = DataFrame({"a": [1]}) + expected = DataFrame({"a": [2]}) tm.assert_frame_equal(result, expected) # with mixed dataframe, modifying the parent doesn't modify result @@ -379,7 +374,13 @@ def test_xs_droplevel_false_view(self, using_array_manager): df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"])) result = df.xs("a", axis=1, drop_level=False) df.iloc[0, 0] = 2 - expected = DataFrame({"a": [1]}) + if using_array_manager: + # Here the behavior is consistent + 
expected = DataFrame({"a": [2]}) + else: + # FIXME: iloc does not update the array inplace using + # "split" path + expected = DataFrame({"a": [1]}) tm.assert_frame_equal(result, expected) def test_xs_list_indexer_droplevel_false(self): diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 9f1f953cecc7e..e5e07761fd755 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -261,6 +261,26 @@ def test_astype_duplicate_col(self): expected = concat([a1_str, b, a2_str], axis=1) tm.assert_frame_equal(result, expected) + def test_astype_duplicate_col_series_arg(self): + # GH#44417 + vals = np.random.randn(3, 4) + df = DataFrame(vals, columns=["A", "B", "C", "A"]) + dtypes = df.dtypes + dtypes.iloc[0] = str + dtypes.iloc[2] = "Float64" + + result = df.astype(dtypes) + expected = DataFrame( + { + 0: vals[:, 0].astype(str), + 1: vals[:, 1], + 2: pd.array(vals[:, 2], dtype="Float64"), + 3: vals[:, 3], + } + ) + expected.columns = df.columns + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( "dtype", [ diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 2e6318955e119..5773edbdbcdec 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -280,9 +280,13 @@ def test_quantile_datetime(self): tm.assert_frame_equal(result, expected) # empty when numeric_only=True - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # result = df[['a', 'c']].quantile(.5) - # result = df[['a', 'c']].quantile([.5]) + result = df[["a", "c"]].quantile(0.5) + expected = Series([], index=[], dtype=np.float64, name=0.5) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5]) + expected = DataFrame(index=[0.5]) + tm.assert_frame_equal(result, expected) def test_quantile_invalid(self, datetime_frame): msg = "percentiles should all be in the 
interval \\[0, 1\\]" @@ -481,7 +485,7 @@ def test_quantile_nat(self): ) tm.assert_frame_equal(res, exp) - def test_quantile_empty_no_rows(self): + def test_quantile_empty_no_rows_floats(self): # floats df = DataFrame(columns=["a", "b"], dtype="float64") @@ -494,21 +498,43 @@ def test_quantile_empty_no_rows(self): exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) tm.assert_frame_equal(res, exp) - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # res = df.quantile(0.5, axis=1) - # res = df.quantile([0.5], axis=1) + res = df.quantile(0.5, axis=1) + exp = Series([], index=[], dtype="float64", name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], axis=1) + exp = DataFrame(columns=[], index=[0.5]) + tm.assert_frame_equal(res, exp) + def test_quantile_empty_no_rows_ints(self): # ints df = DataFrame(columns=["a", "b"], dtype="int64") - # FIXME (gives empty frame in 0.18.1, broken in 0.19.0) - # res = df.quantile(0.5) + res = df.quantile(0.5) + exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + def test_quantile_empty_no_rows_dt64(self): # datetimes df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") - # FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0) - # res = df.quantile(0.5, numeric_only=False) + res = df.quantile(0.5, numeric_only=False) + exp = Series( + [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 + ) + tm.assert_series_equal(res, exp) + + # Mixed dt64/dt64tz + df["a"] = df["a"].dt.tz_localize("US/Central") + res = df.quantile(0.5, numeric_only=False) + exp = exp.astype(object) + tm.assert_series_equal(res, exp) + + # both dt64tz + df["b"] = df["b"].dt.tz_localize("US/Central") + res = df.quantile(0.5, numeric_only=False) + exp = exp.astype(df["b"].dtype) + tm.assert_series_equal(res, exp) def test_quantile_empty_no_columns(self): # GH#23925 _get_numeric_data may drop all columns diff --git a/pandas/tests/frame/test_constructors.py 
b/pandas/tests/frame/test_constructors.py index f92bbe1c718ab..52797862afa14 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2903,14 +2903,7 @@ def test_from_timedelta64_scalar_object(self, constructor): assert isinstance(get1(obj), np.timedelta64) @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64]) - def test_from_scalar_datetimelike_mismatched(self, constructor, cls, request): - node = request.node - params = node.callspec.params - if params["frame_or_series"] is DataFrame and params["constructor"] is dict: - mark = pytest.mark.xfail( - reason="DataFrame incorrectly allows mismatched datetimelike" - ) - node.add_marker(mark) + def test_from_scalar_datetimelike_mismatched(self, constructor, cls): scalar = cls("NaT", "ns") dtype = {np.datetime64: "m8[ns]", np.timedelta64: "M8[ns]"}[cls] diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 919d8ab14778e..fc2c138538ac9 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -789,6 +789,10 @@ def test_std_timedelta64_skipna_false(self): # GH#37392 tdi = pd.timedelta_range("1 Day", periods=10) df = DataFrame({"A": tdi, "B": tdi}) + # Copy is needed for ArrayManager case, otherwise setting df.iloc + # below edits tdi, altering both df['A'] and df['B'] + # FIXME: passing copy=True to constructor does not fix this + df = df.copy() df.iloc[-2, -1] = pd.NaT result = df.std(skipna=False) @@ -1017,7 +1021,9 @@ def test_idxmax_mixed_dtype(self): # don't cast to object, which would raise in nanops dti = date_range("2016-01-01", periods=3) - df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti}) + # Copying dti is needed for ArrayManager otherwise when we set + # df.loc[0, 3] = pd.NaT below it edits dti + df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti.copy(deep=True)}) result = df.idxmax() expected = Series([1, 0, 2], index=[1, 2, 3]) @@ -1074,6 +1080,10 @@ def 
test_idxmax_idxmin_convert_dtypes(self, op, expected_value): def test_idxmax_dt64_multicolumn_axis1(self): dti = date_range("2016-01-01", periods=3) df = DataFrame({3: dti, 4: dti[::-1]}) + # FIXME: copy needed for ArrayManager, otherwise setting with iloc + # below also sets df.iloc[-1, 1]; passing copy=True to DataFrame + # does not solve this. + df = df.copy() df.iloc[0, 0] = pd.NaT df._consolidate_inplace() diff --git a/pandas/tests/frame/test_stack_unstack.py b/pandas/tests/frame/test_stack_unstack.py index 404baecdfecac..62512249dabfc 100644 --- a/pandas/tests/frame/test_stack_unstack.py +++ b/pandas/tests/frame/test_stack_unstack.py @@ -2099,3 +2099,27 @@ def test_stack_unsorted(self): result = DF.stack(["VAR", "TYP"]).sort_index() expected = DF.sort_index(axis=1).stack(["VAR", "TYP"]).sort_index() tm.assert_series_equal(result, expected) + + def test_stack_nullable_dtype(self): + # GH#43561 + columns = MultiIndex.from_product( + [["54511", "54515"], ["r", "t_mean"]], names=["station", "element"] + ) + index = Index([1, 2, 3], name="time") + + arr = np.array([[50, 226, 10, 215], [10, 215, 9, 220], [305, 232, 111, 220]]) + df = DataFrame(arr, columns=columns, index=index, dtype=pd.Int64Dtype()) + + result = df.stack("station") + + expected = df.astype(np.int64).stack("station").astype(pd.Int64Dtype()) + tm.assert_frame_equal(result, expected) + + # non-homogeneous case + df[df.columns[0]] = df[df.columns[0]].astype(pd.Float64Dtype()) + result = df.stack("station") + + # TODO(EA2D): we get object dtype because DataFrame.values can't + # be an EA + expected = df.astype(object).stack("station") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 42474ff00ad6d..8d9957b24300f 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -13,6 +13,16 @@ import pandas._testing as tm +@pytest.fixture() +def gpd_style_subclass_df(): + class 
SubclassedDataFrame(DataFrame): + @property + def _constructor(self): + return SubclassedDataFrame + + return SubclassedDataFrame({"a": [1, 2, 3]}) + + class TestDataFrameSubclassing: def test_frame_subclassing_and_slicing(self): # Subclass frame and ensure it returns the right class on slicing it @@ -704,6 +714,15 @@ def test_idxmax_preserves_subclass(self): result = df.idxmax() assert isinstance(result, tm.SubclassedSeries) + def test_convert_dtypes_preserves_subclass(self, gpd_style_subclass_df): + # GH 43668 + df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + result = df.convert_dtypes() + assert isinstance(result, tm.SubclassedDataFrame) + + result = gpd_style_subclass_df.convert_dtypes() + assert isinstance(result, type(gpd_style_subclass_df)) + def test_equals_subclass(self): # https://github.com/pandas-dev/pandas/pull/34402 # allow subclass in both directions diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index c1f8b5dd7cf41..135e8cc7b7aba 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -347,10 +347,7 @@ operator.methodcaller("infer_objects"), ), (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")), - pytest.param( - (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")), - marks=not_implemented_mark, - ), + (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")), (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")), (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")), (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)), diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 3c402480ea2ec..e5870a206f419 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -394,8 +394,7 @@ def test_median_empty_bins(observed): result = df.groupby(bins, observed=observed).median() 
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) - # TODO: GH 41137 - tm.assert_frame_equal(result, expected, check_dtype=False) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 203d8abb465d0..f632da9616124 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2031,6 +2031,16 @@ def get_result(): tm.assert_equal(result, expected) +def test_empty_groupby_apply_nonunique_columns(): + # GH#44417 + df = DataFrame(np.random.randn(0, 4)) + df[3] = df[3].astype(np.int64) + df.columns = [0, 1, 2, 0] + gb = df.groupby(df[1]) + res = gb.apply(lambda x: x) + assert (res.dtypes == df.dtypes).all() + + def test_tuple_as_grouping(): # https://github.com/pandas-dev/pandas/issues/18314 df = DataFrame( diff --git a/pandas/tests/indexes/base_class/test_formats.py b/pandas/tests/indexes/base_class/test_formats.py index f07b06acbfbdb..9053d45dee623 100644 --- a/pandas/tests/indexes/base_class/test_formats.py +++ b/pandas/tests/indexes/base_class/test_formats.py @@ -122,6 +122,14 @@ def test_repr_summary(self): assert len(result) < 200 assert "..." in result + def test_summary_bug(self): + # GH#3869 + ind = Index(["{other}%s", "~:{range}:0"], name="A") + result = ind._summary() + # shouldn't be formatted accidentally. 
+ assert "~:{range}:0" in result + assert "{other}%s" in result + def test_index_repr_bool_nan(self): # GH32146 arr = Index([True, False, np.nan], dtype=object) @@ -132,3 +140,9 @@ def test_index_repr_bool_nan(self): exp2 = repr(arr) out2 = "Index([True, False, nan], dtype='object')" assert out2 == exp2 + + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = Index(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 33d2558613baf..a5ee743b5cd9a 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -69,26 +69,6 @@ def test_pickle_compat_construction(self): with pytest.raises(TypeError, match=msg): self._index_cls() - @pytest.mark.parametrize("name", [None, "new_name"]) - def test_to_frame(self, name, simple_index): - # see GH-15230, GH-22580 - idx = simple_index - - if name: - idx_name = name - else: - idx_name = idx.name or 0 - - df = idx.to_frame(name=idx_name) - - assert df.index is idx - assert len(df.columns) == 1 - assert df.columns[0] == idx_name - assert df[idx_name].values is not idx.values - - df = idx.to_frame(index=False, name=idx_name) - assert df.index is not idx - def test_shift(self, simple_index): # GH8083 test the base class for shift @@ -226,46 +206,6 @@ def test_repr_max_seq_item_setting(self, simple_index): repr(idx) assert "..." not in str(idx) - def test_copy_name(self, index): - # gh-12309: Check that the "name" argument - # passed at initialization is honored. - if isinstance(index, MultiIndex): - return - - first = type(index)(index, copy=True, name="mario") - second = type(first)(first, copy=False) - - # Even though "copy=False", we want a new object. - assert first is not second - - # Not using tm.assert_index_equal() since names differ. 
- assert index.equals(first) - - assert first.name == "mario" - assert second.name == "mario" - - s1 = Series(2, index=first) - s2 = Series(3, index=second[:-1]) - - if not isinstance(index, CategoricalIndex): - # See gh-13365 - s3 = s1 * s2 - assert s3.index.name == "mario" - - def test_copy_name2(self, index): - # gh-35592 - if isinstance(index, MultiIndex): - return - - assert index.copy(name="mario").name == "mario" - - with pytest.raises(ValueError, match="Length of new names must be 1, got 2"): - index.copy(name=["mario", "luigi"]) - - msg = f"{type(index).__name__}.name must be a hashable type" - with pytest.raises(TypeError, match=msg): - index.copy(name=[["mario"]]) - def test_ensure_copied_data(self, index): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 diff --git a/pandas/tests/indexes/datetimelike_/test_is_monotonic.py b/pandas/tests/indexes/datetimelike_/test_is_monotonic.py new file mode 100644 index 0000000000000..22247c982edbc --- /dev/null +++ b/pandas/tests/indexes/datetimelike_/test_is_monotonic.py @@ -0,0 +1,46 @@ +from pandas import ( + Index, + NaT, + date_range, +) + + +def test_is_monotonic_with_nat(): + # GH#31437 + # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex, + # in particular never be monotonic when we have NaT + dti = date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + tdi = Index(dti.view("timedelta64[ns]")) + + for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert obj.is_monotonic + assert obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti1 = dti.insert(0, NaT) + pi1 = dti1.to_period("D") + tdi1 = Index(dti1.view("timedelta64[ns]")) + + for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]: + if isinstance(obj, Index): + # i.e. 
not Engines + assert not obj.is_monotonic + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique + + dti2 = dti.insert(3, NaT) + pi2 = dti2.to_period("H") + tdi2 = Index(dti2.view("timedelta64[ns]")) + + for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]: + if isinstance(obj, Index): + # i.e. not Engines + assert not obj.is_monotonic + assert not obj.is_monotonic_increasing + assert not obj.is_monotonic_decreasing + assert obj.is_unique diff --git a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py new file mode 100644 index 0000000000000..128a8b3e10eb3 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py @@ -0,0 +1,20 @@ +from pandas import ( + DataFrame, + DatetimeIndex, +) +import pandas._testing as tm + + +def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): + # GH#6538: Check that DatetimeIndex and its TimeStamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + result = dates.isocalendar() + expected_data_frame = DataFrame( + [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], + columns=["year", "week", "day"], + index=dates, + dtype="UInt32", + ) + tm.assert_frame_equal(result, expected_data_frame) diff --git a/pandas/tests/indexes/datetimes/test_asof.py b/pandas/tests/indexes/datetimes/test_asof.py index c794aefc6a48b..7adc400302cb9 100644 --- a/pandas/tests/indexes/datetimes/test_asof.py +++ b/pandas/tests/indexes/datetimes/test_asof.py @@ -1,8 +1,12 @@ +from datetime import timedelta + from pandas import ( Index, Timestamp, date_range, + isna, ) +import pandas._testing as tm class TestAsOf: @@ -12,3 +16,16 @@ def test_asof_partial(self): result = index.asof("2010-02") assert result == expected assert not isinstance(result, Index) + + def 
test_asof(self): + index = tm.makeDateIndex(100) + + dt = index[0] + assert index.asof(dt) == dt + assert isna(index.asof(dt - timedelta(1))) + + dt = index[-1] + assert index.asof(dt + timedelta(1)) == dt + + dt = index[0].to_pydatetime() + assert isinstance(index.asof(dt), Timestamp) diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index 36046aaeacaae..197038dbadaf7 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -254,3 +254,20 @@ def test_dti_custom_business_summary_dateutil(self): pd.bdate_range( "1/1/2005", "1/1/2009", freq="C", tz=dateutil.tz.tzutc() )._summary() + + +class TestFormat: + def test_format_with_name_time_info(self): + # bug I fixed 12/20/2011 + dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something") + + formatted = dates.format(name=True) + assert formatted[0] == "something" + + def test_format_datetime_with_time(self): + dti = DatetimeIndex([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) + + result = dti.format() + expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] + assert len(result) == 2 + assert result == expected diff --git a/pandas/tests/indexes/datetimes/test_freq_attr.py b/pandas/tests/indexes/datetimes/test_freq_attr.py new file mode 100644 index 0000000000000..f5821a316358d --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_freq_attr.py @@ -0,0 +1,61 @@ +import pytest + +from pandas import ( + DatetimeIndex, + date_range, +) + +from pandas.tseries.offsets import ( + BDay, + DateOffset, + Day, + Hour, +) + + +class TestFreq: + def test_freq_setter_errors(self): + # GH#20678 + idx = DatetimeIndex(["20180101", "20180103", "20180105"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with non-freq string + 
with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_freq_setter(self, values, freq, tz): + # GH#20678 + idx = DatetimeIndex(values, tz=tz) + + # can set to an offset, converting from string if necessary + idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert idx.freq is None + + def test_freq_view_safe(self): + # Setting the freq for one DatetimeIndex shouldn't alter the freq + # for another that views the same data + + dti = date_range("2016-01-01", periods=5) + dta = dti._data + + dti2 = DatetimeIndex(dta)._with_freq(None) + assert dti2.freq is None + + # Original was not altered + assert dti.freq == "D" + assert dta.freq == "D" diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index c3152b77d39df..beca71969dfcd 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -21,25 +21,12 @@ ) import pandas._testing as tm -from pandas.tseries.offsets import ( - BDay, - CDay, -) +from pandas.tseries.frequencies import to_offset START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = date_range( - "2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx" - ) - - result = idx[...] 
- assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): # GH4226 st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles") @@ -88,44 +75,17 @@ def test_getitem(self): tm.assert_index_equal(result, expected) assert result.freq == expected.freq - def test_dti_business_getitem(self): - rng = bdate_range(START, END) - smaller = rng[:5] - exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B") - tm.assert_index_equal(smaller, exp) - assert smaller.freq == exp.freq - - assert smaller.freq == rng.freq - - sliced = rng[::5] - assert sliced.freq == BDay() * 5 - - fancy_indexed = rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 64-bit platforms - assert rng[4] == rng[np.int_(4)] - - def test_dti_business_getitem_matplotlib_hackaround(self): - rng = bdate_range(START, END) - with tm.assert_produces_warning(FutureWarning): - # GH#30588 multi-dimensional indexing deprecated - values = rng[:, None] - expected = rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) - - def test_dti_custom_getitem(self): - rng = bdate_range(START, END, freq="C") + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem(self, freq): + rng = bdate_range(START, END, freq=freq) smaller = rng[:5] - exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C") + exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq) tm.assert_index_equal(smaller, exp) assert smaller.freq == exp.freq assert smaller.freq == rng.freq sliced = rng[::5] - assert sliced.freq == CDay() * 5 + assert sliced.freq == to_offset(freq) * 5 fancy_indexed = rng[[4, 3, 2, 1, 0]] assert len(fancy_indexed) == 5 @@ -135,8 +95,9 @@ def test_dti_custom_getitem(self): # 32-bit vs. 
64-bit platforms assert rng[4] == rng[np.int_(4)] - def test_dti_custom_getitem_matplotlib_hackaround(self): - rng = bdate_range(START, END, freq="C") + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_dti_business_getitem_matplotlib_hackaround(self, freq): + rng = bdate_range(START, END, freq=freq) with tm.assert_produces_warning(FutureWarning): # GH#30588 multi-dimensional indexing deprecated values = rng[:, None] @@ -255,6 +216,12 @@ def test_where_tz(self): class TestTake: + def test_take_nan_first_datetime(self): + index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) + result = index.take([-1, 0, 1]) + expected = DatetimeIndex([index[-1], index[0], index[1]]) + tm.assert_index_equal(result, expected) + def test_take(self): # GH#10295 idx1 = date_range("2011-01-01", "2011-01-31", freq="D", name="idx") diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index f0757d0ba555e..44c353315562a 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -297,21 +297,6 @@ def test_week_and_weekofyear_are_deprecated(): idx.weekofyear -def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): - # GH 6538: Check that DatetimeIndex and its TimeStamp elements - # return the same weekofyear accessor close to new year w/ tz - dates = ["2013/12/29", "2013/12/30", "2013/12/31"] - dates = DatetimeIndex(dates, tz="Europe/Brussels") - result = dates.isocalendar() - expected_data_frame = pd.DataFrame( - [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]], - columns=["year", "week", "day"], - index=dates, - dtype="UInt32", - ) - tm.assert_frame_equal(result, expected_data_frame) - - def test_add_timedelta_preserves_freq(): # GH#37295 should hold for any DTI with freq=None or Tick freq tz = "Canada/Eastern" diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 7df94b5820e5d..d6ef4198fad2e 100644 
--- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -6,43 +6,17 @@ from pandas.compat import IS64 from pandas import ( - DateOffset, DatetimeIndex, Index, - Series, bdate_range, date_range, ) import pandas._testing as tm -from pandas.tseries.offsets import ( - BDay, - Day, - Hour, -) - START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) class TestDatetimeIndexOps: - def test_ops_properties_basic(self, datetime_series): - - # sanity check that the behavior didn't change - # GH#7206 - for op in ["year", "day", "second", "weekday"]: - msg = f"'Series' object has no attribute '{op}'" - with pytest.raises(AttributeError, match=msg): - getattr(datetime_series, op) - - # attribute access should still work! - s = Series({"year": 2000, "month": 1, "day": 10}) - assert s.year == 2000 - assert s.month == 1 - assert s.day == 10 - msg = "'Series' object has no attribute 'weekday'" - with pytest.raises(AttributeError, match=msg): - s.weekday - @pytest.mark.parametrize( "freq,expected", [ @@ -74,72 +48,28 @@ def test_infer_freq(self, freq_sample): tm.assert_index_equal(idx, result) assert result.freq == freq_sample - @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) - @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) - @pytest.mark.parametrize("tz", [None, "US/Eastern"]) - def test_freq_setter(self, values, freq, tz): - # GH 20678 - idx = DatetimeIndex(values, tz=tz) - - # can set to an offset, converting from string if necessary - idx._data.freq = freq - assert idx.freq == freq - assert isinstance(idx.freq, DateOffset) - - # can reset to None - idx._data.freq = None - assert idx.freq is None - - def test_freq_setter_errors(self): - # GH 20678 - idx = DatetimeIndex(["20180101", "20180103", "20180105"]) - - # setting with an incompatible freq - msg = ( - "Inferred frequency 2D from passed values does not conform to " - "passed frequency 5D" - ) - with 
pytest.raises(ValueError, match=msg): - idx._data.freq = "5D" - - # setting with non-freq string - with pytest.raises(ValueError, match="Invalid frequency"): - idx._data.freq = "foo" - - def test_freq_view_safe(self): - # Setting the freq for one DatetimeIndex shouldn't alter the freq - # for another that views the same data - - dti = date_range("2016-01-01", periods=5) - dta = dti._data - - dti2 = DatetimeIndex(dta)._with_freq(None) - assert dti2.freq is None - - # Original was not altered - assert dti.freq == "D" - assert dta.freq == "D" - +@pytest.mark.parametrize("freq", ["B", "C"]) class TestBusinessDatetimeIndex: - def setup_method(self, method): - self.rng = bdate_range(START, END) + @pytest.fixture + def rng(self, freq): + return bdate_range(START, END, freq=freq) - def test_comparison(self): - d = self.rng[10] + def test_comparison(self, rng): + d = rng[10] - comp = self.rng > d + comp = rng > d assert comp[11] assert not comp[9] - def test_copy(self): - cp = self.rng.copy() + def test_copy(self, rng): + cp = rng.copy() repr(cp) - tm.assert_index_equal(cp, self.rng) + tm.assert_index_equal(cp, rng) - def test_identical(self): - t1 = self.rng.copy() - t2 = self.rng.copy() + def test_identical(self, rng): + t1 = rng.copy() + t2 = rng.copy() assert t1.identical(t2) # name @@ -153,20 +83,3 @@ def test_identical(self): t2v = Index(t2.values) assert t1.equals(t2v) assert not t1.identical(t2v) - - -class TestCustomDatetimeIndex: - def setup_method(self, method): - self.rng = bdate_range(START, END, freq="C") - - def test_comparison(self): - d = self.rng[10] - - comp = self.rng > d - assert comp[11] - assert not comp[9] - - def test_copy(self): - cp = self.rng.copy() - repr(cp) - tm.assert_index_equal(cp, self.rng) diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 896c43db5e356..2f32f9e18311d 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ 
b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -1,7 +1,6 @@ """ test partial slicing on Series/Frame """ from datetime import datetime -import operator import numpy as np import pytest @@ -412,40 +411,6 @@ def test_loc_datetime_length_one(self): result = df.loc["2016-10-01T00:00:00":] tm.assert_frame_equal(result, df) - @pytest.mark.parametrize( - "datetimelike", - [ - Timestamp("20130101"), - datetime(2013, 1, 1), - np.datetime64("2013-01-01T00:00", "ns"), - ], - ) - @pytest.mark.parametrize( - "op,expected", - [ - (operator.lt, [True, False, False, False]), - (operator.le, [True, True, False, False]), - (operator.eq, [False, True, False, False]), - (operator.gt, [False, False, False, True]), - ], - ) - def test_selection_by_datetimelike(self, datetimelike, op, expected): - # GH issue #17965, test for ability to compare datetime64[ns] columns - # to datetimelike - df = DataFrame( - { - "A": [ - Timestamp("20120101"), - Timestamp("20130101"), - np.nan, - Timestamp("20130103"), - ] - } - ) - result = op(df.A, datetimelike) - expected = Series(expected, name="A") - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize( "start", [ diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 8df8eef69e9c9..7c00b23dc9ac4 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -8,9 +8,12 @@ from pandas import ( NA, CategoricalIndex, + Index, Interval, IntervalIndex, + MultiIndex, NaT, + Series, Timedelta, date_range, timedelta_range, @@ -373,6 +376,31 @@ def test_get_indexer_with_nans(self): expected = np.array([0, 1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) + def test_get_index_non_unique_non_monotonic(self): + # GH#44084 (root cause) + index = IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0), (0.0, 1.0), (1.0, 2.0)] + ) + + result, _ = index.get_indexer_non_unique([Interval(1.0, 2.0)]) + expected = 
np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_multiindex_with_intervals(self): + # GH#44084 (MultiIndex case as reported) + interval_index = IntervalIndex.from_tuples( + [(2.0, 3.0), (0.0, 1.0), (1.0, 2.0)], name="interval" + ) + foo_index = Index([1, 2, 3], name="foo") + + multi_index = MultiIndex.from_product([foo_index, interval_index]) + + result = multi_index.get_level_values("interval").get_indexer_for( + [Interval(0.0, 1.0)] + ) + expected = np.array([1, 4, 7], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + class TestSliceLocs: def test_slice_locs_with_interval(self): @@ -523,3 +551,37 @@ def test_putmask_td64(self): result = idx.putmask(mask, idx[-1]) expected = IntervalIndex([idx[-1]] * 3 + list(idx[3:])) tm.assert_index_equal(result, expected) + + +class TestGetValue: + @pytest.mark.parametrize("key", [[5], (2, 3)]) + def test_get_value_non_scalar_errors(self, key): + # GH#31117 + idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)]) + ser = Series(range(len(idx)), index=idx) + + msg = str(key) + with pytest.raises(InvalidIndexError, match=msg): + with tm.assert_produces_warning(FutureWarning): + idx.get_value(ser, key) + + +class TestContains: + # .__contains__, not .contains + + def test_contains_dunder(self): + + index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") + + # __contains__ requires perfect matches to intervals. 
+ assert 0 not in index + assert 1 not in index + assert 2 not in index + + assert Interval(0, 1, closed="right") in index + assert Interval(0, 2, closed="right") not in index + assert Interval(0, 0.5, closed="right") not in index + assert Interval(3, 5, closed="right") not in index + assert Interval(-1, 0, closed="left") not in index + assert Interval(0, 1, closed="left") not in index + assert Interval(0, 1, closed="both") not in index diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 321d1aa34b9af..843885832690f 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -4,8 +4,6 @@ import numpy as np import pytest -from pandas.errors import InvalidIndexError - import pandas as pd from pandas import ( Index, @@ -500,23 +498,6 @@ def test_contains_method(self): ): i.contains(Interval(0, 1)) - def test_contains_dunder(self): - - index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") - - # __contains__ requires perfect matches to intervals. 
- assert 0 not in index - assert 1 not in index - assert 2 not in index - - assert Interval(0, 1, closed="right") in index - assert Interval(0, 2, closed="right") not in index - assert Interval(0, 0.5, closed="right") not in index - assert Interval(3, 5, closed="right") not in index - assert Interval(-1, 0, closed="left") not in index - assert Interval(0, 1, closed="left") not in index - assert Interval(0, 1, closed="both") not in index - def test_dropna(self, closed): expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed) @@ -908,24 +889,6 @@ def test_is_all_dates(self): year_2017_index = IntervalIndex([year_2017]) assert not year_2017_index._is_all_dates - @pytest.mark.parametrize("key", [[5], (2, 3)]) - def test_get_value_non_scalar_errors(self, key): - # GH 31117 - idx = IntervalIndex.from_tuples([(1, 3), (2, 4), (3, 5), (7, 10), (3, 10)]) - s = pd.Series(range(len(idx)), index=idx) - - msg = str(key) - with pytest.raises(InvalidIndexError, match=msg): - with tm.assert_produces_warning(FutureWarning): - idx.get_value(s, key) - - @pytest.mark.parametrize("closed", ["left", "right", "both"]) - def test_pickle_round_trip_closed(self, closed): - # https://github.com/pandas-dev/pandas/issues/35658 - idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) - def test_dir(): # GH#27571 dir(interval_index) should not raise diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py new file mode 100644 index 0000000000000..308a90e72eab5 --- /dev/null +++ b/pandas/tests/indexes/interval/test_pickle.py @@ -0,0 +1,13 @@ +import pytest + +from pandas import IntervalIndex +import pandas._testing as tm + + +class TestPickle: + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # https://github.com/pandas-dev/pandas/issues/35658 + idx = 
IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index d2b5a595b8454..cbb4ae0b0d09b 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -96,10 +96,3 @@ def test_inplace_mutation_resets_values(): assert "_values" not in mi2._cache tm.assert_almost_equal(mi2.values, new_values) assert "_values" in mi2._cache - - -def test_pickle_compat_construction(): - # this is testing for pickle compat - # need an object to create with - with pytest.raises(TypeError, match="Must pass both levels and codes"): - MultiIndex() diff --git a/pandas/tests/indexes/multi/test_pickle.py b/pandas/tests/indexes/multi/test_pickle.py new file mode 100644 index 0000000000000..1d8b721404421 --- /dev/null +++ b/pandas/tests/indexes/multi/test_pickle.py @@ -0,0 +1,10 @@ +import pytest + +from pandas import MultiIndex + + +def test_pickle_compat_construction(): + # this is testing for pickle compat + # need an object to create with + with pytest.raises(TypeError, match="Must pass both levels and codes"): + MultiIndex() diff --git a/pandas/tests/indexes/period/methods/test_astype.py b/pandas/tests/indexes/period/methods/test_astype.py index e2340a2db02f7..c44f2efed1fcc 100644 --- a/pandas/tests/indexes/period/methods/test_astype.py +++ b/pandas/tests/indexes/period/methods/test_astype.py @@ -164,7 +164,10 @@ def test_period_astype_to_timestamp(self): assert res.freq == exp.freq exp = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], tz="US/Eastern") - res = pi.astype("datetime64[ns, US/Eastern]") + msg = "Use `obj.to_timestamp" + with tm.assert_produces_warning(FutureWarning, match=msg): + # GH#44398 + res = pi.astype("datetime64[ns, US/Eastern]") tm.assert_index_equal(res, exp) assert res.freq == exp.freq diff --git 
a/pandas/tests/indexes/period/test_freq_attr.py b/pandas/tests/indexes/period/test_freq_attr.py new file mode 100644 index 0000000000000..3bf3e700e5e72 --- /dev/null +++ b/pandas/tests/indexes/period/test_freq_attr.py @@ -0,0 +1,21 @@ +import pytest + +from pandas import ( + offsets, + period_range, +) +import pandas._testing as tm + + +class TestFreq: + def test_freq_setter_deprecated(self): + # GH#20678 + idx = period_range("2018Q1", periods=4, freq="Q") + + # no warning for getter + with tm.assert_produces_warning(None): + idx.freq + + # warning for setter + with pytest.raises(AttributeError, match="can't set attribute"): + idx.freq = offsets.Day() diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index dfa750bf933a0..df2f114e73df2 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -52,14 +52,6 @@ def non_comparable_idx(request): class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx") - - result = idx[...] 
- assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): idx = period_range("20010101", periods=10, freq="D", name="bob") assert idx.name == idx[1:].name @@ -205,6 +197,7 @@ def test_getitem_seconds(self): # GH7116 # these show deprecations as we are trying # to slice with non-integer indexers + # FIXME: don't leave commented-out # with pytest.raises(IndexError): # idx[v] continue @@ -814,12 +807,6 @@ def test_get_value(self): result2 = idx2.get_value(input2, p1) tm.assert_series_equal(result2, expected2) - def test_loc_str(self): - # https://github.com/pandas-dev/pandas/issues/33964 - index = period_range(start="2000", periods=20, freq="B") - series = Series(range(20), index=index) - assert series.loc["2000-01-14"] == 9 - @pytest.mark.parametrize("freq", ["H", "D"]) def test_get_value_datetime_hourly(self, freq): # get_loc and get_value should treat datetime objects symmetrically diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index e6c31d22e626f..f07107e9d3277 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -38,12 +38,6 @@ def index(self, request): def test_pickle_compat_construction(self): super().test_pickle_compat_construction() - @pytest.mark.parametrize("freq", ["D", "M", "A"]) - def test_pickle_round_trip(self, freq): - idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) - def test_where(self): # This is handled in test_indexing pass @@ -211,7 +205,7 @@ def _check_all_fields(self, periodindex): ] periods = list(periodindex) - s = Series(periodindex) + ser = Series(periodindex) for field in fields: field_idx = getattr(periodindex, field) @@ -219,10 +213,10 @@ def _check_all_fields(self, periodindex): for x, val in zip(periods, field_idx): assert getattr(x, field) == val - if len(s) == 0: + if len(ser) == 0: continue - 
field_s = getattr(s.dt, field) + field_s = getattr(ser.dt, field) assert len(periodindex) == len(field_s) for x, val in zip(periods, field_s): assert getattr(x, field) == val @@ -307,13 +301,6 @@ def test_with_multi_index(self): assert isinstance(s.index.values[0][0], Period) - def test_pickle_freq(self): - # GH2891 - prng = period_range("1/1/2011", "1/1/2012", freq="M") - new_prng = tm.round_trip_pickle(prng) - assert new_prng.freq == offsets.MonthEnd() - assert new_prng.freqstr == "M" - def test_map(self): # test_map_dictlike generally tests @@ -341,47 +328,6 @@ def test_maybe_convert_timedelta(): pi._maybe_convert_timedelta(offset) -def test_is_monotonic_with_nat(): - # GH#31437 - # PeriodIndex.is_monotonic should behave analogously to DatetimeIndex, - # in particular never be monotonic when we have NaT - dti = date_range("2016-01-01", periods=3) - pi = dti.to_period("D") - tdi = Index(dti.view("timedelta64[ns]")) - - for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]: - if isinstance(obj, Index): - # i.e. not Engines - assert obj.is_monotonic - assert obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - dti1 = dti.insert(0, NaT) - pi1 = dti1.to_period("D") - tdi1 = Index(dti1.view("timedelta64[ns]")) - - for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]: - if isinstance(obj, Index): - # i.e. not Engines - assert not obj.is_monotonic - assert not obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - dti2 = dti.insert(3, NaT) - pi2 = dti2.to_period("H") - tdi2 = Index(dti2.view("timedelta64[ns]")) - - for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]: - if isinstance(obj, Index): - # i.e. 
not Engines - assert not obj.is_monotonic - assert not obj.is_monotonic_increasing - assert not obj.is_monotonic_decreasing - assert obj.is_unique - - @pytest.mark.parametrize("array", [True, False]) def test_dunder_array(array): obj = PeriodIndex(["2000-01-01", "2001-01-01"], freq="D") diff --git a/pandas/tests/indexes/period/test_pickle.py b/pandas/tests/indexes/period/test_pickle.py new file mode 100644 index 0000000000000..82f906d1e361f --- /dev/null +++ b/pandas/tests/indexes/period/test_pickle.py @@ -0,0 +1,26 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + PeriodIndex, + period_range, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestPickle: + @pytest.mark.parametrize("freq", ["D", "M", "A"]) + def test_pickle_round_trip(self, freq): + idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.NaN], freq=freq) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + + def test_pickle_freq(self): + # GH#2891 + prng = period_range("1/1/2011", "1/1/2012", freq="M") + new_prng = tm.round_trip_pickle(prng) + assert new_prng.freq == offsets.MonthEnd() + assert new_prng.freqstr == "M" diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_resolution.py similarity index 56% rename from pandas/tests/indexes/period/test_ops.py rename to pandas/tests/indexes/period/test_resolution.py index 9ebe44fb16c8d..7ecbde75cfa47 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_resolution.py @@ -1,10 +1,9 @@ import pytest import pandas as pd -import pandas._testing as tm -class TestPeriodIndexOps: +class TestResolution: @pytest.mark.parametrize( "freq,expected", [ @@ -22,15 +21,3 @@ class TestPeriodIndexOps: def test_resolution(self, freq, expected): idx = pd.period_range(start="2013-04-01", periods=30, freq=freq) assert idx.resolution == expected - - def test_freq_setter_deprecated(self): - # GH 20678 - idx = pd.period_range("2018Q1", 
periods=4, freq="Q") - - # no warning for getter - with tm.assert_produces_warning(None): - idx.freq - - # warning for setter - with pytest.raises(AttributeError, match="can't set attribute"): - idx.freq = pd.offsets.Day() diff --git a/pandas/tests/indexes/test_any_index.py b/pandas/tests/indexes/test_any_index.py index 39a1ddcbc8a6a..91679959e7979 100644 --- a/pandas/tests/indexes/test_any_index.py +++ b/pandas/tests/indexes/test_any_index.py @@ -84,6 +84,13 @@ def test_is_type_compatible_deprecation(index): index.is_type_compatible(index.inferred_type) +def test_is_mixed_deprecated(index): + # GH#32922 + msg = "Index.is_mixed is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index.is_mixed() + + class TestConversion: def test_to_series(self, index): # assert that we are creating a copy of the index @@ -130,6 +137,12 @@ def test_pickle_preserves_name(self, index): class TestIndexing: + def test_getitem_ellipsis(self, index): + # GH#21282 + result = index[...] 
+ assert result.equals(index) + assert result is not index + def test_slice_keeps_name(self, index): assert index.name == index[1:].name diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 50be69fb93d7c..59ec66ecc1fe9 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1,8 +1,5 @@ from collections import defaultdict -from datetime import ( - datetime, - timedelta, -) +from datetime import datetime from io import StringIO import math import re @@ -10,10 +7,7 @@ import numpy as np import pytest -from pandas.compat import ( - IS64, - np_datetime64_compat, -) +from pandas.compat import IS64 from pandas.util._test_decorators import async_mark import pandas as pd @@ -27,9 +21,7 @@ RangeIndex, Series, TimedeltaIndex, - Timestamp, date_range, - isna, period_range, ) import pandas._testing as tm @@ -220,91 +212,6 @@ def test_constructor_simple_new(self, vals, dtype): result = index._simple_new(index.values, dtype) tm.assert_index_equal(result, index) - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - np.array([1, 2, 3]), - np.array([1, 2, 3], dtype=int), - # below should coerce - [1.0, 2.0, 3.0], - np.array([1.0, 2.0, 3.0], dtype=float), - ], - ) - def test_constructor_dtypes_to_int64(self, vals): - index = Index(vals, dtype=int) - assert isinstance(index, Int64Index) - - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - [1.0, 2.0, 3.0], - np.array([1.0, 2.0, 3.0]), - np.array([1, 2, 3], dtype=int), - np.array([1.0, 2.0, 3.0], dtype=float), - ], - ) - def test_constructor_dtypes_to_float64(self, vals): - index = Index(vals, dtype=float) - assert isinstance(index, Float64Index) - - @pytest.mark.parametrize( - "vals", - [ - [1, 2, 3], - np.array([1, 2, 3], dtype=int), - np.array( - [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")] - ), - [datetime(2011, 1, 1), datetime(2011, 1, 2)], - ], - ) - def test_constructor_dtypes_to_categorical(self, vals): - index = 
Index(vals, dtype="category") - assert isinstance(index, CategoricalIndex) - - @pytest.mark.parametrize("cast_index", [True, False]) - @pytest.mark.parametrize( - "vals", - [ - Index( - np.array( - [ - np_datetime64_compat("2011-01-01"), - np_datetime64_compat("2011-01-02"), - ] - ) - ), - Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), - ], - ) - def test_constructor_dtypes_to_datetime(self, cast_index, vals): - if cast_index: - index = Index(vals, dtype=object) - assert isinstance(index, Index) - assert index.dtype == object - else: - index = Index(vals) - assert isinstance(index, DatetimeIndex) - - @pytest.mark.parametrize("cast_index", [True, False]) - @pytest.mark.parametrize( - "vals", - [ - np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), - [timedelta(1), timedelta(1)], - ], - ) - def test_constructor_dtypes_to_timedelta(self, cast_index, vals): - if cast_index: - index = Index(vals, dtype=object) - assert isinstance(index, Index) - assert index.dtype == object - else: - index = Index(vals) - assert isinstance(index, TimedeltaIndex) - @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize("attr", ["values", "asi8"]) @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) @@ -395,15 +302,6 @@ def test_constructor_empty_special(self, empty, klass): assert isinstance(empty, klass) assert not len(empty) - def test_constructor_overflow_int64(self): - # see gh-15832 - msg = ( - "The elements provided in the data cannot " - "all be casted to the dtype int64" - ) - with pytest.raises(OverflowError, match=msg): - Index([np.iinfo(np.uint64).max - 1], dtype="int64") - @pytest.mark.parametrize( "index", [ @@ -502,18 +400,6 @@ def test_is_(self): ind2 = Index(arr, copy=False) assert not ind1.is_(ind2) - @pytest.mark.parametrize("index", ["datetime"], indirect=True) - def test_asof(self, index): - d = index[0] - assert index.asof(d) == d - assert isna(index.asof(d - timedelta(1))) - - d = index[-1] - assert 
index.asof(d + timedelta(1)) == d - - d = index[0].to_pydatetime() - assert isinstance(index.asof(d), Timestamp) - def test_asof_numeric_vs_bool_raises(self): left = Index([1, 2, 3]) right = Index([True, False]) @@ -699,12 +585,6 @@ def test_append_empty_preserve_name(self, name, expected): result = left.append(right) assert result.name == expected - def test_is_mixed_deprecated(self, simple_index): - # GH#32922 - index = simple_index - with tm.assert_produces_warning(FutureWarning): - index.is_mixed() - @pytest.mark.parametrize( "index, expected", [ @@ -754,20 +634,6 @@ def test_is_all_dates(self, index, expected): def test_summary(self, index): index._summary() - def test_summary_bug(self): - # GH3869` - ind = Index(["{other}%s", "~:{range}:0"], name="A") - result = ind._summary() - # shouldn't be formatted accidentally. - assert "~:{range}:0" in result - assert "{other}%s" in result - - def test_format_different_scalar_lengths(self): - # GH35439 - idx = Index(["aaaaaaaaa", "b"]) - expected = ["aaaaaaaaa", "b"] - assert idx.format() == expected - def test_format_bug(self): # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't @@ -795,21 +661,6 @@ def test_format_missing(self, vals, nulls_fixture): assert formatted == expected assert index[3] is nulls_fixture - def test_format_with_name_time_info(self): - # bug I fixed 12/20/2011 - dates = date_range("2011-01-01 04:00:00", periods=10, name="something") - - formatted = dates.format(name=True) - assert formatted[0] == "something" - - def test_format_datetime_with_time(self): - t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) - - result = t.format() - expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] - assert len(result) == 2 - assert result == expected - @pytest.mark.parametrize("op", ["any", "all"]) def test_logical_compat(self, op, simple_index): index = simple_index @@ -1157,12 +1008,6 @@ def test_outer_join_sort(self): tm.assert_index_equal(result, expected) - def 
test_nan_first_take_datetime(self): - index = Index([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) - result = index.take([-1, 0, 1]) - expected = Index([index[-1], index[0], index[1]]) - tm.assert_index_equal(result, expected) - def test_take_fill_value(self): # GH 12631 index = Index(list("ABC"), name="xxx") diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index ed9243a5ba8d0..80edaf77fe960 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -1,7 +1,7 @@ """ Collection of tests asserting things that should be true for -any index subclass. Makes use of the `indices` fixture defined -in pandas/tests/indexes/conftest.py. +any index subclass except for MultiIndex. Makes use of the `index_flat` +fixture defined in pandas/conftest.py. """ import re @@ -29,6 +29,26 @@ class TestCommon: + @pytest.mark.parametrize("name", [None, "new_name"]) + def test_to_frame(self, name, index_flat): + # see GH#15230, GH#22580 + idx = index_flat + + if name: + idx_name = name + else: + idx_name = idx.name or 0 + + df = idx.to_frame(name=idx_name) + + assert df.index is idx + assert len(df.columns) == 1 + assert df.columns[0] == idx_name + assert df[idx_name].values is not idx.values + + df = idx.to_frame(index=False, name=idx_name) + assert df.index is not idx + def test_droplevel(self, index): # GH 21115 if isinstance(index, MultiIndex): @@ -126,6 +146,46 @@ def test_copy_and_deepcopy(self, index_flat): new_copy = index.copy(deep=True, name="banana") assert new_copy.name == "banana" + def test_copy_name(self, index_flat): + # GH#12309: Check that the "name" argument + # passed at initialization is honored. + index = index_flat + + first = type(index)(index, copy=True, name="mario") + second = type(first)(first, copy=False) + + # Even though "copy=False", we want a new object. 
+ assert first is not second + tm.assert_index_equal(first, second) + + # Not using tm.assert_index_equal() since names differ. + assert index.equals(first) + + assert first.name == "mario" + assert second.name == "mario" + + # TODO: belongs in series arithmetic tests? + s1 = pd.Series(2, index=first) + s2 = pd.Series(3, index=second[:-1]) + # See GH#13365 + s3 = s1 * s2 + assert s3.index.name == "mario" + + def test_copy_name2(self, index_flat): + # GH#35592 + index = index_flat + if isinstance(index, MultiIndex): + return + + assert index.copy(name="mario").name == "mario" + + with pytest.raises(ValueError, match="Length of new names must be 1, got 2"): + index.copy(name=["mario", "luigi"]) + + msg = f"{type(index).__name__}.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + index.copy(name=[["mario"]]) + def test_unique_level(self, index_flat): # don't test a MultiIndex here (as its tested separated) index = index_flat @@ -332,6 +392,9 @@ def test_astype_preserves_name(self, index, dtype): ): # This astype is deprecated in favor of tz_localize warn = FutureWarning + elif isinstance(index, PeriodIndex) and dtype == "datetime64[ns]": + # Deprecated in favor of to_timestamp GH#44398 + warn = FutureWarning try: # Some of these conversions cannot succeed so we use a try / except with tm.assert_produces_warning(warn): diff --git a/pandas/tests/indexes/test_index_new.py b/pandas/tests/indexes/test_index_new.py index 293aa6dd57124..deeaffaf5b9cc 100644 --- a/pandas/tests/indexes/test_index_new.py +++ b/pandas/tests/indexes/test_index_new.py @@ -1,11 +1,17 @@ """ Tests for the Index constructor conducting inference. 
""" +from datetime import ( + datetime, + timedelta, +) from decimal import Decimal import numpy as np import pytest +from pandas.compat import np_datetime64_compat + from pandas.core.dtypes.common import is_unsigned_integer_dtype from pandas import ( @@ -27,6 +33,7 @@ ) import pandas._testing as tm from pandas.core.api import ( + Float64Index, Int64Index, UInt64Index, ) @@ -232,6 +239,91 @@ def test_constructor_int_dtype_nan_raises(self, dtype): with pytest.raises(ValueError, match=msg): Index(data, dtype=dtype) + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3]), + np.array([1, 2, 3], dtype=int), + # below should coerce + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_int64(self, vals): + index = Index(vals, dtype=int) + assert isinstance(index, Int64Index) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0]), + np.array([1, 2, 3], dtype=int), + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_float64(self, vals): + index = Index(vals, dtype=float) + assert isinstance(index, Float64Index) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3], dtype=int), + np.array( + [np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")] + ), + [datetime(2011, 1, 1), datetime(2011, 1, 2)], + ], + ) + def test_constructor_dtypes_to_categorical(self, vals): + index = Index(vals, dtype="category") + assert isinstance(index, CategoricalIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + Index( + np.array( + [ + np_datetime64_compat("2011-01-01"), + np_datetime64_compat("2011-01-02"), + ] + ) + ), + Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), + ], + ) + def test_constructor_dtypes_to_datetime(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert 
index.dtype == object + else: + index = Index(vals) + assert isinstance(index, DatetimeIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), + [timedelta(1), timedelta(1)], + ], + ) + def test_constructor_dtypes_to_timedelta(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, TimedeltaIndex) + class TestIndexConstructorUnwrapping: # Test passing different arraylike values to pd.Index @@ -272,3 +364,14 @@ def __array__(self, dtype=None) -> np.ndarray: expected = Index(array) result = Index(ArrayLike(array)) tm.assert_index_equal(result, expected) + + +class TestIndexConstructionErrors: + def test_constructor_overflow_int64(self): + # see GH#15832 + msg = ( + "The elements provided in the data cannot " + "all be casted to the dtype int64" + ) + with pytest.raises(OverflowError, match=msg): + Index([np.iinfo(np.uint64).max - 1], dtype="int64") diff --git a/pandas/tests/indexes/timedeltas/test_freq_attr.py b/pandas/tests/indexes/timedeltas/test_freq_attr.py new file mode 100644 index 0000000000000..39b9c11aa833c --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_freq_attr.py @@ -0,0 +1,61 @@ +import pytest + +from pandas import TimedeltaIndex + +from pandas.tseries.offsets import ( + DateOffset, + Day, + Hour, +) + + +class TestFreq: + @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) + @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) + def test_freq_setter(self, values, freq): + # GH#20678 + idx = TimedeltaIndex(values) + + # can set to an offset, converting from string if necessary + idx._data.freq = freq + assert idx.freq == freq + assert isinstance(idx.freq, DateOffset) + + # can reset to None + idx._data.freq = None + assert idx.freq is None + + def 
test_freq_setter_errors(self): + # GH#20678 + idx = TimedeltaIndex(["0 days", "2 days", "4 days"]) + + # setting with an incompatible freq + msg = ( + "Inferred frequency 2D from passed values does not conform to " + "passed frequency 5D" + ) + with pytest.raises(ValueError, match=msg): + idx._data.freq = "5D" + + # setting with a non-fixed frequency + msg = r"<2 \* BusinessDays> is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + idx._data.freq = "2B" + + # setting with non-freq string + with pytest.raises(ValueError, match="Invalid frequency"): + idx._data.freq = "foo" + + def test_freq_view_safe(self): + # Setting the freq for one TimedeltaIndex shouldn't alter the freq + # for another that views the same data + + tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D") + tda = tdi._data + + tdi2 = TimedeltaIndex(tda)._with_freq(None) + assert tdi2.freq is None + + # Original was not altered + assert tdi.freq == "2D" + assert tda.freq == "2D" diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index fc8abb83ed302..0c2f8d0103ceb 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -21,14 +21,6 @@ class TestGetItem: - def test_ellipsis(self): - # GH#21282 - idx = timedelta_range("1 day", "31 day", freq="D", name="idx") - - result = idx[...] 
- assert result.equals(idx) - assert result is not idx - def test_getitem_slice_keeps_name(self): # GH#4226 tdi = timedelta_range("1d", "5d", freq="H", name="timebucket") @@ -340,3 +332,17 @@ def test_slice_invalid_str_with_timedeltaindex( indexer_sl(obj)[:"foo"] with pytest.raises(TypeError, match=msg): indexer_sl(obj)[tdi[0] : "foo"] + + +class TestContains: + def test_contains_nonunique(self): + # GH#9512 + for vals in ( + [0, 1, 0], + [0, 0, -1], + [0, -1, -1], + ["00:01:00", "00:01:00", "00:02:00"], + ["00:01:00", "00:01:00", "00:00:01"], + ): + idx = TimedeltaIndex(vals) + assert idx[0] in idx diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 2a5051b2982bb..f6013baf86edc 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,97 +1,14 @@ -import numpy as np -import pytest - from pandas import ( - Series, TimedeltaIndex, timedelta_range, ) import pandas._testing as tm -from pandas.tseries.offsets import ( - DateOffset, - Day, - Hour, -) - class TestTimedeltaIndexOps: - def test_nonunique_contains(self): - # GH 9512 - for idx in map( - TimedeltaIndex, - ( - [0, 1, 0], - [0, 0, -1], - [0, -1, -1], - ["00:01:00", "00:01:00", "00:02:00"], - ["00:01:00", "00:01:00", "00:00:01"], - ), - ): - assert idx[0] in idx - - def test_unknown_attribute(self): - # see gh-9680 - tdi = timedelta_range(start=0, periods=10, freq="1s") - ts = Series(np.random.normal(size=10), index=tdi) - assert "foo" not in ts.__dict__.keys() - msg = "'Series' object has no attribute 'foo'" - with pytest.raises(AttributeError, match=msg): - ts.foo - def test_infer_freq(self, freq_sample): # GH#11018 idx = timedelta_range("1", freq=freq_sample, periods=10) result = TimedeltaIndex(idx.asi8, freq="infer") tm.assert_index_equal(idx, result) assert result.freq == freq_sample - - @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) - @pytest.mark.parametrize("freq", 
["2D", Day(2), "48H", Hour(48)]) - def test_freq_setter(self, values, freq): - # GH 20678 - idx = TimedeltaIndex(values) - - # can set to an offset, converting from string if necessary - idx._data.freq = freq - assert idx.freq == freq - assert isinstance(idx.freq, DateOffset) - - # can reset to None - idx._data.freq = None - assert idx.freq is None - - def test_freq_setter_errors(self): - # GH 20678 - idx = TimedeltaIndex(["0 days", "2 days", "4 days"]) - - # setting with an incompatible freq - msg = ( - "Inferred frequency 2D from passed values does not conform to " - "passed frequency 5D" - ) - with pytest.raises(ValueError, match=msg): - idx._data.freq = "5D" - - # setting with a non-fixed frequency - msg = r"<2 \* BusinessDays> is a non-fixed frequency" - with pytest.raises(ValueError, match=msg): - idx._data.freq = "2B" - - # setting with non-freq string - with pytest.raises(ValueError, match="Invalid frequency"): - idx._data.freq = "foo" - - def test_freq_view_safe(self): - # Setting the freq for one TimedeltaIndex shouldn't alter the freq - # for another that views the same data - - tdi = TimedeltaIndex(["0 days", "2 days", "4 days"], freq="2D") - tda = tdi._data - - tdi2 = TimedeltaIndex(tda)._with_freq(None) - assert tdi2.freq is None - - # Original was not altered - assert tdi.freq == "2D" - assert tda.freq == "2D" diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 42edaa2fe6c3a..55d45a21d643a 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -702,32 +702,30 @@ def test_per_axis_per_level_setitem(self): tm.assert_frame_equal(df, expected) def test_multiindex_label_slicing_with_negative_step(self): - s = Series( + ser = Series( np.arange(20), MultiIndex.from_product([list("abcde"), np.arange(4)]) ) SLC = pd.IndexSlice - def assert_slices_equivalent(l_slc, i_slc): - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - 
tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) + tm.assert_indexing_slices_equivalent(ser, SLC[::-1], SLC[::-1]) - assert_slices_equivalent(SLC[::-1], SLC[::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d"::-1], SLC[15::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",)::-1], SLC[15::-1]) - assert_slices_equivalent(SLC["d"::-1], SLC[15::-1]) - assert_slices_equivalent(SLC[("d",)::-1], SLC[15::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:"d":-1], SLC[:11:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("d",):-1], SLC[:11:-1]) - assert_slices_equivalent(SLC[:"d":-1], SLC[:11:-1]) - assert_slices_equivalent(SLC[:("d",):-1], SLC[:11:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d":"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):"b":-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["d":("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("d",):("b",):-1], SLC[15:3:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC["b":"d":-1], SLC[:0]) - assert_slices_equivalent(SLC["d":"b":-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC[("d",):"b":-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC["d":("b",):-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC[("d",):("b",):-1], SLC[15:3:-1]) - assert_slices_equivalent(SLC["b":"d":-1], SLC[:0]) - - assert_slices_equivalent(SLC[("c", 2)::-1], SLC[10::-1]) - assert_slices_equivalent(SLC[:("c", 2):-1], SLC[:9:-1]) - assert_slices_equivalent(SLC[("e", 0):("c", 2):-1], SLC[16:9:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[("c", 2)::-1], SLC[10::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:("c", 2):-1], SLC[:9:-1]) + tm.assert_indexing_slices_equivalent( + ser, SLC[("e", 0):("c", 2):-1], SLC[16:9:-1] + ) def test_multiindex_slice_first_level(self): # GH 12697 diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index e46eed05caa86..332ab02255911 100644 --- 
a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -130,7 +130,7 @@ def test_nanosecond_getitem_setitem_with_tz(self): expected = DataFrame(-1, index=index, columns=["a"]) tm.assert_frame_equal(result, expected) - def test_getitem_millisecond_resolution(self, frame_or_series): + def test_getitem_str_slice_millisecond_resolution(self, frame_or_series): # GH#33589 keys = [ @@ -152,16 +152,3 @@ def test_getitem_millisecond_resolution(self, frame_or_series): ], ) tm.assert_equal(result, expected) - - def test_str_subclass(self): - # GH 37366 - class mystring(str): - pass - - data = ["2020-10-22 01:21:00+00:00"] - index = pd.DatetimeIndex(data) - df = DataFrame({"a": [1]}, index=index) - df["b"] = 2 - df[mystring("c")] = 3 - expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index) - tm.assert_equal(df, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index a10288b2091ca..6a9ece738952d 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -323,9 +323,9 @@ def test_dups_fancy_indexing3(self): def test_duplicate_int_indexing(self, indexer_sl): # GH 17347 - s = Series(range(3), index=[1, 1, 3]) - expected = s[1] - result = indexer_sl(s)[[1]] + ser = Series(range(3), index=[1, 1, 3]) + expected = Series(range(2), index=[1, 1]) + result = indexer_sl(ser)[[1]] tm.assert_series_equal(result, expected) def test_indexing_mixed_frame_bug(self): @@ -653,13 +653,6 @@ def test_loc_setitem_fullindex_views(self): df.loc[df.index] = df.loc[df.index] tm.assert_frame_equal(df, df2) - def test_float_index_at_iat(self): - s = Series([1, 2, 3], index=[0.1, 0.2, 0.3]) - for el, item in s.items(): - assert s.at[el] == item - for i in range(len(s)): - assert s.iat[i] == i + 1 - def test_rhs_alignment(self): # GH8258, tests that both rows & columns are aligned to what is # assigned to. 
covers both uniform data-type & multi-type cases @@ -709,21 +702,17 @@ def run_tests(df, rhs, right_loc, right_iloc): def test_str_label_slicing_with_negative_step(self): SLC = pd.IndexSlice - def assert_slices_equivalent(l_slc, i_slc): - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - - if not idx.is_integer: - # For integer indices, .loc and plain getitem are position-based. - tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) - tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) - for idx in [_mklbl("A", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]: idx = Index(idx) - s = Series(np.arange(20), index=idx) - assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1]) - assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1]) - assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0]) + ser = Series(np.arange(20), index=idx) + tm.assert_indexing_slices_equivalent(ser, SLC[idx[9] :: -1], SLC[9::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[: idx[9] : -1], SLC[:8:-1]) + tm.assert_indexing_slices_equivalent( + ser, SLC[idx[13] : idx[9] : -1], SLC[13:8:-1] + ) + tm.assert_indexing_slices_equivalent( + ser, SLC[idx[9] : idx[13] : -1], SLC[:0] + ) def test_slice_with_zero_step_raises(self, indexer_sl, frame_or_series): obj = frame_or_series(np.arange(20), index=_mklbl("A", 20)) @@ -786,12 +775,12 @@ def test_no_reference_cycle(self): del df assert wr() is None - def test_label_indexing_on_nan(self): + def test_label_indexing_on_nan(self, nulls_fixture): # GH 32431 - df = Series([1, "{1,2}", 1, None]) + df = Series([1, "{1,2}", 1, nulls_fixture]) vc = df.value_counts(dropna=False) - result1 = vc.loc[np.nan] - result2 = vc[np.nan] + result1 = vc.loc[nulls_fixture] + result2 = vc[nulls_fixture] expected = 1 assert result1 == expected @@ -967,7 +956,11 @@ def test_extension_array_cross_section(): def test_extension_array_cross_section_converts(): # all numeric columns -> numeric series df 
= DataFrame( - {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"] + { + "A": pd.array([1, 2], dtype="Int64"), + "B": np.array([1, 2], dtype="int64"), + }, + index=["a", "b"], ) result = df.loc["a"] expected = Series([1, 1], dtype="Int64", index=["A", "B"], name="a") @@ -987,10 +980,3 @@ def test_extension_array_cross_section_converts(): result = df.iloc[0] tm.assert_series_equal(result, expected) - - -def test_getitem_object_index_float_string(): - # GH 17286 - s = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) - assert s["a"] == 1 - assert s[1.0] == 1 diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index b0aa05371271b..ed9b5cc0850b9 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -2941,3 +2941,9 @@ def test_loc_set_multiple_items_in_multiple_new_columns(self): ) tm.assert_frame_equal(df, expected) + + def test_getitem_loc_str_periodindex(self): + # GH#33964 + index = pd.period_range(start="2000", periods=20, freq="B") + series = Series(range(20), index=index) + assert series.loc["2000-01-14"] == 9 diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index c487777fc339e..82d55a7bf7189 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -22,6 +22,213 @@ import pandas._testing as tm +class TestEmptyFrameSetitemExpansion: + def test_empty_frame_setitem_index_name_retained(self): + # GH#31368 empty frame has non-None index.name -> retained + df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) + + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") + ) + + tm.assert_frame_equal(df, expected) + + def test_empty_frame_setitem_index_name_inherited(self): + # GH#36527 empty frame has None index.name -> not retained + df = DataFrame() + series = 
Series(1.23, index=pd.RangeIndex(4, name="series_index")) + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") + ) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_zerolen_series_columns_align(self): + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=range(4)) + expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64) + tm.assert_frame_equal(df, expected) + + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=["B"]) + + exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64") + tm.assert_frame_equal(df, exp) + + def test_loc_setitem_zerolen_list_length_must_match_columns(self): + # list-like must conform + df = DataFrame(columns=["A", "B"]) + + msg = "cannot set a row with mismatched columns" + with pytest.raises(ValueError, match=msg): + df.loc[0] = [1, 2, 3] + + df = DataFrame(columns=["A", "B"]) + df.loc[3] = [6, 7] # length matches len(df.columns) --> OK! 
+ + exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64) + tm.assert_frame_equal(df, exp) + + def test_partial_set_empty_frame(self): + + # partially set with an empty object + # frame + df = DataFrame() + + msg = "cannot set a frame with no defined columns" + + with pytest.raises(ValueError, match=msg): + df.loc[1] = 1 + + with pytest.raises(ValueError, match=msg): + df.loc[1] = Series([1], index=["foo"]) + + msg = "cannot set a frame with no defined index and a scalar" + with pytest.raises(ValueError, match=msg): + df.loc[:, 1] = 1 + + def test_partial_set_empty_frame2(self): + # these work as they don't really change + # anything but the index + # GH#5632 + expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) + + df = DataFrame(index=Index([], dtype="object")) + df["foo"] = Series([], dtype="object") + + tm.assert_frame_equal(df, expected) + + df = DataFrame() + df["foo"] = Series(df.index) + + tm.assert_frame_equal(df, expected) + + df = DataFrame() + df["foo"] = df.index + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame3(self): + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + expected["foo"] = expected["foo"].astype("float64") + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = [] + + tm.assert_frame_equal(df, expected) + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = Series(np.arange(len(df)), dtype="float64") + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame4(self): + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = range(len(df)) + + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + # range is int-dtype-like, so we get int64 dtype + expected["foo"] = expected["foo"].astype("int64") + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame5(self): + df = DataFrame() + tm.assert_index_equal(df.columns, Index([], dtype=object)) + df2 = DataFrame() + df2[1] = 
Series([1], index=["foo"]) + df.loc[:, 1] = Series([1], index=["foo"]) + tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1])) + tm.assert_frame_equal(df, df2) + + def test_partial_set_empty_frame_no_index(self): + # no index to start + expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0]) + + df = DataFrame(columns=["A", "B"]) + df[0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["A", "B"]) + df.loc[:, 0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_row(self): + # GH#5720, GH#5744 + # don't create rows when empty + expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64")) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["New"] = expected["New"].astype("float64") + + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + y["New"] = np.nan + tm.assert_frame_equal(y, expected) + + expected = DataFrame(columns=["a", "b", "c c", "d"]) + expected["d"] = expected["d"].astype("int64") + df = DataFrame(columns=["a", "b", "c c"]) + df["d"] = 3 + tm.assert_frame_equal(df, expected) + tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object)) + + # reindex columns is ok + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + result = y.reindex(columns=["A", "B", "C"]) + expected = DataFrame(columns=["A", "B", "C"], index=Index([], dtype="int64")) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["C"] = expected["C"].astype("float64") + tm.assert_frame_equal(result, expected) + + def test_partial_set_empty_frame_set_series(self): + # GH#5756 + # setting with empty Series + df = DataFrame(Series(dtype=object)) + expected = DataFrame({0: Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + 
df = DataFrame(Series(name="foo", dtype=object)) + expected = DataFrame({"foo": Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_copy_assignment(self): + # GH#5932 + # copy on empty with assignment fails + df = DataFrame(index=[0]) + df = df.copy() + df["a"] = 0 + expected = DataFrame(0, index=[0], columns=["a"]) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_consistencies(self): + # GH#6171 + # consistency on empty frames + df = DataFrame(columns=["x", "y"]) + df["x"] = [1, 2] + expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + df = DataFrame(columns=["x", "y"]) + df["x"] = ["1", "2"] + expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["x", "y"]) + df.loc[0, "x"] = 1 + expected = DataFrame({"x": [1], "y": [np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + class TestPartialSetting: def test_partial_setting(self): @@ -61,8 +268,7 @@ def test_partial_setting(self): with pytest.raises(IndexError, match=msg): s.iat[3] = 5.0 - # ## frame ## - + def test_partial_setting_frame(self): df_orig = DataFrame( np.arange(6).reshape(3, 2), columns=["A", "B"], dtype="int64" ) @@ -166,33 +372,6 @@ def test_partial_setting_mixed_dtype(self): df.loc[2] = df.loc[1] tm.assert_frame_equal(df, expected) - # columns will align - df = DataFrame(columns=["A", "B"]) - df.loc[0] = Series(1, index=range(4)) - expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64) - tm.assert_frame_equal(df, expected) - - # columns will align - # TODO: it isn't great that this behavior depends on consolidation - df = DataFrame(columns=["A", "B"])._consolidate() - df.loc[0] = Series(1, index=["B"]) - - exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64") - tm.assert_frame_equal(df, exp) - - # list-like 
must conform - df = DataFrame(columns=["A", "B"]) - - msg = "cannot set a row with mismatched columns" - with pytest.raises(ValueError, match=msg): - df.loc[0] = [1, 2, 3] - - df = DataFrame(columns=["A", "B"]) - df.loc[3] = [6, 7] - - exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64) - tm.assert_frame_equal(df, exp) - def test_series_partial_set(self): # partial set with new index # Regression from GH4825 @@ -352,6 +531,7 @@ def test_setitem_with_expansion_numeric_into_datetimeindex(self, key): ex_index = Index(list(orig.index) + [key], dtype=object, name=orig.index.name) ex_data = np.concatenate([orig.values, df.iloc[[0]].values], axis=0) expected = DataFrame(ex_data, index=ex_index, columns=orig.columns) + tm.assert_frame_equal(df, expected) def test_partial_set_invalid(self): @@ -369,162 +549,6 @@ def test_partial_set_invalid(self): tm.assert_index_equal(df.index, Index(orig.index.tolist() + ["a"])) assert df.index.dtype == "object" - def test_partial_set_empty_frame(self): - - # partially set with an empty object - # frame - df = DataFrame() - - msg = "cannot set a frame with no defined columns" - - with pytest.raises(ValueError, match=msg): - df.loc[1] = 1 - - with pytest.raises(ValueError, match=msg): - df.loc[1] = Series([1], index=["foo"]) - - msg = "cannot set a frame with no defined index and a scalar" - with pytest.raises(ValueError, match=msg): - df.loc[:, 1] = 1 - - def test_partial_set_empty_frame2(self): - # these work as they don't really change - # anything but the index - # GH5632 - expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) - - df = DataFrame(index=Index([], dtype="object")) - df["foo"] = Series([], dtype="object") - - tm.assert_frame_equal(df, expected) - - df = DataFrame() - df["foo"] = Series(df.index) - - tm.assert_frame_equal(df, expected) - - df = DataFrame() - df["foo"] = df.index - - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame3(self): - expected = 
DataFrame(columns=["foo"], index=Index([], dtype="int64")) - expected["foo"] = expected["foo"].astype("float64") - - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = [] - - tm.assert_frame_equal(df, expected) - - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = Series(np.arange(len(df)), dtype="float64") - - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame4(self): - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = range(len(df)) - - expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) - # range is int-dtype-like, so we get int64 dtype - expected["foo"] = expected["foo"].astype("int64") - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame5(self): - df = DataFrame() - tm.assert_index_equal(df.columns, Index([], dtype=object)) - df2 = DataFrame() - df2[1] = Series([1], index=["foo"]) - df.loc[:, 1] = Series([1], index=["foo"]) - tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1])) - tm.assert_frame_equal(df, df2) - - def test_partial_set_empty_frame_no_index(self): - # no index to start - expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0]) - - df = DataFrame(columns=["A", "B"]) - df[0] = Series(1, index=range(4)) - df.dtypes - str(df) - tm.assert_frame_equal(df, expected) - - df = DataFrame(columns=["A", "B"]) - df.loc[:, 0] = Series(1, index=range(4)) - df.dtypes - str(df) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_row(self): - # GH5720, GH5744 - # don't create rows when empty - expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64")) - expected["A"] = expected["A"].astype("int64") - expected["B"] = expected["B"].astype("float64") - expected["New"] = expected["New"].astype("float64") - - df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) - y = df[df.A > 5] - y["New"] = np.nan - tm.assert_frame_equal(y, expected) - # tm.assert_frame_equal(y,expected) - - expected = 
DataFrame(columns=["a", "b", "c c", "d"]) - expected["d"] = expected["d"].astype("int64") - df = DataFrame(columns=["a", "b", "c c"]) - df["d"] = 3 - tm.assert_frame_equal(df, expected) - tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object)) - - # reindex columns is ok - df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) - y = df[df.A > 5] - result = y.reindex(columns=["A", "B", "C"]) - expected = DataFrame(columns=["A", "B", "C"], index=Index([], dtype="int64")) - expected["A"] = expected["A"].astype("int64") - expected["B"] = expected["B"].astype("float64") - expected["C"] = expected["C"].astype("float64") - tm.assert_frame_equal(result, expected) - - def test_partial_set_empty_frame_set_series(self): - # GH 5756 - # setting with empty Series - df = DataFrame(Series(dtype=object)) - expected = DataFrame({0: Series(dtype=object)}) - tm.assert_frame_equal(df, expected) - - df = DataFrame(Series(name="foo", dtype=object)) - expected = DataFrame({"foo": Series(dtype=object)}) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_empty_copy_assignment(self): - # GH 5932 - # copy on empty with assignment fails - df = DataFrame(index=[0]) - df = df.copy() - df["a"] = 0 - expected = DataFrame(0, index=[0], columns=["a"]) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_empty_consistencies(self): - # GH 6171 - # consistency on empty frames - df = DataFrame(columns=["x", "y"]) - df["x"] = [1, 2] - expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]}) - tm.assert_frame_equal(df, expected, check_dtype=False) - - df = DataFrame(columns=["x", "y"]) - df["x"] = ["1", "2"] - expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object) - tm.assert_frame_equal(df, expected) - - df = DataFrame(columns=["x", "y"]) - df.loc[0, "x"] = 1 - expected = DataFrame({"x": [1], "y": [np.nan]}) - tm.assert_frame_equal(df, expected, check_dtype=False) - @pytest.mark.parametrize( "idx,labels,expected_idx", [ @@ 
-584,14 +608,14 @@ def test_loc_with_list_of_strings_representing_datetimes_missing_value( self, idx, labels ): # GH 11278 - s = Series(range(20), index=idx) + ser = Series(range(20), index=idx) df = DataFrame(range(20), index=idx) msg = r"not in index" with pytest.raises(KeyError, match=msg): - s.loc[labels] + ser.loc[labels] with pytest.raises(KeyError, match=msg): - s[labels] + ser[labels] with pytest.raises(KeyError, match=msg): df.loc[labels] @@ -628,37 +652,18 @@ def test_loc_with_list_of_strings_representing_datetimes_not_matched_type( self, idx, labels, msg ): # GH 11278 - s = Series(range(20), index=idx) + ser = Series(range(20), index=idx) df = DataFrame(range(20), index=idx) with pytest.raises(KeyError, match=msg): - s.loc[labels] + ser.loc[labels] with pytest.raises(KeyError, match=msg): - s[labels] + ser[labels] with pytest.raises(KeyError, match=msg): df.loc[labels] - def test_index_name_empty(self): - # GH 31368 - df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) - series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) - - df["series"] = series - expected = DataFrame( - {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") - ) - - tm.assert_frame_equal(df, expected) - - # GH 36527 - df = DataFrame() - series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) - df["series"] = series - expected = DataFrame( - {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") - ) - tm.assert_frame_equal(df, expected) +class TestStringSlicing: def test_slice_irregular_datetime_index_with_nan(self): # GH36953 index = pd.to_datetime(["2012-01-01", "2012-01-02", "2012-01-03", None]) diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index bf262e6755289..bcb76fb078e74 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -77,6 +77,13 @@ def _check(f, func, values=False): class TestAtAndiAT: # at and iat tests that don't need Base class + 
def test_float_index_at_iat(self): + ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3]) + for el, item in ser.items(): + assert ser.at[el] == item + for i in range(len(ser)): + assert ser.iat[i] == i + 1 + def test_at_iat_coercion(self): # as timestamp is not a tuple! diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index a2b90f607e918..272a4aa6723dd 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -634,6 +634,33 @@ def test_missing_meta(self, missing_metadata): expected = DataFrame(ex_data, columns=columns) tm.assert_frame_equal(result, expected) + def test_missing_nested_meta(self): + # GH44312 + # If errors="ignore" and nested metadata is null, we should return nan + data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]} + result = json_normalize( + data, + record_path="value", + meta=["meta", ["nested_meta", "leaf"]], + errors="ignore", + ) + ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]] + columns = ["rec", "meta", "nested_meta.leaf"] + expected = DataFrame(ex_data, columns=columns).astype( + {"nested_meta.leaf": object} + ) + tm.assert_frame_equal(result, expected) + + # If errors="raise" and nested metadata is null, we should raise with the + # key of the first missing level + with pytest.raises(KeyError, match="'leaf' not found"): + json_normalize( + data, + record_path="value", + meta=["meta", ["nested_meta", "leaf"]], + errors="raise", + ) + def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata): # GH41876 # Ensure errors='raise' works as intended even when a record_path of length diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index e0799df8d7a4c..5aa0edfd8b46a 100644 --- a/pandas/tests/io/parser/test_compression.py +++ b/pandas/tests/io/parser/test_compression.py @@ -103,8 +103,6 @@ def test_compression(parser_and_data, compression_only, buffer, filename): 
tm.write_to_compressed(compress_type, path, data) compression = "infer" if filename else compress_type - if ext == "bz2": - pytest.xfail("pyarrow wheels don't have bz2 codec support") if buffer: with open(path, "rb") as f: result = parser.read_csv(f, compression=compression) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 17c107814995c..470440290016d 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -26,6 +26,7 @@ is_platform_windows, np_array_datetime64_compat, ) +from pandas.compat.pyarrow import pa_version_under6p0 import pandas as pd from pandas import ( @@ -431,6 +432,11 @@ def test_date_col_as_index_col(all_parsers): columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], index=index, ) + if parser.engine == "pyarrow" and not pa_version_under6p0: + # https://github.com/pandas-dev/pandas/issues/44231 + # pyarrow 6.0 starts to infer time type + expected["X2"] = pd.to_datetime("1970-01-01" + expected["X2"]).dt.time + tm.assert_frame_equal(result, expected) @@ -1726,6 +1732,39 @@ def test_date_parser_and_names(all_parsers): tm.assert_frame_equal(result, expected) +@skip_pyarrow +def test_date_parser_multiindex_columns(all_parsers): + parser = all_parsers + data = """a,b +1,2 +2019-12-31,6""" + result = parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1]) + expected = DataFrame({("a", "1"): Timestamp("2019-12-31"), ("b", "2"): [6]}) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow +@pytest.mark.parametrize( + "parse_spec, col_name", + [ + ([[("a", "1"), ("b", "2")]], ("a_b", "1_2")), + ({("foo", "1"): [("a", "1"), ("b", "2")]}, ("foo", "1")), + ], +) +def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, col_name): + parser = all_parsers + data = """a,b,c +1,2,3 +2019-12,-31,6""" + result = parser.read_csv( + StringIO(data), + parse_dates=parse_spec, + header=[0, 1], + ) + expected = 
DataFrame({col_name: Timestamp("2019-12-31"), ("c", "3"): [6]}) + tm.assert_frame_equal(result, expected) + + @skip_pyarrow def test_date_parser_usecols_thousands(all_parsers): # GH#39365 diff --git a/pandas/tests/libs/test_hashtable.py b/pandas/tests/libs/test_hashtable.py index bdc02ff0aa7a8..937eccf7a0afe 100644 --- a/pandas/tests/libs/test_hashtable.py +++ b/pandas/tests/libs/test_hashtable.py @@ -453,13 +453,11 @@ def test_mode_stable(self, dtype, writable): def test_modes_with_nans(): - # GH39007 - values = np.array([True, pd.NA, np.nan], dtype=np.object_) - # pd.Na and np.nan will have the same representative: np.nan - # thus we have 2 nans and 1 True + # GH42688, nans aren't mangled + nulls = [pd.NA, np.nan, pd.NaT, None] + values = np.array([True] + nulls * 2, dtype=np.object_) modes = ht.mode(values, False) - assert modes.size == 1 - assert np.isnan(modes[0]) + assert modes.size == len(nulls) def test_unique_label_indices_intp(writable): diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index dbceeae44a493..ce32e5801e461 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -543,6 +543,14 @@ def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel): result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] assert expected_xticklabel == result_xticklabel + def test_groupby_boxplot_object(self): + # GH 43480 + df = self.hist_df.astype("object") + grouped = df.groupby("gender") + msg = "boxplot method requires numerical columns, nothing to plot" + with pytest.raises(ValueError, match=msg): + _check_plot_works(grouped.boxplot, subplots=False) + def test_boxplot_multiindex_column(self): # GH 16748 arrays = [ diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 8436c2db445ee..34e8e2ac3e84a 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ 
b/pandas/tests/resample/test_datetime_index.py @@ -1692,8 +1692,6 @@ def f(data, add_arg): df = DataFrame({"A": 1, "B": 2}, index=date_range("2017", periods=10)) result = df.groupby("A").resample("D").agg(f, multiplier).astype(float) expected = df.groupby("A").resample("D").mean().multiply(multiplier) - # TODO: GH 41137 - expected = expected.astype("float64") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 21ed57813b60d..b9718249b38c8 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -182,6 +182,7 @@ def test_nat_methods_nat(method): def test_nat_iso_format(get_nat): # see gh-12300 assert get_nat("NaT").isoformat() == "NaT" + assert get_nat("NaT").isoformat(timespec="nanoseconds") == "NaT" @pytest.mark.parametrize( @@ -325,6 +326,10 @@ def test_nat_doc_strings(compare): klass, method = compare klass_doc = getattr(klass, method).__doc__ + # Ignore differences with Timestamp.isoformat() as they're intentional + if klass == Timestamp and method == "isoformat": + return + nat_doc = getattr(NaT, method).__doc__ assert klass_doc == nat_doc diff --git a/pandas/tests/scalar/timestamp/test_formats.py b/pandas/tests/scalar/timestamp/test_formats.py new file mode 100644 index 0000000000000..71dbf3539bdb2 --- /dev/null +++ b/pandas/tests/scalar/timestamp/test_formats.py @@ -0,0 +1,71 @@ +import pytest + +from pandas import Timestamp + +ts_no_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, +) +ts_ns = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, +) +ts_ns_tz = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=132263, + nanosecond=123, + tz="UTC", +) +ts_no_us = Timestamp( + year=2019, + month=5, + day=18, + hour=15, + minute=17, + second=8, + microsecond=0, + nanosecond=123, +) + + 
+@pytest.mark.parametrize( + "ts, timespec, expected_iso", + [ + (ts_no_ns, "auto", "2019-05-18T15:17:08.132263"), + (ts_no_ns, "seconds", "2019-05-18T15:17:08"), + (ts_no_ns, "nanoseconds", "2019-05-18T15:17:08.132263000"), + (ts_ns, "auto", "2019-05-18T15:17:08.132263123"), + (ts_ns, "hours", "2019-05-18T15"), + (ts_ns, "minutes", "2019-05-18T15:17"), + (ts_ns, "seconds", "2019-05-18T15:17:08"), + (ts_ns, "milliseconds", "2019-05-18T15:17:08.132"), + (ts_ns, "microseconds", "2019-05-18T15:17:08.132263"), + (ts_ns, "nanoseconds", "2019-05-18T15:17:08.132263123"), + (ts_ns_tz, "auto", "2019-05-18T15:17:08.132263123+00:00"), + (ts_ns_tz, "hours", "2019-05-18T15+00:00"), + (ts_ns_tz, "minutes", "2019-05-18T15:17+00:00"), + (ts_ns_tz, "seconds", "2019-05-18T15:17:08+00:00"), + (ts_ns_tz, "milliseconds", "2019-05-18T15:17:08.132+00:00"), + (ts_ns_tz, "microseconds", "2019-05-18T15:17:08.132263+00:00"), + (ts_ns_tz, "nanoseconds", "2019-05-18T15:17:08.132263123+00:00"), + (ts_no_us, "auto", "2019-05-18T15:17:08.000000123"), + ], +) +def test_isoformat(ts, timespec, expected_iso): + assert ts.isoformat(timespec=timespec) == expected_iso diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index eb7e1d4268605..48a3ebd25c239 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -39,121 +39,136 @@ ) import pandas.core.common as com +ok_for_period = PeriodArray._datetimelike_ops +ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"] +ok_for_dt = DatetimeArray._datetimelike_ops +ok_for_dt_methods = [ + "to_period", + "to_pydatetime", + "tz_localize", + "tz_convert", + "normalize", + "strftime", + "round", + "floor", + "ceil", + "day_name", + "month_name", + "isocalendar", +] +ok_for_td = TimedeltaArray._datetimelike_ops +ok_for_td_methods = [ + "components", + "to_pytimedelta", + "total_seconds", + "round", + "floor", + "ceil", +] + + +def 
get_dir(ser): + # check limited display api + results = [r for r in ser.dt.__dir__() if not r.startswith("_")] + return sorted(set(results)) -class TestSeriesDatetimeValues: - def test_dt_namespace_accessor(self): +class TestSeriesDatetimeValues: + def _compare(self, ser, name): # GH 7207, 11128 # test .dt namespace accessor - ok_for_period = PeriodArray._datetimelike_ops - ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"] - ok_for_dt = DatetimeArray._datetimelike_ops - ok_for_dt_methods = [ - "to_period", - "to_pydatetime", - "tz_localize", - "tz_convert", - "normalize", - "strftime", - "round", - "floor", - "ceil", - "day_name", - "month_name", - "isocalendar", - ] - ok_for_td = TimedeltaArray._datetimelike_ops - ok_for_td_methods = [ - "components", - "to_pytimedelta", - "total_seconds", - "round", - "floor", - "ceil", - ] - - def get_expected(s, name): - result = getattr(Index(s._values), prop) + def get_expected(ser, prop): + result = getattr(Index(ser._values), prop) if isinstance(result, np.ndarray): if is_integer_dtype(result): result = result.astype("int64") elif not is_list_like(result) or isinstance(result, DataFrame): return result - return Series(result, index=s.index, name=s.name) - - def compare(s, name): - a = getattr(s.dt, prop) - b = get_expected(s, prop) - if not (is_list_like(a) and is_list_like(b)): - assert a == b - elif isinstance(a, DataFrame): - tm.assert_frame_equal(a, b) - else: - tm.assert_series_equal(a, b) + return Series(result, index=ser.index, name=ser.name) + + left = getattr(ser.dt, name) + right = get_expected(ser, name) + if not (is_list_like(left) and is_list_like(right)): + assert left == right + elif isinstance(left, DataFrame): + tm.assert_frame_equal(left, right) + else: + tm.assert_series_equal(left, right) + + @pytest.mark.parametrize("freq", ["D", "s", "ms"]) + def test_dt_namespace_accessor_datetime64(self, freq): + # GH#7207, GH#11128 + # test .dt namespace accessor # datetimeindex - cases = [ - 
Series(date_range("20130101", periods=5), name="xxx"), - Series(date_range("20130101", periods=5, freq="s"), name="xxx"), - Series(date_range("20130101 00:00:00", periods=5, freq="ms"), name="xxx"), - ] - for s in cases: - for prop in ok_for_dt: - # we test freq below - # we ignore week and weekofyear because they are deprecated - if prop not in ["freq", "week", "weekofyear"]: - compare(s, prop) + dti = date_range("20130101", periods=5, freq=freq) + ser = Series(dti, name="xxx") - for prop in ok_for_dt_methods: - getattr(s.dt, prop) + for prop in ok_for_dt: + # we test freq below + # we ignore week and weekofyear because they are deprecated + if prop not in ["freq", "week", "weekofyear"]: + self._compare(ser, prop) - result = s.dt.to_pydatetime() - assert isinstance(result, np.ndarray) - assert result.dtype == object + for prop in ok_for_dt_methods: + getattr(ser.dt, prop) - result = s.dt.tz_localize("US/Eastern") - exp_values = DatetimeIndex(s.values).tz_localize("US/Eastern") - expected = Series(exp_values, index=s.index, name="xxx") - tm.assert_series_equal(result, expected) + result = ser.dt.to_pydatetime() + assert isinstance(result, np.ndarray) + assert result.dtype == object - tz_result = result.dt.tz - assert str(tz_result) == "US/Eastern" - freq_result = s.dt.freq - assert freq_result == DatetimeIndex(s.values, freq="infer").freq - - # let's localize, then convert - result = s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") - exp_values = ( - DatetimeIndex(s.values).tz_localize("UTC").tz_convert("US/Eastern") - ) - expected = Series(exp_values, index=s.index, name="xxx") - tm.assert_series_equal(result, expected) + result = ser.dt.tz_localize("US/Eastern") + exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern") + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + tz_result = result.dt.tz + assert str(tz_result) == "US/Eastern" + freq_result = ser.dt.freq + assert freq_result == 
DatetimeIndex(ser.values, freq="infer").freq + + # let's localize, then convert + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") + exp_values = ( + DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern") + ) + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + def test_dt_namespace_accessor_datetime64tz(self): + # GH#7207, GH#11128 + # test .dt namespace accessor # datetimeindex with tz - s = Series(date_range("20130101", periods=5, tz="US/Eastern"), name="xxx") + dti = date_range("20130101", periods=5, tz="US/Eastern") + ser = Series(dti, name="xxx") for prop in ok_for_dt: # we test freq below # we ignore week and weekofyear because they are deprecated if prop not in ["freq", "week", "weekofyear"]: - compare(s, prop) + self._compare(ser, prop) for prop in ok_for_dt_methods: - getattr(s.dt, prop) + getattr(ser.dt, prop) - result = s.dt.to_pydatetime() + result = ser.dt.to_pydatetime() assert isinstance(result, np.ndarray) assert result.dtype == object - result = s.dt.tz_convert("CET") - expected = Series(s._values.tz_convert("CET"), index=s.index, name="xxx") + result = ser.dt.tz_convert("CET") + expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx") tm.assert_series_equal(result, expected) tz_result = result.dt.tz assert str(tz_result) == "CET" - freq_result = s.dt.freq - assert freq_result == DatetimeIndex(s.values, freq="infer").freq + freq_result = ser.dt.freq + assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_timedelta(self): + # GH#7207, GH#11128 + # test .dt namespace accessor # timedelta index cases = [ @@ -166,102 +181,115 @@ def compare(s, name): name="xxx", ), ] - for s in cases: + for ser in cases: for prop in ok_for_td: # we test freq below if prop != "freq": - compare(s, prop) + self._compare(ser, prop) for prop in ok_for_td_methods: - getattr(s.dt, prop) + getattr(ser.dt, prop) - result = 
s.dt.components + result = ser.dt.components assert isinstance(result, DataFrame) - tm.assert_index_equal(result.index, s.index) + tm.assert_index_equal(result.index, ser.index) - result = s.dt.to_pytimedelta() + result = ser.dt.to_pytimedelta() assert isinstance(result, np.ndarray) assert result.dtype == object - result = s.dt.total_seconds() + result = ser.dt.total_seconds() assert isinstance(result, Series) assert result.dtype == "float64" - freq_result = s.dt.freq - assert freq_result == TimedeltaIndex(s.values, freq="infer").freq + freq_result = ser.dt.freq + assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_period(self): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # periodindex + pi = period_range("20130101", periods=5, freq="D") + ser = Series(pi, name="xxx") + + for prop in ok_for_period: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_period_methods: + getattr(ser.dt, prop) + + freq_result = ser.dt.freq + assert freq_result == PeriodIndex(ser.values).freq + + def test_dt_namespace_accessor_index_and_values(self): # both index = date_range("20130101", periods=3, freq="D") - s = Series(date_range("20140204", periods=3, freq="s"), index=index, name="xxx") + dti = date_range("20140204", periods=3, freq="s") + ser = Series(dti, index=index, name="xxx") exp = Series( np.array([2014, 2014, 2014], dtype="int64"), index=index, name="xxx" ) - tm.assert_series_equal(s.dt.year, exp) + tm.assert_series_equal(ser.dt.year, exp) exp = Series(np.array([2, 2, 2], dtype="int64"), index=index, name="xxx") - tm.assert_series_equal(s.dt.month, exp) + tm.assert_series_equal(ser.dt.month, exp) exp = Series(np.array([0, 1, 2], dtype="int64"), index=index, name="xxx") - tm.assert_series_equal(s.dt.second, exp) - - exp = Series([s[0]] * 3, index=index, name="xxx") - tm.assert_series_equal(s.dt.normalize(), exp) - - # periodindex - cases = [Series(period_range("20130101", 
periods=5, freq="D"), name="xxx")] - for s in cases: - for prop in ok_for_period: - # we test freq below - if prop != "freq": - compare(s, prop) - - for prop in ok_for_period_methods: - getattr(s.dt, prop) + tm.assert_series_equal(ser.dt.second, exp) - freq_result = s.dt.freq - assert freq_result == PeriodIndex(s.values).freq + exp = Series([ser[0]] * 3, index=index, name="xxx") + tm.assert_series_equal(ser.dt.normalize(), exp) - # test limited display api - def get_dir(s): - results = [r for r in s.dt.__dir__() if not r.startswith("_")] - return sorted(set(results)) + def test_dt_accessor_limited_display_api(self): + # tznaive + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + results = get_dir(ser) + tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) - s = Series(date_range("20130101", periods=5, freq="D"), name="xxx") - results = get_dir(s) + # tzaware + ser = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + results = get_dir(ser) tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) - s = Series( + # Period + ser = Series( period_range("20130101", periods=5, freq="D", name="xxx").astype(object) ) - results = get_dir(s) + results = get_dir(ser) tm.assert_almost_equal( results, sorted(set(ok_for_period + ok_for_period_methods)) ) - # 11295 + def test_dt_accessor_ambiguous_freq_conversions(self): + # GH#11295 # ambiguous time error on the conversions - s = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") - s = s.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") - results = get_dir(s) - tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) + ser = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + exp_values = date_range( "2015-01-01", "2016-01-01", freq="T", tz="UTC" 
).tz_convert("America/Chicago") # freq not preserved by tz_localize above exp_values = exp_values._with_freq(None) expected = Series(exp_values, name="xxx") - tm.assert_series_equal(s, expected) + tm.assert_series_equal(ser, expected) + def test_dt_accessor_not_writeable(self): # no setting allowed - s = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") with pytest.raises(ValueError, match="modifications"): - s.dt.hour = 5 + ser.dt.hour = 5 # trying to set a copy msg = "modifications to a property of a datetimelike.+not supported" with pd.option_context("chained_assignment", "raise"): with pytest.raises(com.SettingWithCopyError, match=msg): - s.dt.hour[0] = 5 + ser.dt.hour[0] = 5 @pytest.mark.parametrize( "method, dates", @@ -273,24 +301,24 @@ def get_dir(s): ) def test_dt_round(self, method, dates): # round - s = Series( + ser = Series( pd.to_datetime( ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] ), name="xxx", ) - result = getattr(s.dt, method)("D") + result = getattr(ser.dt, method)("D") expected = Series(pd.to_datetime(dates), name="xxx") tm.assert_series_equal(result, expected) def test_dt_round_tz(self): - s = Series( + ser = Series( pd.to_datetime( ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] ), name="xxx", ) - result = s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D") + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D") exp_values = pd.to_datetime( ["2012-01-01", "2012-01-01", "2012-01-01"] @@ -339,23 +367,23 @@ def test_dt_round_tz_ambiguous(self, method): ) def test_dt_round_tz_nonexistent(self, method, ts_str, freq): # GH 23324 round near "spring forward" DST - s = Series([pd.Timestamp(ts_str, tz="America/Chicago")]) - result = getattr(s.dt, method)(freq, nonexistent="shift_forward") + ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")]) + result = getattr(ser.dt, 
method)(freq, nonexistent="shift_forward") expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")]) tm.assert_series_equal(result, expected) - result = getattr(s.dt, method)(freq, nonexistent="NaT") + result = getattr(ser.dt, method)(freq, nonexistent="NaT") expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz) tm.assert_series_equal(result, expected) with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"): - getattr(s.dt, method)(freq, nonexistent="raise") + getattr(ser.dt, method)(freq, nonexistent="raise") def test_dt_namespace_accessor_categorical(self): # GH 19468 dti = DatetimeIndex(["20171111", "20181212"]).repeat(2) - s = Series(pd.Categorical(dti), name="foo") - result = s.dt.year + ser = Series(pd.Categorical(dti), name="foo") + result = ser.dt.year expected = Series([2017, 2017, 2018, 2018], name="foo") tm.assert_series_equal(result, expected) @@ -394,9 +422,9 @@ def test_dt_other_accessors_categorical(self, accessor): def test_dt_accessor_no_new_attributes(self): # https://github.com/pandas-dev/pandas/issues/10673 - s = Series(date_range("20130101", periods=5, freq="D")) + ser = Series(date_range("20130101", periods=5, freq="D")) with pytest.raises(AttributeError, match="You cannot add any new attribute"): - s.dt.xlabel = "a" + ser.dt.xlabel = "a" @pytest.mark.parametrize( "time_locale", [None] if tm.get_locales() is None else [None] + tm.get_locales() @@ -434,7 +462,7 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): expected_days = calendar.day_name[:] expected_months = calendar.month_name[1:] - s = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365)) + ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365)) english_days = [ "Monday", "Tuesday", @@ -446,13 +474,13 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): ] for day, name, eng_name in zip(range(4, 11), expected_days, english_days): name = name.capitalize() - assert 
s.dt.day_name(locale=time_locale)[day] == name - assert s.dt.day_name(locale=None)[day] == eng_name - s = s.append(Series([pd.NaT])) - assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1]) + assert ser.dt.day_name(locale=time_locale)[day] == name + assert ser.dt.day_name(locale=None)[day] == eng_name + ser = ser.append(Series([pd.NaT])) + assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1]) - s = Series(date_range(freq="M", start="2012", end="2013")) - result = s.dt.month_name(locale=time_locale) + ser = Series(date_range(freq="M", start="2012", end="2013")) + result = ser.dt.month_name(locale=time_locale) expected = Series([month.capitalize() for month in expected_months]) # work around https://github.com/pandas-dev/pandas/issues/22342 @@ -461,7 +489,7 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): tm.assert_series_equal(result, expected) - for s_date, expected in zip(s, expected_months): + for s_date, expected in zip(ser, expected_months): result = s_date.month_name(locale=time_locale) expected = expected.capitalize() @@ -470,20 +498,20 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): assert result == expected - s = s.append(Series([pd.NaT])) - assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1]) + ser = ser.append(Series([pd.NaT])) + assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1]) def test_strftime(self): # GH 10086 - s = Series(date_range("20130101", periods=5)) - result = s.dt.strftime("%Y/%m/%d") + ser = Series(date_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") expected = Series( ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) tm.assert_series_equal(result, expected) - s = Series(date_range("2015-02-03 11:22:33.4567", periods=5)) - result = s.dt.strftime("%Y/%m/%d %H-%M-%S") + ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5)) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") expected = Series( [ "2015/02/03 11-22-33", @@ 
-495,15 +523,15 @@ def test_strftime(self): ) tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=5)) - result = s.dt.strftime("%Y/%m/%d") + ser = Series(period_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") expected = Series( ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) tm.assert_series_equal(result, expected) - s = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s")) - result = s.dt.strftime("%Y/%m/%d %H-%M-%S") + ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s")) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") expected = Series( [ "2015/02/03 11-22-33", @@ -515,9 +543,10 @@ def test_strftime(self): ) tm.assert_series_equal(result, expected) - s = Series(date_range("20130101", periods=5)) - s.iloc[0] = pd.NaT - result = s.dt.strftime("%Y/%m/%d") + def test_strftime_dt64_days(self): + ser = Series(date_range("20130101", periods=5)) + ser.iloc[0] = pd.NaT + result = ser.dt.strftime("%Y/%m/%d") expected = Series( [np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) @@ -533,6 +562,7 @@ def test_strftime(self): # dtype may be S10 or U10 depending on python version tm.assert_index_equal(result, expected) + def test_strftime_period_days(self): period_index = period_range("20150301", periods=5) result = period_index.strftime("%Y/%m/%d") expected = Index( @@ -541,13 +571,15 @@ def test_strftime(self): ) tm.assert_index_equal(result, expected) - s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) - result = s.dt.strftime("%Y-%m-%d %H:%M:%S") + def test_strftime_dt64_microsecond_resolution(self): + ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) + result = ser.dt.strftime("%Y-%m-%d %H:%M:%S") expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"]) tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=4, freq="H")) - result = 
s.dt.strftime("%Y/%m/%d %H:%M:%S") + def test_strftime_period_hours(self): + ser = Series(period_range("20130101", periods=4, freq="H")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S") expected = Series( [ "2013/01/01 00:00:00", @@ -556,9 +588,11 @@ def test_strftime(self): "2013/01/01 03:00:00", ] ) + tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=4, freq="L")) - result = s.dt.strftime("%Y/%m/%d %H:%M:%S.%l") + def test_strftime_period_minutes(self): + ser = Series(period_range("20130101", periods=4, freq="L")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l") expected = Series( [ "2013/01/01 00:00:00.000", @@ -578,8 +612,8 @@ def test_strftime(self): ) def test_strftime_nat(self, data): # GH 29578 - s = Series(data) - result = s.dt.strftime("%Y-%m-%d") + ser = Series(data) + result = ser.dt.strftime("%Y-%m-%d") expected = Series(["2019-01-01", np.nan]) tm.assert_series_equal(result, expected) @@ -591,16 +625,16 @@ def test_valid_dt_with_missing_values(self): ) # GH 8689 - s = Series(date_range("20130101", periods=5, freq="D")) - s.iloc[2] = pd.NaT + ser = Series(date_range("20130101", periods=5, freq="D")) + ser.iloc[2] = pd.NaT for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]: - expected = getattr(s.dt, attr).copy() + expected = getattr(ser.dt, attr).copy() expected.iloc[2] = np.nan - result = getattr(s.dt, attr) + result = getattr(ser.dt, attr) tm.assert_series_equal(result, expected) - result = s.dt.date + result = ser.dt.date expected = Series( [ date(2013, 1, 1), @@ -613,7 +647,7 @@ def test_valid_dt_with_missing_values(self): ) tm.assert_series_equal(result, expected) - result = s.dt.time + result = ser.dt.time expected = Series([time(0), time(0), np.nan, time(0), time(0)], dtype="object") tm.assert_series_equal(result, expected) @@ -626,8 +660,8 @@ def test_dt_accessor_api(self): assert Series.dt is CombinedDatetimelikeProperties - s = Series(date_range("2000-01-01", periods=3)) - assert 
isinstance(s.dt, DatetimeProperties) + ser = Series(date_range("2000-01-01", periods=3)) + assert isinstance(ser.dt, DatetimeProperties) @pytest.mark.parametrize( "ser", [Series(np.arange(5)), Series(list("abcde")), Series(np.random.randn(5))] @@ -639,11 +673,11 @@ def test_dt_accessor_invalid(self, ser): assert not hasattr(ser, "dt") def test_dt_accessor_updates_on_inplace(self): - s = Series(date_range("2018-01-01", periods=10)) - s[2] = None - return_value = s.fillna(pd.Timestamp("2018-01-01"), inplace=True) + ser = Series(date_range("2018-01-01", periods=10)) + ser[2] = None + return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True) assert return_value is None - result = s.dt.date + result = ser.dt.date assert result[0] == result[2] def test_date_tz(self): @@ -652,10 +686,10 @@ def test_date_tz(self): ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz="US/Eastern", ) - s = Series(rng) + ser = Series(rng) expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)]) - tm.assert_series_equal(s.dt.date, expected) - tm.assert_series_equal(s.apply(lambda x: x.date()), expected) + tm.assert_series_equal(ser.dt.date, expected) + tm.assert_series_equal(ser.apply(lambda x: x.date()), expected) def test_dt_timetz_accessor(self, tz_naive_fixture): # GH21358 @@ -664,11 +698,11 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): dtindex = DatetimeIndex( ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz ) - s = Series(dtindex) + ser = Series(dtindex) expected = Series( [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)] ) - result = s.dt.timetz + result = ser.dt.timetz tm.assert_series_equal(result, expected) @pytest.mark.parametrize( @@ -731,9 +765,9 @@ def test_end_time_timevalues(self, input_vals): # when using the dt accessor on a Series input_vals = PeriodArray._from_sequence(np.asarray(input_vals)) - s = Series(input_vals) - result = s.dt.end_time - expected = s.apply(lambda x: 
x.end_time) + ser = Series(input_vals) + result = ser.dt.end_time + expected = ser.apply(lambda x: x.end_time) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")]) @@ -755,7 +789,7 @@ def test_week_and_weekofyear_are_deprecated(): def test_normalize_pre_epoch_dates(): # GH: 36294 - s = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"])) - result = s.dt.normalize() + ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"])) + result = ser.dt.normalize() expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"])) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index 03b1c512f9053..4c17917b949ca 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -36,6 +36,12 @@ class TestSeriesGetitemScalars: + def test_getitem_object_index_float_string(self): + # GH#17286 + ser = Series([1] * 4, index=Index(["a", "b", "c", 1.0])) + assert ser["a"] == 1 + assert ser[1.0] == 1 + def test_getitem_float_keys_tuple_values(self): # see GH#13509 diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index d77f831bee8bc..8a34882b1e5d4 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -338,26 +338,19 @@ def test_slice_with_zero_step_raises(index, frame_or_series, indexer_sli): ], ) def test_slice_with_negative_step(index): - def assert_slices_equivalent(l_slc, i_slc): - expected = ts.iloc[i_slc] - - tm.assert_series_equal(ts[l_slc], expected) - tm.assert_series_equal(ts.loc[l_slc], expected) - keystr1 = str(index[9]) keystr2 = str(index[13]) - box = type(index[0]) - ts = Series(np.arange(20), index) + ser = Series(np.arange(20), index) SLC = IndexSlice - for key in [keystr1, box(keystr1)]: - assert_slices_equivalent(SLC[key::-1], 
SLC[9::-1]) - assert_slices_equivalent(SLC[:key:-1], SLC[:8:-1]) + for key in [keystr1, index[9]]: + tm.assert_indexing_slices_equivalent(ser, SLC[key::-1], SLC[9::-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[:key:-1], SLC[:8:-1]) - for key2 in [keystr2, box(keystr2)]: - assert_slices_equivalent(SLC[key2:key:-1], SLC[13:8:-1]) - assert_slices_equivalent(SLC[key:key2:-1], SLC[0:0:-1]) + for key2 in [keystr2, index[13]]: + tm.assert_indexing_slices_equivalent(ser, SLC[key2:key:-1], SLC[13:8:-1]) + tm.assert_indexing_slices_equivalent(ser, SLC[key:key2:-1], SLC[0:0:-1]) def test_tuple_index(): @@ -377,17 +370,3 @@ def test_frozenset_index(): assert s[idx1] == 2 s[idx1] = 3 assert s[idx1] == 3 - - -def test_boolean_index(): - # GH18579 - s1 = Series([1, 2, 3], index=[4, 5, 6]) - s2 = Series([1, 3, 2], index=s1 == 2) - tm.assert_series_equal(Series([1, 3, 2], [False, True, False]), s2) - - -def test_index_ndim_gt_1_raises(): - # GH18579 - df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) - with pytest.raises(ValueError, match="Index data must be 1-dimensional"): - Series([1, 3, 2], index=df) diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index 7eb51f8037792..8b5557ab6e85f 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -224,6 +224,22 @@ def test_drop_duplicates_categorical_bool(self, ordered): assert return_value is None tm.assert_series_equal(sc, tc[~expected]) + def test_drop_duplicates_categorical_bool_na(self, nulls_fixture): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, nulls_fixture], + categories=[True, False], + ordered=True, + ) + ) + result = ser.drop_duplicates() + expected = Series( + Categorical([True, False, np.nan], categories=[True, False], ordered=True), + index=[0, 1, 4], + ) + tm.assert_series_equal(result, expected) + def 
test_drop_duplicates_pos_args_deprecation(): # GH#41485 diff --git a/pandas/tests/series/methods/test_duplicated.py b/pandas/tests/series/methods/test_duplicated.py index 5cc297913e851..1c547ee99efed 100644 --- a/pandas/tests/series/methods/test_duplicated.py +++ b/pandas/tests/series/methods/test_duplicated.py @@ -1,7 +1,10 @@ import numpy as np import pytest -from pandas import Series +from pandas import ( + Categorical, + Series, +) import pandas._testing as tm @@ -33,3 +36,17 @@ def test_duplicated_nan_none(keep, expected): result = ser.duplicated(keep=keep) tm.assert_series_equal(result, expected) + + +def test_duplicated_categorical_bool_na(nulls_fixture): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, nulls_fixture], + categories=[True, False], + ordered=True, + ) + ) + result = ser.duplicated() + expected = Series([False, False, True, True, False]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index b49c209a59a06..4e4eb89328540 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -182,3 +182,29 @@ def test_inspect_getmembers(self): ser = Series(dtype=object) with tm.assert_produces_warning(None): inspect.getmembers(ser) + + def test_unknown_attribute(self): + # GH#9680 + tdi = pd.timedelta_range(start=0, periods=10, freq="1s") + ser = Series(np.random.normal(size=10), index=tdi) + assert "foo" not in ser.__dict__.keys() + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + ser.foo + + def test_datetime_series_no_datelike_attrs(self, datetime_series): + # GH#7206 + for op in ["year", "day", "second", "weekday"]: + msg = f"'Series' object has no attribute '{op}'" + with pytest.raises(AttributeError, match=msg): + getattr(datetime_series, op) + + def test_series_datetimelike_attribute_access(self): + # attribute access should still work! 
+ ser = Series({"year": 2000, "month": 1, "day": 10}) + assert ser.year == 2000 + assert ser.month == 1 + assert ser.day == 10 + msg = "'Series' object has no attribute 'weekday'" + with pytest.raises(AttributeError, match=msg): + ser.weekday diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 2c33284df18c5..1b488b4cf0b77 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -154,6 +154,12 @@ def test_constructor(self, datetime_series): with pytest.raises(NotImplementedError, match=msg): Series(m) + def test_constructor_index_ndim_gt_1_raises(self): + # GH#18579 + df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) + with pytest.raises(ValueError, match="Index data must be 1-dimensional"): + Series([1, 3, 2], index=df) + @pytest.mark.parametrize("input_class", [list, dict, OrderedDict]) def test_constructor_empty(self, input_class): with tm.assert_produces_warning(FutureWarning): @@ -276,6 +282,15 @@ def test_constructor_list_like(self): result = Series(obj, index=[0, 1, 2]) tm.assert_series_equal(result, expected) + def test_constructor_boolean_index(self): + # GH#18579 + s1 = Series([1, 2, 3], index=[4, 5, 6]) + + index = s1 == 2 + result = Series([1, 3, 2], index=index) + expected = Series([1, 3, 2], index=[False, True, False]) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"]) def test_constructor_index_dtype(self, dtype): # GH 17088 diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 1f75bc11005bc..4867ba58838ef 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -46,6 +46,14 @@ from pandas.core.tools.datetimes import start_caching_at +@pytest.fixture(params=[True, False]) +def cache(request): + """ + cache keyword to pass to to_datetime. 
+ """ + return request.param + + class TestTimeConversionFormats: @pytest.mark.parametrize("readonly", [True, False]) def test_to_datetime_readonly(self, readonly): @@ -57,7 +65,6 @@ def test_to_datetime_readonly(self, readonly): expected = to_datetime([]) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format(self, cache): values = ["1/1/2000", "1/2/2000", "1/3/2000"] @@ -82,7 +89,6 @@ def test_to_datetime_format(self, cache): else: tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_YYYYMMDD(self, cache): s = Series([19801222, 19801222] + [19810105] * 5) expected = Series([Timestamp(x) for x in s.apply(str)]) @@ -109,17 +115,18 @@ def test_to_datetime_format_YYYYMMDD(self, cache): result = to_datetime(s, format="%Y%m%d", cache=cache) tm.assert_series_equal(result, expected) + def test_to_datetime_format_YYYYMMDD_coercion(self, cache): # coercion # GH 7930 - s = Series([20121231, 20141231, 99991231]) - result = to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache) + ser = Series([20121231, 20141231, 99991231]) + result = to_datetime(ser, format="%Y%m%d", errors="ignore", cache=cache) expected = Series( [datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)], dtype=object, ) tm.assert_series_equal(result, expected) - result = to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache) + result = to_datetime(ser, format="%Y%m%d", errors="coerce", cache=cache) expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]") tm.assert_series_equal(result, expected) @@ -199,7 +206,6 @@ def test_to_datetime_with_NA(self, data, format, expected): result = to_datetime(data, format=format) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_integer(self, cache): # GH 10178 s = Series([2000, 2001, 2002]) @@ -236,7 +242,6 @@ def 
test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected): result = to_datetime(int_date, format="%Y%m%d", errors="ignore") assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_microsecond(self, cache): # these are locale dependent @@ -249,7 +254,6 @@ def test_to_datetime_format_microsecond(self, cache): exp = datetime.strptime(val, format) assert result == exp - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_time(self, cache): data = [ ["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")], @@ -259,6 +263,7 @@ def test_to_datetime_format_time(self, cache): "%m/%d/%Y %H:%M:%S", Timestamp("2010-01-10 13:56:01"), ] # , + # FIXME: don't leave commented-out # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', # Timestamp('2010-01-10 20:14')], # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', @@ -270,7 +275,6 @@ def test_to_datetime_format_time(self, cache): assert to_datetime(s, format=format, cache=cache) == dt @td.skip_if_has_locale - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_non_exact(self, cache): # GH 10834 # 8904 @@ -284,7 +288,6 @@ def test_to_datetime_with_non_exact(self, cache): ) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_parse_nanoseconds_with_formula(self, cache): # GH8989 @@ -300,14 +303,15 @@ def test_parse_nanoseconds_with_formula(self, cache): result = to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache) assert result == expected - @pytest.mark.parametrize("cache", [True, False]) - def test_to_datetime_format_weeks(self, cache): - data = [ + @pytest.mark.parametrize( + "value,fmt,expected", + [ ["2009324", "%Y%W%w", Timestamp("2009-08-13")], ["2013020", "%Y%U%w", Timestamp("2013-01-13")], - ] - for s, format, dt in data: - assert to_datetime(s, format=format, cache=cache) == dt + ], + ) + def test_to_datetime_format_weeks(self, value, fmt, expected, cache): 
+ assert to_datetime(value, format=fmt, cache=cache) == expected @pytest.mark.parametrize( "fmt,dates,expected_dates", @@ -601,7 +605,6 @@ def test_to_datetime_today_now_unicode_bytes(self): to_datetime(["now"]) to_datetime(["today"]) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_dt64s(self, cache): in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")] @@ -611,7 +614,6 @@ def test_to_datetime_dt64s(self, cache): @pytest.mark.parametrize( "dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")] ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_dt64s_out_of_bounds(self, cache, dt): msg = f"Out of bounds nanosecond timestamp: {dt}" with pytest.raises(OutOfBoundsDatetime, match=msg): @@ -620,7 +622,6 @@ def test_to_datetime_dt64s_out_of_bounds(self, cache, dt): Timestamp(dt) assert to_datetime(dt, errors="coerce", cache=cache) is NaT - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize("unit", ["s", "D"]) def test_to_datetime_array_of_dt64s(self, cache, unit): # https://github.com/pandas-dev/pandas/issues/31491 @@ -659,7 +660,6 @@ def test_to_datetime_array_of_dt64s(self, cache, unit): Index([dt.item() for dt in dts_with_oob]), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_tz(self, cache): # xref 8260 @@ -686,7 +686,6 @@ def test_to_datetime_tz(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(arr, cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_different_offsets(self, cache): # inspired by asv timeseries.ToDatetimeNONISO8601 benchmark # see GH-26097 for more @@ -697,7 +696,6 @@ def test_to_datetime_different_offsets(self, cache): result = to_datetime(arr, cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_tz_pytz(self, cache): # see gh-8260 us_eastern = pytz.timezone("US/Eastern") @@ -720,19 +718,16 @@ def 
test_to_datetime_tz_pytz(self, cache): ) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( - "init_constructor, end_constructor, test_method", + "init_constructor, end_constructor", [ - (Index, DatetimeIndex, tm.assert_index_equal), - (list, DatetimeIndex, tm.assert_index_equal), - (np.array, DatetimeIndex, tm.assert_index_equal), - (Series, Series, tm.assert_series_equal), + (Index, DatetimeIndex), + (list, DatetimeIndex), + (np.array, DatetimeIndex), + (Series, Series), ], ) - def test_to_datetime_utc_true( - self, cache, init_constructor, end_constructor, test_method - ): + def test_to_datetime_utc_true(self, cache, init_constructor, end_constructor): # See gh-11934 & gh-6415 data = ["20100102 121314", "20100102 121315"] expected_data = [ @@ -744,14 +739,13 @@ def test_to_datetime_utc_true( init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache ) expected = end_constructor(expected_data) - test_method(result, expected) + tm.assert_equal(result, expected) # Test scalar case as well for scalar, expected in zip(data, expected_data): result = to_datetime(scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache) assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_utc_true_with_series_single_value(self, cache): # GH 15760 UTC=True with Series ts = 1.5e18 @@ -759,7 +753,6 @@ def test_to_datetime_utc_true_with_series_single_value(self, cache): expected = Series([Timestamp(ts, tz="utc")]) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): ts = "2013-01-01 00:00:00-01:00" expected_ts = "2013-01-01 01:00:00" @@ -768,7 +761,6 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): expected = Series([Timestamp(expected_ts, tz="utc")] * 3) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, 
False]) @pytest.mark.parametrize( "date, dtype", [ @@ -781,7 +773,6 @@ def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype): result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) @td.skip_if_no("psycopg2") def test_to_datetime_tz_psycopg2(self, cache): @@ -822,7 +813,6 @@ def test_to_datetime_tz_psycopg2(self, cache): expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]") tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_datetime_bool(self, cache): # GH13176 msg = r"dtype bool cannot be converted to datetime64\[ns\]" @@ -945,18 +935,6 @@ def test_to_datetime_cache(self, utc, format, constructor): tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( - "listlike", - [ - (deque([Timestamp("2010-06-02 09:30:00")] * 51)), - ([Timestamp("2010-06-02 09:30:00")] * 51), - (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), - ], - ) - def test_no_slicing_errors_in_should_cache(self, listlike): - # GH 29403 - assert tools.should_cache(listlike) is True - def test_to_datetime_from_deque(self): # GH 29403 result = to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51)) @@ -1198,7 +1176,6 @@ def test_to_datetime_fixed_offset(self): class TestToDatetimeUnit: - @pytest.mark.parametrize("cache", [True, False]) def test_unit(self, cache): # GH 11758 # test proper behavior with errors @@ -1247,17 +1224,19 @@ def test_unit(self, cache): with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(values, errors="raise", unit="s", cache=cache) + def test_to_datetime_invalid_str_not_out_of_bounds_valuerror(self, cache): # if we have a string, then we raise a ValueError # and NOT an OutOfBoundsDatetime - for val in ["foo", Timestamp("20130101")]: - try: - to_datetime(val, errors="raise", unit="s", cache=cache) - except OutOfBoundsDatetime as err: - raise 
AssertionError("incorrect exception raised") from err - except ValueError: - pass - - @pytest.mark.parametrize("cache", [True, False]) + + try: + to_datetime("foo", errors="raise", unit="s", cache=cache) + except OutOfBoundsDatetime as err: + raise AssertionError("incorrect exception raised") from err + except ValueError: + pass + else: + assert False, "Failed to raise ValueError" + def test_unit_consistency(self, cache): # consistency of conversions @@ -1274,7 +1253,6 @@ def test_unit_consistency(self, cache): assert result == expected assert isinstance(result, Timestamp) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_with_numeric(self, cache): # GH 13180 @@ -1303,7 +1281,6 @@ def test_unit_with_numeric(self, cache): result = to_datetime(arr, errors="coerce", cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_mixed(self, cache): # mixed integers/datetimes @@ -1324,7 +1301,6 @@ def test_unit_mixed(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(arr, errors="raise", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_rounding(self, cache): # GH 14156 & GH 20445: argument will incur floating point errors # but no premature rounding @@ -1332,17 +1308,105 @@ def test_unit_rounding(self, cache): expected = Timestamp("2015-06-19 19:55:31.877000192") assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_unit_ignore_keeps_name(self, cache): # GH 21697 expected = Index([15e9] * 2, name="name") result = to_datetime(expected, errors="ignore", unit="s", cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) - def test_dataframe(self, cache): + def test_to_datetime_errors_ignore_utc_true(self): + # GH#23758 + result = to_datetime([1], unit="s", utc=True, errors="ignore") + expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC") + tm.assert_index_equal(result, 
expected) + + # TODO: this is moved from tests.series.test_timeseries, may be redundant + def test_to_datetime_unit(self): + + epoch = 1370745748 + s1 = Series([epoch + t for t in range(20)]) + s2 = Series([epoch + t for t in range(20)]).astype(float) + + for ser in [s1, s2]: + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ] + ) + tm.assert_series_equal(result, expected) + + s1 = Series([epoch + t for t in range(20)] + [iNaT]) + s2 = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) + s3 = Series([epoch + t for t in range(20)] + [np.nan]) + + for ser in [s1, s2, s3]: + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_fractional_seconds(self): + + # GH13834 + epoch = 1370745748 + s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) + result = to_datetime(s, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in np.arange(0, 2, 0.25) + ] + + [NaT] + ) + # GH20455 argument will incur floating point errors but no premature rounding + result = result.round("ms") + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_na_values(self): + result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 + ) + tm.assert_index_equal(result, expected) + + def test_to_datetime_unit_invalid(self): + msg = "non convertible value foo with the unit 'D'" + with pytest.raises(ValueError, match=msg): + to_datetime([1, 2, "foo"], unit="D") + msg = "cannot convert input 111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime([1, 2, 111111111], unit="D") + + def 
test_to_timestamp_unit_coerce(self): + # coerce we can process + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 + ) + result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) - df = DataFrame( + result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + + +class TestToDatetimeDataFrame: + @pytest.fixture(params=[True, False]) + def cache(self, request): + """ + cache keyword to pass to to_datetime. + """ + return request.param + + @pytest.fixture + def df(self): + return DataFrame( { "year": [2015, 2016], "month": [2, 3], @@ -1356,6 +1420,8 @@ def test_dataframe(self, cache): } ) + def test_dataframe(self, df, cache): + result = to_datetime( {"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache ) @@ -1377,6 +1443,7 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected2) + def test_dataframe_field_aliases_column_subset(self, df, cache): # unit mappings units = [ { @@ -1404,6 +1471,7 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected) + def test_dataframe_field_aliases(self, df, cache): d = { "year": "year", "month": "month", @@ -1425,10 +1493,18 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected) + def test_dataframe_str_dtype(self, df, cache): # coerce back to int result = to_datetime(df.astype(str), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) tm.assert_series_equal(result, expected) + def test_dataframe_coerce(self, cache): # passing coerce df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) @@ -1438,10 +1514,12 @@ def test_dataframe(self, cache): ) with pytest.raises(ValueError, match=msg): to_datetime(df2, cache=cache) + result = to_datetime(df2, errors="coerce", cache=cache) expected = Series([Timestamp("20150204 
00:00:00"), NaT]) tm.assert_series_equal(result, expected) + def test_dataframe_extra_keys_raisesm(self, df, cache): # extra columns msg = r"extra keys have been passed to the datetime assemblage: \[foo\]" with pytest.raises(ValueError, match=msg): @@ -1449,6 +1527,7 @@ def test_dataframe(self, cache): df2["foo"] = 1 to_datetime(df2, cache=cache) + def test_dataframe_missing_keys_raises(self, df, cache): # not enough msg = ( r"to assemble mappings requires at least that \[year, month, " @@ -1464,6 +1543,7 @@ def test_dataframe(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(df[c], cache=cache) + def test_dataframe_duplicate_columns_raises(self, cache): # duplicates msg = "cannot assemble with duplicate keys" df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) @@ -1478,9 +1558,8 @@ def test_dataframe(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(df2, cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_dataframe_dtypes(self, cache): - # #13451 + # GH#13451 df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) # int16 @@ -1506,7 +1585,7 @@ def test_dataframe_dtypes(self, cache): to_datetime(df, cache=cache) def test_dataframe_utc_true(self): - # GH 23760 + # GH#23760 df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) result = to_datetime(df, utc=True) expected = Series( @@ -1514,94 +1593,6 @@ def test_dataframe_utc_true(self): ).dt.tz_localize("UTC") tm.assert_series_equal(result, expected) - def test_to_datetime_errors_ignore_utc_true(self): - # GH 23758 - result = to_datetime([1], unit="s", utc=True, errors="ignore") - expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC") - tm.assert_index_equal(result, expected) - - # TODO: this is moved from tests.series.test_timeseries, may be redundant - def test_to_datetime_unit(self): - - epoch = 1370745748 - s = Series([epoch + t for t in range(20)]) - result = to_datetime(s, unit="s") - expected = 
Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - # GH13834 - s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [ - Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) - for t in np.arange(0, 2, 0.25) - ] - + [NaT] - ) - # GH20455 argument will incur floating point errors but no premature rounding - result = result.round("ms") - tm.assert_series_equal(result, expected) - - s = pd.concat( - [Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])], - ignore_index=True, - ) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 - ) - tm.assert_index_equal(result, expected) - - msg = "non convertible value foo with the unit 'D'" - with pytest.raises(ValueError, match=msg): - to_datetime([1, 2, "foo"], unit="D") - msg = "cannot convert input 
111111111 with the unit 'D'" - with pytest.raises(OutOfBoundsDatetime, match=msg): - to_datetime([1, 2, 111111111], unit="D") - - # coerce we can process - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 - ) - result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - - result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - class TestToDatetimeMisc: def test_to_datetime_barely_out_of_bounds(self): @@ -1614,7 +1605,6 @@ def test_to_datetime_barely_out_of_bounds(self): with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(arr) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_iso8601(self, cache): result = to_datetime(["2012-01-01 00:00:00"], cache=cache) exp = Timestamp("2012-01-01 00:00:00") @@ -1624,19 +1614,17 @@ def test_to_datetime_iso8601(self, cache): exp = Timestamp("2012-10-01") assert result[0] == exp - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_default(self, cache): rs = to_datetime("2001", cache=cache) xp = datetime(2001, 1, 1) assert rs == xp # dayfirst is essentially broken - + # FIXME: don't leave commented-out # to_datetime('01-13-2012', dayfirst=True) # pytest.raises(ValueError, to_datetime('01-13-2012', # dayfirst=True)) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_on_datetime64_series(self, cache): # #2699 s = Series(date_range("1/1/2000", periods=10)) @@ -1644,7 +1632,6 @@ def test_to_datetime_on_datetime64_series(self, cache): result = to_datetime(s, cache=cache) assert result[0] == s[0] - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_space_in_series(self, cache): # GH 6428 s = Series(["10/18/2006", "10/18/2008", " "]) @@ -1658,7 +1645,6 @@ def test_to_datetime_with_space_in_series(self, cache): tm.assert_series_equal(result_ignore, s) @td.skip_if_has_locale - 
@pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_apply(self, cache): # this is only locale tested with US/None locales # GH 5195 @@ -1681,7 +1667,6 @@ def test_to_datetime_with_apply(self, cache): ) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_types(self, cache): # empty string @@ -1701,18 +1686,19 @@ def test_to_datetime_types(self, cache): result = to_datetime("2012", cache=cache) assert result == expected + # FIXME: don't leave commented-out # array = ['2012','20120101','20120101 12:01:01'] array = ["20120101", "20120101 12:01:01"] expected = list(to_datetime(array, cache=cache)) result = [Timestamp(date_str) for date_str in array] tm.assert_almost_equal(result, expected) + # FIXME: don't leave commented-out # currently fails ### # result = Timestamp('2012') # expected = to_datetime('2012') # assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_unprocessable_input(self, cache): # GH 4928 # GH 21864 @@ -1724,7 +1710,6 @@ def test_to_datetime_unprocessable_input(self, cache): with pytest.raises(TypeError, match=msg): to_datetime([1, "1"], errors="raise", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_unhashable_input(self, cache): series = Series([["a"]] * 100) result = to_datetime(series, errors="ignore", cache=cache) @@ -1765,7 +1750,6 @@ def test_to_datetime_overflow(self): with pytest.raises(OutOfBoundsTimedelta, match=msg): date_range(start="1/1/1700", freq="B", periods=100000) - @pytest.mark.parametrize("cache", [True, False]) def test_string_na_nat_conversion(self, cache): # GH #999, #858 @@ -1846,7 +1830,6 @@ def test_string_na_nat_conversion(self, cache): "datetime64[ns]", ], ) - @pytest.mark.parametrize("cache", [True, False]) def test_dti_constructor_numpy_timeunits(self, cache, dtype): # GH 9114 base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache) @@ 
-1856,7 +1839,6 @@ def test_dti_constructor_numpy_timeunits(self, cache, dtype): tm.assert_index_equal(DatetimeIndex(values), base) tm.assert_index_equal(to_datetime(values, cache=cache), base) - @pytest.mark.parametrize("cache", [True, False]) def test_dayfirst(self, cache): # GH 5917 arr = ["10/02/2014", "11/02/2014", "12/02/2014"] @@ -1980,7 +1962,6 @@ def test_guess_datetime_format_for_array(self): class TestToDatetimeInferFormat: - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_consistent_format(self, cache): s = Series(date_range("20000101", periods=50, freq="H")) @@ -2002,7 +1983,6 @@ def test_to_datetime_infer_datetime_format_consistent_format(self, cache): tm.assert_series_equal(with_format, no_infer) tm.assert_series_equal(no_infer, yes_infer) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache): s = Series( np.array( @@ -2024,7 +2004,6 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache): to_datetime(s, infer_datetime_format=True, cache=cache), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): s = Series( np.array( @@ -2037,7 +2016,6 @@ def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): to_datetime(s, infer_datetime_format=True, cache=cache), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache): s = Series( np.array( @@ -2086,7 +2064,6 @@ def test_infer_datetime_format_zero_tz(self, ts, zero_tz, is_utc): expected = Series([Timestamp(ts, tz=tz)]) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_iso8601_noleading_0s(self, cache): # GH 11871 s = Series(["2014-1-1", "2014-2-2", "2015-3-3"]) @@ -2104,7 +2081,6 @@ def test_to_datetime_iso8601_noleading_0s(self, cache): class 
TestDaysInMonth: # tests for issue #10154 - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_coerce(self, cache): assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache)) assert isna( @@ -2117,7 +2093,6 @@ def test_day_not_in_month_coerce(self, cache): to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache) ) - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_raise(self, cache): msg = "day is out of range for month" with pytest.raises(ValueError, match=msg): @@ -2135,7 +2110,6 @@ def test_day_not_in_month_raise(self, cache): with pytest.raises(ValueError, match=msg): to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_ignore(self, cache): assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29" assert ( @@ -2205,7 +2179,6 @@ class TestDatetimeParsingWrappers: }.items() ), ) - @pytest.mark.parametrize("cache", [True, False]) def test_parsers(self, date_str, expected, cache): # dateutil >= 2.5.0 defaults to yearfirst=True @@ -2237,7 +2210,6 @@ def test_parsers(self, date_str, expected, cache): result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst) assert result7 == expected - @pytest.mark.parametrize("cache", [True, False]) def test_na_values_with_cache( self, cache, unique_nulls_fixture, unique_nulls_fixture2 ): @@ -2257,7 +2229,6 @@ def test_parsers_nat(self): assert result3 is NaT assert result4 is NaT - @pytest.mark.parametrize("cache", [True, False]) def test_parsers_dayfirst_yearfirst(self, cache): # OK # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 @@ -2345,7 +2316,6 @@ def test_parsers_dayfirst_yearfirst(self, cache): assert result3 == expected assert result4 == expected - @pytest.mark.parametrize("cache", [True, False]) def test_parsers_timestring(self, cache): # must be the same as dateutil result cases = { @@ -2368,7 
+2338,6 @@ def test_parsers_timestring(self, cache): assert result4 == exp_now assert result5 == exp_now - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( "dt_string, tz, dt_string_repr", [ @@ -2564,29 +2533,44 @@ def test_arg_tz_ns_unit(self, offset, utc, exp): tm.assert_index_equal(result, expected) -@pytest.mark.parametrize( - "listlike,do_caching", - [([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)], -) -def test_should_cache(listlike, do_caching): - assert ( - tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) - == do_caching +class TestShouldCache: + @pytest.mark.parametrize( + "listlike,do_caching", + [ + ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), + ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True), + ], ) + def test_should_cache(self, listlike, do_caching): + assert ( + tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) + == do_caching + ) + @pytest.mark.parametrize( + "unique_share,check_count, err_message", + [ + (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), + (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), + ], + ) + def test_should_cache_errors(self, unique_share, check_count, err_message): + arg = [5] * 10 -@pytest.mark.parametrize( - "unique_share,check_count, err_message", - [ - (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), - (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), - ], -) -def test_should_cache_errors(unique_share, check_count, err_message): - arg = [5] * 10 + with pytest.raises(AssertionError, match=err_message): + tools.should_cache(arg, unique_share, check_count) - with pytest.raises(AssertionError, match=err_message): - tools.should_cache(arg, unique_share, check_count) + @pytest.mark.parametrize( + "listlike", + [ + (deque([Timestamp("2010-06-02 09:30:00")] * 51)), + ([Timestamp("2010-06-02 09:30:00")] * 51), + (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), + ], + ) + def 
test_no_slicing_errors_in_should_cache(self, listlike): + # GH#29403 + assert tools.should_cache(listlike) is True def test_nullable_integer_to_datetime(): @@ -2624,7 +2608,7 @@ def test_na_to_datetime(nulls_fixture, klass): assert result[0] is NaT -def test_empty_string_datetime_coerce__format(): +def test_empty_string_datetime_coerce_format(): # GH13044 td = Series(["03/24/2016", "03/25/2016", ""]) format = "%m/%d/%Y" diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index c2d7f7b3f716c..fc01771507888 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -29,6 +29,7 @@ from pandas._libs.tslibs.parsing import get_rule_month from pandas._typing import npt from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64_dtype, @@ -116,7 +117,7 @@ def get_offset(name: str) -> DateOffset: "get_offset is deprecated and will be removed in a future version, " "use to_offset instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return _get_offset(name) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index f8bd1ec7bc96a..ee54b1b2074cb 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -12,6 +12,8 @@ import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import ( is_bool, is_integer, @@ -339,7 +341,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): "positional arguments for 'index' or 'columns' will raise " "a 'TypeError'." 
) - warnings.warn(msg, FutureWarning, stacklevel=4) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) out[data._get_axis_name(0)] = args[0] out[data._get_axis_name(1)] = args[1] else: diff --git a/pandas/util/testing.py b/pandas/util/testing.py index af9fe4846b27d..0ab59a202149d 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1,5 +1,7 @@ import warnings +from pandas.util._exceptions import find_stack_level + from pandas._testing import * # noqa warnings.warn( @@ -8,5 +10,5 @@ "public API at pandas.testing instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), )
xref #37715
https://api.github.com/repos/pandas-dev/pandas/pulls/44373
2021-11-09T21:59:02Z
2021-11-28T21:11:21Z
null
2021-11-29T03:22:50Z
TST: split/collect partial tests
diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index c487777fc339e..82d55a7bf7189 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -22,6 +22,213 @@ import pandas._testing as tm +class TestEmptyFrameSetitemExpansion: + def test_empty_frame_setitem_index_name_retained(self): + # GH#31368 empty frame has non-None index.name -> retained + df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) + + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") + ) + + tm.assert_frame_equal(df, expected) + + def test_empty_frame_setitem_index_name_inherited(self): + # GH#36527 empty frame has None index.name -> not retained + df = DataFrame() + series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) + df["series"] = series + expected = DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") + ) + tm.assert_frame_equal(df, expected) + + def test_loc_setitem_zerolen_series_columns_align(self): + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=range(4)) + expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64) + tm.assert_frame_equal(df, expected) + + # columns will align + df = DataFrame(columns=["A", "B"]) + df.loc[0] = Series(1, index=["B"]) + + exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64") + tm.assert_frame_equal(df, exp) + + def test_loc_setitem_zerolen_list_length_must_match_columns(self): + # list-like must conform + df = DataFrame(columns=["A", "B"]) + + msg = "cannot set a row with mismatched columns" + with pytest.raises(ValueError, match=msg): + df.loc[0] = [1, 2, 3] + + df = DataFrame(columns=["A", "B"]) + df.loc[3] = [6, 7] # length matches len(df.columns) --> OK! 
+ + exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64) + tm.assert_frame_equal(df, exp) + + def test_partial_set_empty_frame(self): + + # partially set with an empty object + # frame + df = DataFrame() + + msg = "cannot set a frame with no defined columns" + + with pytest.raises(ValueError, match=msg): + df.loc[1] = 1 + + with pytest.raises(ValueError, match=msg): + df.loc[1] = Series([1], index=["foo"]) + + msg = "cannot set a frame with no defined index and a scalar" + with pytest.raises(ValueError, match=msg): + df.loc[:, 1] = 1 + + def test_partial_set_empty_frame2(self): + # these work as they don't really change + # anything but the index + # GH#5632 + expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) + + df = DataFrame(index=Index([], dtype="object")) + df["foo"] = Series([], dtype="object") + + tm.assert_frame_equal(df, expected) + + df = DataFrame() + df["foo"] = Series(df.index) + + tm.assert_frame_equal(df, expected) + + df = DataFrame() + df["foo"] = df.index + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame3(self): + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + expected["foo"] = expected["foo"].astype("float64") + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = [] + + tm.assert_frame_equal(df, expected) + + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = Series(np.arange(len(df)), dtype="float64") + + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame4(self): + df = DataFrame(index=Index([], dtype="int64")) + df["foo"] = range(len(df)) + + expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) + # range is int-dtype-like, so we get int64 dtype + expected["foo"] = expected["foo"].astype("int64") + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame5(self): + df = DataFrame() + tm.assert_index_equal(df.columns, Index([], dtype=object)) + df2 = DataFrame() + df2[1] = 
Series([1], index=["foo"]) + df.loc[:, 1] = Series([1], index=["foo"]) + tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1])) + tm.assert_frame_equal(df, df2) + + def test_partial_set_empty_frame_no_index(self): + # no index to start + expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0]) + + df = DataFrame(columns=["A", "B"]) + df[0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["A", "B"]) + df.loc[:, 0] = Series(1, index=range(4)) + df.dtypes + str(df) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_row(self): + # GH#5720, GH#5744 + # don't create rows when empty + expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64")) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["New"] = expected["New"].astype("float64") + + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + y["New"] = np.nan + tm.assert_frame_equal(y, expected) + + expected = DataFrame(columns=["a", "b", "c c", "d"]) + expected["d"] = expected["d"].astype("int64") + df = DataFrame(columns=["a", "b", "c c"]) + df["d"] = 3 + tm.assert_frame_equal(df, expected) + tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object)) + + # reindex columns is ok + df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) + y = df[df.A > 5] + result = y.reindex(columns=["A", "B", "C"]) + expected = DataFrame(columns=["A", "B", "C"], index=Index([], dtype="int64")) + expected["A"] = expected["A"].astype("int64") + expected["B"] = expected["B"].astype("float64") + expected["C"] = expected["C"].astype("float64") + tm.assert_frame_equal(result, expected) + + def test_partial_set_empty_frame_set_series(self): + # GH#5756 + # setting with empty Series + df = DataFrame(Series(dtype=object)) + expected = DataFrame({0: Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + 
df = DataFrame(Series(name="foo", dtype=object)) + expected = DataFrame({"foo": Series(dtype=object)}) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_copy_assignment(self): + # GH#5932 + # copy on empty with assignment fails + df = DataFrame(index=[0]) + df = df.copy() + df["a"] = 0 + expected = DataFrame(0, index=[0], columns=["a"]) + tm.assert_frame_equal(df, expected) + + def test_partial_set_empty_frame_empty_consistencies(self): + # GH#6171 + # consistency on empty frames + df = DataFrame(columns=["x", "y"]) + df["x"] = [1, 2] + expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + df = DataFrame(columns=["x", "y"]) + df["x"] = ["1", "2"] + expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object) + tm.assert_frame_equal(df, expected) + + df = DataFrame(columns=["x", "y"]) + df.loc[0, "x"] = 1 + expected = DataFrame({"x": [1], "y": [np.nan]}) + tm.assert_frame_equal(df, expected, check_dtype=False) + + class TestPartialSetting: def test_partial_setting(self): @@ -61,8 +268,7 @@ def test_partial_setting(self): with pytest.raises(IndexError, match=msg): s.iat[3] = 5.0 - # ## frame ## - + def test_partial_setting_frame(self): df_orig = DataFrame( np.arange(6).reshape(3, 2), columns=["A", "B"], dtype="int64" ) @@ -166,33 +372,6 @@ def test_partial_setting_mixed_dtype(self): df.loc[2] = df.loc[1] tm.assert_frame_equal(df, expected) - # columns will align - df = DataFrame(columns=["A", "B"]) - df.loc[0] = Series(1, index=range(4)) - expected = DataFrame(columns=["A", "B"], index=[0], dtype=np.float64) - tm.assert_frame_equal(df, expected) - - # columns will align - # TODO: it isn't great that this behavior depends on consolidation - df = DataFrame(columns=["A", "B"])._consolidate() - df.loc[0] = Series(1, index=["B"]) - - exp = DataFrame([[np.nan, 1]], columns=["A", "B"], index=[0], dtype="float64") - tm.assert_frame_equal(df, exp) - - # list-like 
must conform - df = DataFrame(columns=["A", "B"]) - - msg = "cannot set a row with mismatched columns" - with pytest.raises(ValueError, match=msg): - df.loc[0] = [1, 2, 3] - - df = DataFrame(columns=["A", "B"]) - df.loc[3] = [6, 7] - - exp = DataFrame([[6, 7]], index=[3], columns=["A", "B"], dtype=np.int64) - tm.assert_frame_equal(df, exp) - def test_series_partial_set(self): # partial set with new index # Regression from GH4825 @@ -352,6 +531,7 @@ def test_setitem_with_expansion_numeric_into_datetimeindex(self, key): ex_index = Index(list(orig.index) + [key], dtype=object, name=orig.index.name) ex_data = np.concatenate([orig.values, df.iloc[[0]].values], axis=0) expected = DataFrame(ex_data, index=ex_index, columns=orig.columns) + tm.assert_frame_equal(df, expected) def test_partial_set_invalid(self): @@ -369,162 +549,6 @@ def test_partial_set_invalid(self): tm.assert_index_equal(df.index, Index(orig.index.tolist() + ["a"])) assert df.index.dtype == "object" - def test_partial_set_empty_frame(self): - - # partially set with an empty object - # frame - df = DataFrame() - - msg = "cannot set a frame with no defined columns" - - with pytest.raises(ValueError, match=msg): - df.loc[1] = 1 - - with pytest.raises(ValueError, match=msg): - df.loc[1] = Series([1], index=["foo"]) - - msg = "cannot set a frame with no defined index and a scalar" - with pytest.raises(ValueError, match=msg): - df.loc[:, 1] = 1 - - def test_partial_set_empty_frame2(self): - # these work as they don't really change - # anything but the index - # GH5632 - expected = DataFrame(columns=["foo"], index=Index([], dtype="object")) - - df = DataFrame(index=Index([], dtype="object")) - df["foo"] = Series([], dtype="object") - - tm.assert_frame_equal(df, expected) - - df = DataFrame() - df["foo"] = Series(df.index) - - tm.assert_frame_equal(df, expected) - - df = DataFrame() - df["foo"] = df.index - - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame3(self): - expected = 
DataFrame(columns=["foo"], index=Index([], dtype="int64")) - expected["foo"] = expected["foo"].astype("float64") - - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = [] - - tm.assert_frame_equal(df, expected) - - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = Series(np.arange(len(df)), dtype="float64") - - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame4(self): - df = DataFrame(index=Index([], dtype="int64")) - df["foo"] = range(len(df)) - - expected = DataFrame(columns=["foo"], index=Index([], dtype="int64")) - # range is int-dtype-like, so we get int64 dtype - expected["foo"] = expected["foo"].astype("int64") - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame5(self): - df = DataFrame() - tm.assert_index_equal(df.columns, Index([], dtype=object)) - df2 = DataFrame() - df2[1] = Series([1], index=["foo"]) - df.loc[:, 1] = Series([1], index=["foo"]) - tm.assert_frame_equal(df, DataFrame([[1]], index=["foo"], columns=[1])) - tm.assert_frame_equal(df, df2) - - def test_partial_set_empty_frame_no_index(self): - # no index to start - expected = DataFrame({0: Series(1, index=range(4))}, columns=["A", "B", 0]) - - df = DataFrame(columns=["A", "B"]) - df[0] = Series(1, index=range(4)) - df.dtypes - str(df) - tm.assert_frame_equal(df, expected) - - df = DataFrame(columns=["A", "B"]) - df.loc[:, 0] = Series(1, index=range(4)) - df.dtypes - str(df) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_row(self): - # GH5720, GH5744 - # don't create rows when empty - expected = DataFrame(columns=["A", "B", "New"], index=Index([], dtype="int64")) - expected["A"] = expected["A"].astype("int64") - expected["B"] = expected["B"].astype("float64") - expected["New"] = expected["New"].astype("float64") - - df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) - y = df[df.A > 5] - y["New"] = np.nan - tm.assert_frame_equal(y, expected) - # tm.assert_frame_equal(y,expected) - - expected = 
DataFrame(columns=["a", "b", "c c", "d"]) - expected["d"] = expected["d"].astype("int64") - df = DataFrame(columns=["a", "b", "c c"]) - df["d"] = 3 - tm.assert_frame_equal(df, expected) - tm.assert_series_equal(df["c c"], Series(name="c c", dtype=object)) - - # reindex columns is ok - df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]}) - y = df[df.A > 5] - result = y.reindex(columns=["A", "B", "C"]) - expected = DataFrame(columns=["A", "B", "C"], index=Index([], dtype="int64")) - expected["A"] = expected["A"].astype("int64") - expected["B"] = expected["B"].astype("float64") - expected["C"] = expected["C"].astype("float64") - tm.assert_frame_equal(result, expected) - - def test_partial_set_empty_frame_set_series(self): - # GH 5756 - # setting with empty Series - df = DataFrame(Series(dtype=object)) - expected = DataFrame({0: Series(dtype=object)}) - tm.assert_frame_equal(df, expected) - - df = DataFrame(Series(name="foo", dtype=object)) - expected = DataFrame({"foo": Series(dtype=object)}) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_empty_copy_assignment(self): - # GH 5932 - # copy on empty with assignment fails - df = DataFrame(index=[0]) - df = df.copy() - df["a"] = 0 - expected = DataFrame(0, index=[0], columns=["a"]) - tm.assert_frame_equal(df, expected) - - def test_partial_set_empty_frame_empty_consistencies(self): - # GH 6171 - # consistency on empty frames - df = DataFrame(columns=["x", "y"]) - df["x"] = [1, 2] - expected = DataFrame({"x": [1, 2], "y": [np.nan, np.nan]}) - tm.assert_frame_equal(df, expected, check_dtype=False) - - df = DataFrame(columns=["x", "y"]) - df["x"] = ["1", "2"] - expected = DataFrame({"x": ["1", "2"], "y": [np.nan, np.nan]}, dtype=object) - tm.assert_frame_equal(df, expected) - - df = DataFrame(columns=["x", "y"]) - df.loc[0, "x"] = 1 - expected = DataFrame({"x": [1], "y": [np.nan]}) - tm.assert_frame_equal(df, expected, check_dtype=False) - @pytest.mark.parametrize( "idx,labels,expected_idx", [ @@ 
-584,14 +608,14 @@ def test_loc_with_list_of_strings_representing_datetimes_missing_value( self, idx, labels ): # GH 11278 - s = Series(range(20), index=idx) + ser = Series(range(20), index=idx) df = DataFrame(range(20), index=idx) msg = r"not in index" with pytest.raises(KeyError, match=msg): - s.loc[labels] + ser.loc[labels] with pytest.raises(KeyError, match=msg): - s[labels] + ser[labels] with pytest.raises(KeyError, match=msg): df.loc[labels] @@ -628,37 +652,18 @@ def test_loc_with_list_of_strings_representing_datetimes_not_matched_type( self, idx, labels, msg ): # GH 11278 - s = Series(range(20), index=idx) + ser = Series(range(20), index=idx) df = DataFrame(range(20), index=idx) with pytest.raises(KeyError, match=msg): - s.loc[labels] + ser.loc[labels] with pytest.raises(KeyError, match=msg): - s[labels] + ser[labels] with pytest.raises(KeyError, match=msg): df.loc[labels] - def test_index_name_empty(self): - # GH 31368 - df = DataFrame({}, index=pd.RangeIndex(0, name="df_index")) - series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) - - df["series"] = series - expected = DataFrame( - {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") - ) - - tm.assert_frame_equal(df, expected) - - # GH 36527 - df = DataFrame() - series = Series(1.23, index=pd.RangeIndex(4, name="series_index")) - df["series"] = series - expected = DataFrame( - {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="series_index") - ) - tm.assert_frame_equal(df, expected) +class TestStringSlicing: def test_slice_irregular_datetime_index_with_nan(self): # GH36953 index = pd.to_datetime(["2012-01-01", "2012-01-02", "2012-01-03", None])
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44372
2021-11-09T20:06:53Z
2021-11-11T17:51:02Z
2021-11-11T17:51:02Z
2021-11-11T17:53:16Z
TST: test that Int64 series can be created from pyarrow array
diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index 7d343aab3c7a0..a4ade3864c007 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -249,3 +249,21 @@ class TestParsing(base.BaseParsingTests): class Test2DCompat(base.Dim2CompatTests): pass + + +def test_from_arrow(dtype): + pyarrow = pytest.importorskip("pyarrow") + + def types_mapper(arrow_type): + if pyarrow.types.is_integer(arrow_type): + return dtype + + pyarrow_array = pyarrow.array([1, None, 2], type=pyarrow.int64()) + expected = pd.Series([1, None, 2], dtype=dtype.name) + + # Convert to RecordBatch because types_mapper argument is ignored when + # using a pyarrow.Array. https://issues.apache.org/jira/browse/ARROW-9664 + record_batch = pyarrow.RecordBatch.from_arrays([pyarrow_array], ["test_col"]) + dataframe = record_batch.to_pandas(types_mapper=types_mapper) + series = dataframe["test_col"] + tm.assert_series_equal(series, expected, check_names=False)
- [x] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Closes #44368
https://api.github.com/repos/pandas-dev/pandas/pulls/44371
2021-11-09T20:01:01Z
2021-11-10T18:42:25Z
null
2021-11-10T18:42:30Z
Backport PR #44362 on branch 1.3.x (CI: xfail tests failing on numpy dev)
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index 7ebda1f17ba56..ef53034580112 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -122,7 +122,7 @@ def coo_to_sparse_series(A, dense_index: bool = False): Parameters ---------- - A : scipy.sparse.coo.coo_matrix + A : scipy.sparse.coo_matrix dense_index : bool, default False Returns diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 1cc8a2df44812..181e01233b8e0 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1204,7 +1204,7 @@ def test_to_coo(self): dtype="Sparse[int]", ) A, _, _ = ser.sparse.to_coo() - assert isinstance(A, scipy.sparse.coo.coo_matrix) + assert isinstance(A, scipy.sparse.coo_matrix) def test_non_sparse_raises(self): ser = pd.Series([1, 2, 3]) diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py index b2e53a676b039..66b52da0f5578 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -558,6 +558,7 @@ def test_rolling_quantile_np_percentile(): tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) +@pytest.mark.xfail(reason="GH#44343", strict=False) @pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1]) @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
Backport PR #44362
https://api.github.com/repos/pandas-dev/pandas/pulls/44370
2021-11-09T19:54:51Z
2021-11-09T21:23:33Z
2021-11-09T21:23:33Z
2021-11-09T21:23:37Z
TST: check compatibility with pyarrow types_mapper parameter
diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index 3f0a1b5d0eaf3..20eb055f14835 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -36,6 +36,27 @@ def test_arrow_roundtrip(data): tm.assert_frame_equal(result, df) +def test_dataframe_from_arrow_types_mapper(): + def types_mapper(arrow_type): + if pa.types.is_boolean(arrow_type): + return pd.BooleanDtype() + elif pa.types.is_integer(arrow_type): + return pd.Int64Dtype() + + bools_array = pa.array([True, None, False], type=pa.bool_()) + ints_array = pa.array([1, None, 2], type=pa.int64()) + small_ints_array = pa.array([-1, 0, 7], type=pa.int8()) + record_batch = pa.RecordBatch.from_arrays( + [bools_array, ints_array, small_ints_array], ["bools", "ints", "small_ints"] + ) + result = record_batch.to_pandas(types_mapper=types_mapper) + bools = pd.Series([True, None, False], dtype="boolean") + ints = pd.Series([1, None, 2], dtype="Int64") + small_ints = pd.Series([-1, 0, 7], dtype="Int64") + expected = pd.DataFrame({"bools": bools, "ints": ints, "small_ints": small_ints}) + tm.assert_frame_equal(result, expected) + + def test_arrow_load_from_zero_chunks(data): # GH-41040
- [x] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Closes #44368
https://api.github.com/repos/pandas-dev/pandas/pulls/44369
2021-11-09T17:56:38Z
2021-12-01T01:26:47Z
2021-12-01T01:26:47Z
2021-12-01T16:54:07Z
CLN: split/fixturize to_datetime tests
diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 1f75bc11005bc..4867ba58838ef 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -46,6 +46,14 @@ from pandas.core.tools.datetimes import start_caching_at +@pytest.fixture(params=[True, False]) +def cache(request): + """ + cache keyword to pass to to_datetime. + """ + return request.param + + class TestTimeConversionFormats: @pytest.mark.parametrize("readonly", [True, False]) def test_to_datetime_readonly(self, readonly): @@ -57,7 +65,6 @@ def test_to_datetime_readonly(self, readonly): expected = to_datetime([]) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format(self, cache): values = ["1/1/2000", "1/2/2000", "1/3/2000"] @@ -82,7 +89,6 @@ def test_to_datetime_format(self, cache): else: tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_YYYYMMDD(self, cache): s = Series([19801222, 19801222] + [19810105] * 5) expected = Series([Timestamp(x) for x in s.apply(str)]) @@ -109,17 +115,18 @@ def test_to_datetime_format_YYYYMMDD(self, cache): result = to_datetime(s, format="%Y%m%d", cache=cache) tm.assert_series_equal(result, expected) + def test_to_datetime_format_YYYYMMDD_coercion(self, cache): # coercion # GH 7930 - s = Series([20121231, 20141231, 99991231]) - result = to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache) + ser = Series([20121231, 20141231, 99991231]) + result = to_datetime(ser, format="%Y%m%d", errors="ignore", cache=cache) expected = Series( [datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)], dtype=object, ) tm.assert_series_equal(result, expected) - result = to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache) + result = to_datetime(ser, format="%Y%m%d", errors="coerce", cache=cache) expected = Series(["20121231", "20141231", "NaT"], 
dtype="M8[ns]") tm.assert_series_equal(result, expected) @@ -199,7 +206,6 @@ def test_to_datetime_with_NA(self, data, format, expected): result = to_datetime(data, format=format) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_integer(self, cache): # GH 10178 s = Series([2000, 2001, 2002]) @@ -236,7 +242,6 @@ def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected): result = to_datetime(int_date, format="%Y%m%d", errors="ignore") assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_microsecond(self, cache): # these are locale dependent @@ -249,7 +254,6 @@ def test_to_datetime_format_microsecond(self, cache): exp = datetime.strptime(val, format) assert result == exp - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_format_time(self, cache): data = [ ["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")], @@ -259,6 +263,7 @@ def test_to_datetime_format_time(self, cache): "%m/%d/%Y %H:%M:%S", Timestamp("2010-01-10 13:56:01"), ] # , + # FIXME: don't leave commented-out # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', # Timestamp('2010-01-10 20:14')], # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', @@ -270,7 +275,6 @@ def test_to_datetime_format_time(self, cache): assert to_datetime(s, format=format, cache=cache) == dt @td.skip_if_has_locale - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_non_exact(self, cache): # GH 10834 # 8904 @@ -284,7 +288,6 @@ def test_to_datetime_with_non_exact(self, cache): ) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_parse_nanoseconds_with_formula(self, cache): # GH8989 @@ -300,14 +303,15 @@ def test_parse_nanoseconds_with_formula(self, cache): result = to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache) assert result == expected - @pytest.mark.parametrize("cache", [True, 
False]) - def test_to_datetime_format_weeks(self, cache): - data = [ + @pytest.mark.parametrize( + "value,fmt,expected", + [ ["2009324", "%Y%W%w", Timestamp("2009-08-13")], ["2013020", "%Y%U%w", Timestamp("2013-01-13")], - ] - for s, format, dt in data: - assert to_datetime(s, format=format, cache=cache) == dt + ], + ) + def test_to_datetime_format_weeks(self, value, fmt, expected, cache): + assert to_datetime(value, format=fmt, cache=cache) == expected @pytest.mark.parametrize( "fmt,dates,expected_dates", @@ -601,7 +605,6 @@ def test_to_datetime_today_now_unicode_bytes(self): to_datetime(["now"]) to_datetime(["today"]) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_dt64s(self, cache): in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")] @@ -611,7 +614,6 @@ def test_to_datetime_dt64s(self, cache): @pytest.mark.parametrize( "dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")] ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_dt64s_out_of_bounds(self, cache, dt): msg = f"Out of bounds nanosecond timestamp: {dt}" with pytest.raises(OutOfBoundsDatetime, match=msg): @@ -620,7 +622,6 @@ def test_to_datetime_dt64s_out_of_bounds(self, cache, dt): Timestamp(dt) assert to_datetime(dt, errors="coerce", cache=cache) is NaT - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize("unit", ["s", "D"]) def test_to_datetime_array_of_dt64s(self, cache, unit): # https://github.com/pandas-dev/pandas/issues/31491 @@ -659,7 +660,6 @@ def test_to_datetime_array_of_dt64s(self, cache, unit): Index([dt.item() for dt in dts_with_oob]), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_tz(self, cache): # xref 8260 @@ -686,7 +686,6 @@ def test_to_datetime_tz(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(arr, cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_different_offsets(self, cache): # inspired by asv 
timeseries.ToDatetimeNONISO8601 benchmark # see GH-26097 for more @@ -697,7 +696,6 @@ def test_to_datetime_different_offsets(self, cache): result = to_datetime(arr, cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_tz_pytz(self, cache): # see gh-8260 us_eastern = pytz.timezone("US/Eastern") @@ -720,19 +718,16 @@ def test_to_datetime_tz_pytz(self, cache): ) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( - "init_constructor, end_constructor, test_method", + "init_constructor, end_constructor", [ - (Index, DatetimeIndex, tm.assert_index_equal), - (list, DatetimeIndex, tm.assert_index_equal), - (np.array, DatetimeIndex, tm.assert_index_equal), - (Series, Series, tm.assert_series_equal), + (Index, DatetimeIndex), + (list, DatetimeIndex), + (np.array, DatetimeIndex), + (Series, Series), ], ) - def test_to_datetime_utc_true( - self, cache, init_constructor, end_constructor, test_method - ): + def test_to_datetime_utc_true(self, cache, init_constructor, end_constructor): # See gh-11934 & gh-6415 data = ["20100102 121314", "20100102 121315"] expected_data = [ @@ -744,14 +739,13 @@ def test_to_datetime_utc_true( init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache ) expected = end_constructor(expected_data) - test_method(result, expected) + tm.assert_equal(result, expected) # Test scalar case as well for scalar, expected in zip(data, expected_data): result = to_datetime(scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache) assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_utc_true_with_series_single_value(self, cache): # GH 15760 UTC=True with Series ts = 1.5e18 @@ -759,7 +753,6 @@ def test_to_datetime_utc_true_with_series_single_value(self, cache): expected = Series([Timestamp(ts, tz="utc")]) tm.assert_series_equal(result, expected) - 
@pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): ts = "2013-01-01 00:00:00-01:00" expected_ts = "2013-01-01 01:00:00" @@ -768,7 +761,6 @@ def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): expected = Series([Timestamp(expected_ts, tz="utc")] * 3) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( "date, dtype", [ @@ -781,7 +773,6 @@ def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype): result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) @td.skip_if_no("psycopg2") def test_to_datetime_tz_psycopg2(self, cache): @@ -822,7 +813,6 @@ def test_to_datetime_tz_psycopg2(self, cache): expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]") tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_datetime_bool(self, cache): # GH13176 msg = r"dtype bool cannot be converted to datetime64\[ns\]" @@ -945,18 +935,6 @@ def test_to_datetime_cache(self, utc, format, constructor): tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( - "listlike", - [ - (deque([Timestamp("2010-06-02 09:30:00")] * 51)), - ([Timestamp("2010-06-02 09:30:00")] * 51), - (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), - ], - ) - def test_no_slicing_errors_in_should_cache(self, listlike): - # GH 29403 - assert tools.should_cache(listlike) is True - def test_to_datetime_from_deque(self): # GH 29403 result = to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51)) @@ -1198,7 +1176,6 @@ def test_to_datetime_fixed_offset(self): class TestToDatetimeUnit: - @pytest.mark.parametrize("cache", [True, False]) def test_unit(self, cache): # GH 11758 # test proper behavior with errors @@ -1247,17 +1224,19 @@ def test_unit(self, cache): with 
pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(values, errors="raise", unit="s", cache=cache) + def test_to_datetime_invalid_str_not_out_of_bounds_valuerror(self, cache): # if we have a string, then we raise a ValueError # and NOT an OutOfBoundsDatetime - for val in ["foo", Timestamp("20130101")]: - try: - to_datetime(val, errors="raise", unit="s", cache=cache) - except OutOfBoundsDatetime as err: - raise AssertionError("incorrect exception raised") from err - except ValueError: - pass - - @pytest.mark.parametrize("cache", [True, False]) + + try: + to_datetime("foo", errors="raise", unit="s", cache=cache) + except OutOfBoundsDatetime as err: + raise AssertionError("incorrect exception raised") from err + except ValueError: + pass + else: + assert False, "Failed to raise ValueError" + def test_unit_consistency(self, cache): # consistency of conversions @@ -1274,7 +1253,6 @@ def test_unit_consistency(self, cache): assert result == expected assert isinstance(result, Timestamp) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_with_numeric(self, cache): # GH 13180 @@ -1303,7 +1281,6 @@ def test_unit_with_numeric(self, cache): result = to_datetime(arr, errors="coerce", cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_mixed(self, cache): # mixed integers/datetimes @@ -1324,7 +1301,6 @@ def test_unit_mixed(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(arr, errors="raise", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_unit_rounding(self, cache): # GH 14156 & GH 20445: argument will incur floating point errors # but no premature rounding @@ -1332,17 +1308,105 @@ def test_unit_rounding(self, cache): expected = Timestamp("2015-06-19 19:55:31.877000192") assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_unit_ignore_keeps_name(self, cache): # GH 21697 expected = Index([15e9] * 2, name="name") result 
= to_datetime(expected, errors="ignore", unit="s", cache=cache) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) - def test_dataframe(self, cache): + def test_to_datetime_errors_ignore_utc_true(self): + # GH#23758 + result = to_datetime([1], unit="s", utc=True, errors="ignore") + expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC") + tm.assert_index_equal(result, expected) + + # TODO: this is moved from tests.series.test_timeseries, may be redundant + def test_to_datetime_unit(self): + + epoch = 1370745748 + s1 = Series([epoch + t for t in range(20)]) + s2 = Series([epoch + t for t in range(20)]).astype(float) + + for ser in [s1, s2]: + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ] + ) + tm.assert_series_equal(result, expected) + + s1 = Series([epoch + t for t in range(20)] + [iNaT]) + s2 = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) + s3 = Series([epoch + t for t in range(20)] + [np.nan]) + + for ser in [s1, s2, s3]: + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_fractional_seconds(self): + + # GH13834 + epoch = 1370745748 + s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) + result = to_datetime(s, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in np.arange(0, 2, 0.25) + ] + + [NaT] + ) + # GH20455 argument will incur floating point errors but no premature rounding + result = result.round("ms") + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_na_values(self): + result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 + 
) + tm.assert_index_equal(result, expected) + + def test_to_datetime_unit_invalid(self): + msg = "non convertible value foo with the unit 'D'" + with pytest.raises(ValueError, match=msg): + to_datetime([1, 2, "foo"], unit="D") + msg = "cannot convert input 111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime([1, 2, 111111111], unit="D") + + def test_to_timestamp_unit_coerce(self): + # coerce we can process + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 + ) + result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) - df = DataFrame( + result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + + +class TestToDatetimeDataFrame: + @pytest.fixture(params=[True, False]) + def cache(self, request): + """ + cache keyword to pass to to_datetime. + """ + return request.param + + @pytest.fixture + def df(self): + return DataFrame( { "year": [2015, 2016], "month": [2, 3], @@ -1356,6 +1420,8 @@ def test_dataframe(self, cache): } ) + def test_dataframe(self, df, cache): + result = to_datetime( {"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache ) @@ -1377,6 +1443,7 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected2) + def test_dataframe_field_aliases_column_subset(self, df, cache): # unit mappings units = [ { @@ -1404,6 +1471,7 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected) + def test_dataframe_field_aliases(self, df, cache): d = { "year": "year", "month": "month", @@ -1425,10 +1493,18 @@ def test_dataframe(self, cache): ) tm.assert_series_equal(result, expected) + def test_dataframe_str_dtype(self, df, cache): # coerce back to int result = to_datetime(df.astype(str), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) 
tm.assert_series_equal(result, expected) + def test_dataframe_coerce(self, cache): # passing coerce df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) @@ -1438,10 +1514,12 @@ def test_dataframe(self, cache): ) with pytest.raises(ValueError, match=msg): to_datetime(df2, cache=cache) + result = to_datetime(df2, errors="coerce", cache=cache) expected = Series([Timestamp("20150204 00:00:00"), NaT]) tm.assert_series_equal(result, expected) + def test_dataframe_extra_keys_raisesm(self, df, cache): # extra columns msg = r"extra keys have been passed to the datetime assemblage: \[foo\]" with pytest.raises(ValueError, match=msg): @@ -1449,6 +1527,7 @@ def test_dataframe(self, cache): df2["foo"] = 1 to_datetime(df2, cache=cache) + def test_dataframe_missing_keys_raises(self, df, cache): # not enough msg = ( r"to assemble mappings requires at least that \[year, month, " @@ -1464,6 +1543,7 @@ def test_dataframe(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(df[c], cache=cache) + def test_dataframe_duplicate_columns_raises(self, cache): # duplicates msg = "cannot assemble with duplicate keys" df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) @@ -1478,9 +1558,8 @@ def test_dataframe(self, cache): with pytest.raises(ValueError, match=msg): to_datetime(df2, cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_dataframe_dtypes(self, cache): - # #13451 + # GH#13451 df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) # int16 @@ -1506,7 +1585,7 @@ def test_dataframe_dtypes(self, cache): to_datetime(df, cache=cache) def test_dataframe_utc_true(self): - # GH 23760 + # GH#23760 df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) result = to_datetime(df, utc=True) expected = Series( @@ -1514,94 +1593,6 @@ def test_dataframe_utc_true(self): ).dt.tz_localize("UTC") tm.assert_series_equal(result, expected) - def test_to_datetime_errors_ignore_utc_true(self): - # GH 
23758 - result = to_datetime([1], unit="s", utc=True, errors="ignore") - expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC") - tm.assert_index_equal(result, expected) - - # TODO: this is moved from tests.series.test_timeseries, may be redundant - def test_to_datetime_unit(self): - - epoch = 1370745748 - s = Series([epoch + t for t in range(20)]) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - # GH13834 - s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [ - Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) - for t in np.arange(0, 2, 0.25) - ] - + [NaT] - ) - # GH20455 argument will incur floating point errors but no premature rounding - result = result.round("ms") - tm.assert_series_equal(result, expected) - - s = pd.concat( - [Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])], - ignore_index=True, - ) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - 
tm.assert_series_equal(result, expected) - - result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 - ) - tm.assert_index_equal(result, expected) - - msg = "non convertible value foo with the unit 'D'" - with pytest.raises(ValueError, match=msg): - to_datetime([1, 2, "foo"], unit="D") - msg = "cannot convert input 111111111 with the unit 'D'" - with pytest.raises(OutOfBoundsDatetime, match=msg): - to_datetime([1, 2, 111111111], unit="D") - - # coerce we can process - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 - ) - result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - - result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - class TestToDatetimeMisc: def test_to_datetime_barely_out_of_bounds(self): @@ -1614,7 +1605,6 @@ def test_to_datetime_barely_out_of_bounds(self): with pytest.raises(OutOfBoundsDatetime, match=msg): to_datetime(arr) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_iso8601(self, cache): result = to_datetime(["2012-01-01 00:00:00"], cache=cache) exp = Timestamp("2012-01-01 00:00:00") @@ -1624,19 +1614,17 @@ def test_to_datetime_iso8601(self, cache): exp = Timestamp("2012-10-01") assert result[0] == exp - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_default(self, cache): rs = to_datetime("2001", cache=cache) xp = datetime(2001, 1, 1) assert rs == xp # dayfirst is essentially broken - + # FIXME: don't leave commented-out # to_datetime('01-13-2012', dayfirst=True) # pytest.raises(ValueError, to_datetime('01-13-2012', # dayfirst=True)) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_on_datetime64_series(self, cache): # #2699 s = Series(date_range("1/1/2000", periods=10)) @@ -1644,7 +1632,6 @@ def 
test_to_datetime_on_datetime64_series(self, cache): result = to_datetime(s, cache=cache) assert result[0] == s[0] - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_space_in_series(self, cache): # GH 6428 s = Series(["10/18/2006", "10/18/2008", " "]) @@ -1658,7 +1645,6 @@ def test_to_datetime_with_space_in_series(self, cache): tm.assert_series_equal(result_ignore, s) @td.skip_if_has_locale - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_with_apply(self, cache): # this is only locale tested with US/None locales # GH 5195 @@ -1681,7 +1667,6 @@ def test_to_datetime_with_apply(self, cache): ) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_types(self, cache): # empty string @@ -1701,18 +1686,19 @@ def test_to_datetime_types(self, cache): result = to_datetime("2012", cache=cache) assert result == expected + # FIXME: don't leave commented-out # array = ['2012','20120101','20120101 12:01:01'] array = ["20120101", "20120101 12:01:01"] expected = list(to_datetime(array, cache=cache)) result = [Timestamp(date_str) for date_str in array] tm.assert_almost_equal(result, expected) + # FIXME: don't leave commented-out # currently fails ### # result = Timestamp('2012') # expected = to_datetime('2012') # assert result == expected - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_unprocessable_input(self, cache): # GH 4928 # GH 21864 @@ -1724,7 +1710,6 @@ def test_to_datetime_unprocessable_input(self, cache): with pytest.raises(TypeError, match=msg): to_datetime([1, "1"], errors="raise", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_unhashable_input(self, cache): series = Series([["a"]] * 100) result = to_datetime(series, errors="ignore", cache=cache) @@ -1765,7 +1750,6 @@ def test_to_datetime_overflow(self): with pytest.raises(OutOfBoundsTimedelta, match=msg): date_range(start="1/1/1700", freq="B", 
periods=100000) - @pytest.mark.parametrize("cache", [True, False]) def test_string_na_nat_conversion(self, cache): # GH #999, #858 @@ -1846,7 +1830,6 @@ def test_string_na_nat_conversion(self, cache): "datetime64[ns]", ], ) - @pytest.mark.parametrize("cache", [True, False]) def test_dti_constructor_numpy_timeunits(self, cache, dtype): # GH 9114 base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache) @@ -1856,7 +1839,6 @@ def test_dti_constructor_numpy_timeunits(self, cache, dtype): tm.assert_index_equal(DatetimeIndex(values), base) tm.assert_index_equal(to_datetime(values, cache=cache), base) - @pytest.mark.parametrize("cache", [True, False]) def test_dayfirst(self, cache): # GH 5917 arr = ["10/02/2014", "11/02/2014", "12/02/2014"] @@ -1980,7 +1962,6 @@ def test_guess_datetime_format_for_array(self): class TestToDatetimeInferFormat: - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_consistent_format(self, cache): s = Series(date_range("20000101", periods=50, freq="H")) @@ -2002,7 +1983,6 @@ def test_to_datetime_infer_datetime_format_consistent_format(self, cache): tm.assert_series_equal(with_format, no_infer) tm.assert_series_equal(no_infer, yes_infer) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache): s = Series( np.array( @@ -2024,7 +2004,6 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache): to_datetime(s, infer_datetime_format=True, cache=cache), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): s = Series( np.array( @@ -2037,7 +2016,6 @@ def test_to_datetime_infer_datetime_format_series_with_nans(self, cache): to_datetime(s, infer_datetime_format=True, cache=cache), ) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache): s = Series( 
np.array( @@ -2086,7 +2064,6 @@ def test_infer_datetime_format_zero_tz(self, ts, zero_tz, is_utc): expected = Series([Timestamp(ts, tz=tz)]) tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("cache", [True, False]) def test_to_datetime_iso8601_noleading_0s(self, cache): # GH 11871 s = Series(["2014-1-1", "2014-2-2", "2015-3-3"]) @@ -2104,7 +2081,6 @@ def test_to_datetime_iso8601_noleading_0s(self, cache): class TestDaysInMonth: # tests for issue #10154 - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_coerce(self, cache): assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache)) assert isna( @@ -2117,7 +2093,6 @@ def test_day_not_in_month_coerce(self, cache): to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache) ) - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_raise(self, cache): msg = "day is out of range for month" with pytest.raises(ValueError, match=msg): @@ -2135,7 +2110,6 @@ def test_day_not_in_month_raise(self, cache): with pytest.raises(ValueError, match=msg): to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache) - @pytest.mark.parametrize("cache", [True, False]) def test_day_not_in_month_ignore(self, cache): assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29" assert ( @@ -2205,7 +2179,6 @@ class TestDatetimeParsingWrappers: }.items() ), ) - @pytest.mark.parametrize("cache", [True, False]) def test_parsers(self, date_str, expected, cache): # dateutil >= 2.5.0 defaults to yearfirst=True @@ -2237,7 +2210,6 @@ def test_parsers(self, date_str, expected, cache): result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst) assert result7 == expected - @pytest.mark.parametrize("cache", [True, False]) def test_na_values_with_cache( self, cache, unique_nulls_fixture, unique_nulls_fixture2 ): @@ -2257,7 +2229,6 @@ def test_parsers_nat(self): assert result3 is NaT assert result4 is NaT - 
@pytest.mark.parametrize("cache", [True, False]) def test_parsers_dayfirst_yearfirst(self, cache): # OK # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 @@ -2345,7 +2316,6 @@ def test_parsers_dayfirst_yearfirst(self, cache): assert result3 == expected assert result4 == expected - @pytest.mark.parametrize("cache", [True, False]) def test_parsers_timestring(self, cache): # must be the same as dateutil result cases = { @@ -2368,7 +2338,6 @@ def test_parsers_timestring(self, cache): assert result4 == exp_now assert result5 == exp_now - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( "dt_string, tz, dt_string_repr", [ @@ -2564,29 +2533,44 @@ def test_arg_tz_ns_unit(self, offset, utc, exp): tm.assert_index_equal(result, expected) -@pytest.mark.parametrize( - "listlike,do_caching", - [([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)], -) -def test_should_cache(listlike, do_caching): - assert ( - tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) - == do_caching +class TestShouldCache: + @pytest.mark.parametrize( + "listlike,do_caching", + [ + ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), + ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True), + ], ) + def test_should_cache(self, listlike, do_caching): + assert ( + tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) + == do_caching + ) + @pytest.mark.parametrize( + "unique_share,check_count, err_message", + [ + (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), + (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), + ], + ) + def test_should_cache_errors(self, unique_share, check_count, err_message): + arg = [5] * 10 -@pytest.mark.parametrize( - "unique_share,check_count, err_message", - [ - (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), - (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), - ], -) -def test_should_cache_errors(unique_share, check_count, err_message): - 
arg = [5] * 10 + with pytest.raises(AssertionError, match=err_message): + tools.should_cache(arg, unique_share, check_count) - with pytest.raises(AssertionError, match=err_message): - tools.should_cache(arg, unique_share, check_count) + @pytest.mark.parametrize( + "listlike", + [ + (deque([Timestamp("2010-06-02 09:30:00")] * 51)), + ([Timestamp("2010-06-02 09:30:00")] * 51), + (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), + ], + ) + def test_no_slicing_errors_in_should_cache(self, listlike): + # GH#29403 + assert tools.should_cache(listlike) is True def test_nullable_integer_to_datetime(): @@ -2624,7 +2608,7 @@ def test_na_to_datetime(nulls_fixture, klass): assert result[0] is NaT -def test_empty_string_datetime_coerce__format(): +def test_empty_string_datetime_coerce_format(): # GH13044 td = Series(["03/24/2016", "03/25/2016", ""]) format = "%m/%d/%Y"
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44367
2021-11-09T17:49:52Z
2021-11-11T17:51:20Z
2021-11-11T17:51:20Z
2021-11-11T17:53:06Z
ENH: add suffixes argument to compare
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 9ead1e4a75d01..461b9b7d3b93c 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -213,6 +213,7 @@ Other enhancements - :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`) - :meth:`Timestamp.isoformat`, now handles the ``timespec`` argument from the base :class:``datetime`` class (:issue:`26131`) - :meth:`NaT.to_numpy` ``dtype`` argument is now respected, so ``np.timedelta64`` can be returned (:issue:`44460`) +- :meth:`DataFrame.compare` now accepts a ``suffixes`` to allow the user to specify the suffixes of both left and right DataFrame which are being compared. This is by default ``self`` and ``other`` (:issue:`44354`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 33df7a9f0ac1f..2967bea982a08 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7297,12 +7297,14 @@ def compare( align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, + suffixes: Suffixes = ("self", "other"), ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, + suffixes=suffixes, ) def combine( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2db8be19b4399..22ebe91aad6da 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -53,6 +53,7 @@ RandomState, Renamer, StorageOptions, + Suffixes, T, TimedeltaConvertibleTypes, TimestampConvertibleTypes, @@ -8538,6 +8539,7 @@ def compare( align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, + suffixes: Suffixes = ("self", "other"), ): from pandas.core.reshape.concat import concat @@ -8548,7 +8550,6 @@ def compare( ) mask = ~((self == other) | (self.isna() & other.isna())) - keys = ["self", "other"] if not keep_equal: self = self.where(mask) @@ -8569,7 +8570,7 @@ def compare( else: axis = 
self._get_axis_number(align_axis) - diff = concat([self, other], axis=axis, keys=keys) + diff = concat([self, other], axis=axis, keys=suffixes) if axis >= self.ndim: # No need to reorganize data if stacking on new axis diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py index 468811eba0d39..ad0c924753e62 100644 --- a/pandas/tests/frame/methods/test_compare.py +++ b/pandas/tests/frame/methods/test_compare.py @@ -180,3 +180,20 @@ def test_compare_unaligned_objects(): df1 = pd.DataFrame(np.ones((3, 3))) df2 = pd.DataFrame(np.zeros((2, 1))) df1.compare(df2) + + +def test_compare_suffixes(): + # GH + df1 = pd.DataFrame( + {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}, + columns=["col1", "col2", "col3"], + ) + df2 = df1.copy() + df2.loc[0, "col1"] = "c" + df2.loc[2, "col3"] = 4.0 + + suffixes = ["left", "right"] + comp = df1.compare(df2, suffixes=suffixes) + + result_suffixes = comp.columns.get_level_values(1).unique() + assert result_suffixes.isin(suffixes).all(), "suffixes not equal"
- [x] closes #44354 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44365
2021-11-09T07:06:34Z
2022-02-06T22:51:27Z
null
2022-02-06T22:51:27Z
BUG/API: implement DayDST
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 11de4e60f202d..a9aa49ae5ee66 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -23,6 +23,7 @@ "to_offset", "Tick", "BaseOffset", + "DayDST", "tz_compare", ] @@ -41,6 +42,7 @@ from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.offsets import ( BaseOffset, + DayDST, Tick, to_offset, ) diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd index 215c3f849281f..f27e2e0a8b928 100644 --- a/pandas/_libs/tslibs/offsets.pxd +++ b/pandas/_libs/tslibs/offsets.pxd @@ -1,7 +1,7 @@ from numpy cimport int64_t -cpdef to_offset(object obj) +cpdef to_offset(object obj, bint tzaware=*) cdef bint is_offset_object(object obj) cdef bint is_tick_object(object obj) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 39582a94dbdf9..d0492073b93c8 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1009,6 +1009,41 @@ def delta_to_tick(delta: timedelta) -> Tick: return Nano(nanos) +cdef class DayDST(SingleConstructorOffset): + _adjust_dst = True + _attributes = tuple(["n", "normalize"]) + rule_code = "D" # used by parse_time_string + + def __init__(self, n=1, normalize=False): + BaseOffset.__init__(self, n) + if normalize: + # GH#21427 + raise ValueError( + "Tick offset with `normalize=True` are not allowed." 
+ ) + + def is_on_offset(self, dt) -> bool: + return True + + @apply_wraps + def _apply(self, other): + return other + Timedelta(days=self.n) + + @apply_index_wraps + def apply_index(self, dti): + return self._apply_array(dti) + + @apply_array_wraps + def _apply_array(self, dtarr): + return dtarr + Timedelta(days=self.n) + + @cache_readonly + def freqstr(self) -> str: + if self.n != 1: + return str(self.n) + "DayDST" + return "DayDST" + + # -------------------------------------------------------------------- cdef class RelativeDeltaOffset(BaseOffset): @@ -3569,7 +3604,7 @@ def _get_offset(name: str) -> BaseOffset: return _offset_map[name] -cpdef to_offset(freq): +cpdef to_offset(freq, bint tzaware=False): """ Return DateOffset object from string or tuple representation or datetime.timedelta object. @@ -3577,6 +3612,8 @@ cpdef to_offset(freq): Parameters ---------- freq : str, tuple, datetime.timedelta, DateOffset or None + tzaware : bool, default False + If we have a string "D", whether to interpret that as DayDST. 
Returns ------- @@ -3629,6 +3666,14 @@ cpdef to_offset(freq): delta = None stride_sign = None + if freq.endswith("DayDST"): + head = freq[:-6] + if len(head): + n = int(head) + else: + n = 1 + return DayDST(n) + try: split = opattern.split(freq) if split[-1] != "" and not split[-1].isspace(): @@ -3673,6 +3718,8 @@ cpdef to_offset(freq): if delta is None: raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) + if type(delta) is Day and tzaware: + return DayDST(delta.n) return delta diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 67696f9740ea1..cf6abfedf9eb4 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -113,7 +113,11 @@ from pandas._libs.tslibs.offsets cimport ( to_offset, ) -from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs.offsets import ( + INVALID_FREQ_ERR_MSG, + Day, + DayDST, +) cdef: enum: @@ -1629,6 +1633,8 @@ cdef class _Period(PeriodMixin): freq = dtype.date_offset freq = to_offset(freq) + if isinstance(freq, DayDST): + freq = Day(freq.n) if freq.n <= 0: raise ValueError("Frequency must be positive, because it " diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index be39ccd444865..f86630b4fc595 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1350,7 +1350,7 @@ class Timedelta(_Timedelta): ndarray[int64_t] arr from pandas._libs.tslibs.offsets import to_offset - unit = to_offset(freq).nanos + unit = to_offset(freq, tzaware=False).nanos arr = np.array([self.value], dtype="i8") result = round_nsint64(arr, mode, unit)[0] diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 33da9ca858a4c..ddba0a6e37130 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -26,6 +26,7 @@ ) from pandas._libs.tslibs import ( BaseOffset, + DayDST, IncompatibleFrequency, NaT, NaTType, @@ -1081,8 +1082,9 @@ def 
_add_timedeltalike_scalar(self, other): new_values = new_values.view(self._ndarray.dtype) new_freq = None - if isinstance(self.freq, Tick) or is_period_dtype(self.dtype): + if isinstance(self.freq, (Tick, DayDST)) or is_period_dtype(self.dtype): # adding a scalar preserves freq + # TODO: sure this is accurate for DayDST new_freq = self.freq # error: Unexpected keyword argument "freq" for "_simple_new" of "NDArrayBacked" diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 7bd3403abd5cc..1acd8576aae54 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -79,6 +79,7 @@ from pandas.tseries.offsets import ( BDay, Day, + DayDST, Tick, ) @@ -365,7 +366,18 @@ def _from_sequence_not_strict( ambiguous=ambiguous, ) - freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) + try: + freq, freq_infer = dtl.validate_inferred_freq( + freq, inferred_freq, freq_infer + ) + except ValueError: + if isinstance(freq, Tick) and isinstance(inferred_freq, DayDST): + # It is possible that both could be valid, so we'll + # go through _validate_frequency below + inferred_freq = None + freq_infer = False + else: + raise if explicit_none: freq = None @@ -432,10 +444,13 @@ def _generate_range( end, end_tz, end, freq, tz, ambiguous, nonexistent ) if freq is not None: + # FIXME: dont do this # We break Day arithmetic (fixed 24 hour) here and opt for # Day to mean calendar day (23/24/25 hour). 
Therefore, strip # tz info from start and day to avoid DST arithmetic - if isinstance(freq, Day): + if isinstance(freq, (Day, DayDST)): + if tz is not None: + freq = DayDST(freq.n) if start is not None: start = start.tz_localize(None) if end is not None: diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 01018c7263f32..2d4fe3d72acc2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -29,6 +29,8 @@ from pandas._libs.tslibs.dtypes import FreqGroup from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import ( + Day, + DayDST, Tick, delta_to_tick, ) @@ -1108,6 +1110,8 @@ def dt64arr_to_periodarr(data, freq, tz=None): elif isinstance(data, (ABCIndex, ABCSeries)): data = data._values + if isinstance(freq, DayDST): + freq = Day(freq.n) freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e283509206344..2793c8af08960 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1012,7 +1012,7 @@ def date_range( DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', '2018-01-05 00:00:00+09:00'], - dtype='datetime64[ns, Asia/Tokyo]', freq='D') + dtype='datetime64[ns, Asia/Tokyo]', freq='DayDST') `closed` controls whether to include `start` and `end` that are on the boundary. The default includes boundary points on either end. 
diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 01a8982c5fe16..fc329c53b2d9f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -41,6 +41,7 @@ def test_setitem_invalidates_datetime_index_freq(self): # `freq` attribute on the underlying DatetimeIndex dti = date_range("20130101", periods=3, tz="US/Eastern") + orig_freq = dti.freq ts = dti[1] df = DataFrame({"B": dti}) @@ -50,7 +51,7 @@ def test_setitem_invalidates_datetime_index_freq(self): assert df["B"]._values.freq is None # check that the DatetimeIndex was not altered in place - assert dti.freq == "D" + assert dti.freq is orig_freq assert dti[1] == ts def test_cast_internals(self, float_frame): diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 9db6567ca1b56..7ec64e464aa5d 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -68,7 +68,7 @@ def test_getitem(self): result = idx[4::-1] expected = DatetimeIndex( ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"], - freq="-1D", + freq=-1 * idx.freq, tz=idx.tz, name="idx", ) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index ae4ed04f8adac..3814321513bac 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -377,7 +377,10 @@ def test_setops_preserve_freq(self, tz): result = rng[:50].intersection(rng[25:75]) assert result.name == rng.name - assert result.freqstr == "D" + if tz is None: + assert result.freqstr == "D" + else: + assert result.freqstr == "DayDST" assert result.tz == rng.tz nofreq = DatetimeIndex(list(rng[25:75]), name="other") diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 34e8e2ac3e84a..643f7f02a750b 100644 --- 
a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -874,7 +874,7 @@ def test_resample_origin_epoch_with_tz_day_vs_24h(): result_1 = ts_1.resample("D", origin="epoch").mean() result_2 = ts_1.resample("24H", origin="epoch").mean() - tm.assert_series_equal(result_1, result_2) + tm.assert_series_equal(result_1, result_2, check_freq=False) # check that we have the same behavior with epoch even if we are not timezone aware ts_no_tz = ts_1.tz_localize(None) @@ -897,7 +897,7 @@ def test_resample_origin_with_day_freq_on_dst(): # GH 31809 tz = "America/Chicago" - def _create_series(values, timestamps, freq="D"): + def _create_series(values, timestamps, freq="DayDST"): return Series( values, index=DatetimeIndex( @@ -1484,7 +1484,7 @@ def test_resample_dst_anchor(): dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz="US/Eastern") df = DataFrame([5], index=dti) - dti = DatetimeIndex(df.index.normalize(), freq="D") + dti = DatetimeIndex(df.index.normalize(), freq="DayDST") expected = DataFrame([5], index=dti) tm.assert_frame_equal(df.resample(rule="D").sum(), expected) df.resample(rule="MS").sum() @@ -1618,7 +1618,11 @@ def test_downsample_dst_at_midnight(): dti = date_range("2018-11-03", periods=3).tz_localize( "America/Havana", ambiguous=True ) - dti = DatetimeIndex(dti, freq="D") + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst"): + # Check that we are requiring ambiguous be passed explicitly + dti = DatetimeIndex(dti, freq="D") + dti = DatetimeIndex(dti, freq="DayDST", ambiguous=True) + expected = DataFrame([7.5, 28.0, 44.5], index=dti) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index 70d37f83c7f0c..dc129e1459517 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -281,7 +281,7 @@ def test_resample_with_pytz(self): expected = Series( 2.0, 
index=pd.DatetimeIndex( - ["2017-01-01", "2017-01-02"], tz="US/Eastern", freq="D" + ["2017-01-01", "2017-01-02"], tz="US/Eastern", freq="DayDST" ), ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index 48a3ebd25c239..ba58ad5bbae89 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -164,7 +164,10 @@ def test_dt_namespace_accessor_datetime64tz(self): tz_result = result.dt.tz assert str(tz_result) == "CET" freq_result = ser.dt.freq - assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + assert ( + freq_result + == DatetimeIndex(ser._values._with_freq(None), freq="infer").freq + ) def test_dt_namespace_accessor_timedelta(self): # GH#7207, GH#11128 diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 5f96078ba70b1..0fb75253978e5 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -323,11 +323,12 @@ def test_setitem_invalidates_datetime_index_freq(self): # `freq` attribute on the underlying DatetimeIndex dti = date_range("20130101", periods=3, tz="US/Eastern") + orig_freq = dti.freq ts = dti[1] ser = Series(dti) assert ser._values is not dti assert ser._values._data.base is not dti._data._data.base - assert dti.freq == "D" + assert dti.freq is orig_freq ser.iloc[1] = NaT assert ser._values.freq is None @@ -335,7 +336,7 @@ def test_setitem_invalidates_datetime_index_freq(self): assert ser._values is not dti assert ser._values._data.base is not dti._data._data.base assert dti[1] == ts - assert dti.freq == "D" + assert dti.freq is orig_freq def test_dt64tz_setitem_does_not_mutate_dti(self): # GH#21907, GH#24096 diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index cbbe29fb6cf9a..2e716977f79dc 
100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -267,6 +267,10 @@ def test_infer_freq_index(freq, expected): def test_infer_freq_tz(tz_naive_fixture, expected, dates): # see gh-7310 tz = tz_naive_fixture + + if expected == "D" and tz is not None: + expected = "DayDST" + idx = DatetimeIndex(dates, tz=tz) assert idx.inferred_freq == expected @@ -380,6 +384,24 @@ def test_infer_freq_business_hour(data, expected): assert idx.inferred_freq == expected +def test_infer_freq_across_dst_not_daily(): + # GH#37295 + dti = date_range( + start=Timestamp("2019-03-26 00:00:00-0400", tz="Canada/Eastern"), + end=Timestamp("2020-10-17 00:00:00-0400", tz="Canada/Eastern"), + freq="D", + ) + assert dti.freq == "DayDST" + + diff = dti - dti.shift() + assert not diff.is_unique + + assert dti.inferred_freq == "DayDST" + + dti2 = DatetimeIndex(dti._with_freq(None), freq="infer") + assert dti2.freq == "DayDST" + + def test_not_monotonic(): rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) rng = rng[::-1] diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 134ba79e7773d..9568de90c7f19 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -50,6 +50,7 @@ CustomBusinessMonthBegin, CustomBusinessMonthEnd, DateOffset, + DayDST, Easter, FY5253Quarter, LastWeekOfMonth, @@ -69,6 +70,7 @@ class TestCommon(Base): # used for .apply and .rollforward expecteds = { "Day": Timestamp("2011-01-02 09:00:00"), + "DayDST": Timestamp("2011-01-02 09:00:00"), "DateOffset": Timestamp("2011-01-02 09:00:00"), "BusinessDay": Timestamp("2011-01-03 09:00:00"), "CustomBusinessDay": Timestamp("2011-01-03 09:00:00"), @@ -161,13 +163,13 @@ def test_offset_freqstr(self, offset_types): offset = self._get_offset(offset_types) freqstr = offset.freqstr - if freqstr not in ("<Easter>", "<DateOffset: days=1>", "LWOM-SAT"): + if 
freqstr not in ("DayDST", "<Easter>", "<DateOffset: days=1>", "LWOM-SAT"): code = _get_offset(freqstr) assert offset.rule_code == code def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=False): - if normalize and issubclass(offset, Tick): + if normalize and issubclass(offset, (Tick, DayDST)): # normalize=True disallowed for Tick subclasses GH#21427 return @@ -270,6 +272,7 @@ def test_rollforward(self, offset_types): # result will not be changed if the target is on the offset no_changes = [ "Day", + "DayDST", "MonthBegin", "SemiMonthBegin", "YearBegin", @@ -348,6 +351,7 @@ def test_rollback(self, offset_types): # result will not be changed if the target is on the offset for n in [ "Day", + "DayDST", "MonthBegin", "SemiMonthBegin", "YearBegin", @@ -369,6 +373,7 @@ def test_rollback(self, offset_types): normalized = { "Day": Timestamp("2010-12-31 00:00:00"), + "DayDST": Timestamp("2010-12-31 00:00:00"), "DateOffset": Timestamp("2010-12-31 00:00:00"), "MonthBegin": Timestamp("2010-12-01 00:00:00"), "SemiMonthBegin": Timestamp("2010-12-15 00:00:00"), @@ -400,7 +405,7 @@ def test_is_on_offset(self, offset_types): assert offset_s.is_on_offset(dt) # when normalize=True, is_on_offset checks time is 00:00:00 - if issubclass(offset_types, Tick): + if issubclass(offset_types, (Tick, DayDST)): # normalize=True disallowed for Tick subclasses GH#21427 return offset_n = self._get_offset(offset_types, normalize=True) @@ -432,7 +437,7 @@ def test_add(self, offset_types, tz_naive_fixture): assert result == expected_localize # normalize=True, disallowed for Tick subclasses GH#21427 - if issubclass(offset_types, Tick): + if issubclass(offset_types, (Tick, DayDST)): return offset_s = self._get_offset(offset_types, normalize=True) expected = Timestamp(expected.date()) diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index d7abb19530837..98c61c919a9c0 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py 
@@ -36,6 +36,7 @@ def test_namespace(): "IncompatibleFrequency", "Resolution", "Tick", + "DayDST", "Timedelta", "dt64arr_to_periodarr", "Timestamp", diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 415af96a29aa3..eb77190c451c0 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -23,6 +23,7 @@ from pandas._libs.tslibs.offsets import ( # noqa:F401 DateOffset, Day, + DayDST, _get_offset, to_offset, ) @@ -342,7 +343,23 @@ def _infer_daily_rule(self) -> str | None: return _maybe_add_count(monthly_rule, self.mdiffs[0]) if self.is_unique: - return self._get_daily_rule() + days = self.deltas[0] / _ONE_DAY + if days % 7 == 0: + # Weekly + wd = int_to_weekday[self.rep_stamp.weekday()] + alias = f"W-{wd}" + return _maybe_add_count(alias, days / 7) + + if getattr(self.index, "tz", None) is not None: + return _maybe_add_count("DayDST", days) + + if not self.is_unique_asi8: + # TODO: default to DayDST or Day? + return _maybe_add_count("DayDST", days) + + if self.is_unique_asi8: + days = self.deltas_asi8[0] / _ONE_DAY + return _maybe_add_count("D", days) if self._is_business_daily(): return "B" diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index cee99d23f8d90..0b76028de0cac 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -21,6 +21,7 @@ CustomBusinessMonthEnd, DateOffset, Day, + DayDST, Easter, FY5253Quarter, Hour, @@ -45,6 +46,7 @@ __all__ = [ "Day", + "DayDST", "BusinessDay", "BDay", "CustomBusinessDay",
xref #41943 cc @mroeschke The tests all passed locally up until the last commit, have a handful of broken resample tests now. The approach here is to keep Day as-is and implement DayDST, fix the actively-wrong infer_freq behavior (see test_infer_freq_across_dst_not_daily), then decide what to do with `freq="infer"` and `freq="D"` based on the presence of a tz. I guess a deprecation path would be to warn users passing "D" or Day() that it will have DST-aware semantics in the future, and to keep Timedelta-like behavior they should pass "24H". Would we want to warn in cases where they are equvialent, e.g. tz="UTC" or with a date_range that doesn't happen to cross a DST transition? With such a deprecation cycle, we could avoid implementing DayDST and when the time comes just call it Day.
https://api.github.com/repos/pandas-dev/pandas/pulls/44364
2021-11-09T04:26:07Z
2022-01-16T18:12:41Z
null
2022-01-16T18:12:54Z
CI: xfail tests failing on numpy dev
diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index 3f69321ae98a6..c1b994d4bc4c7 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -181,7 +181,7 @@ def coo_to_sparse_series( Parameters ---------- - A : scipy.sparse.coo.coo_matrix + A : scipy.sparse.coo_matrix dense_index : bool, default False Returns diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 96021bfa18fb7..cc48918981338 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1255,7 +1255,7 @@ def test_to_coo( A, rows, cols = ss.sparse.to_coo( row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels ) - assert isinstance(A, scipy.sparse.coo.coo_matrix) + assert isinstance(A, scipy.sparse.coo_matrix) tm.assert_numpy_array_equal(A.toarray(), expected_A) assert rows == expected_rows assert cols == expected_cols diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 0c0288b49c930..27b06e78d8ce2 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -1634,6 +1634,7 @@ def test_rolling_quantile_np_percentile(): tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) +@pytest.mark.xfail(reason="GH#44343", strict=False) @pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1]) @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
- [x] ref #44343
https://api.github.com/repos/pandas-dev/pandas/pulls/44362
2021-11-08T23:09:44Z
2021-11-09T01:17:50Z
2021-11-09T01:17:50Z
2021-11-09T19:55:27Z
CLN: Removed _SAFE_NAMES_WARNING in io.sql
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 867ce52cbde6f..027bb9889202b 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1818,12 +1818,6 @@ def _get_valid_sqlite_name(name): return '"' + uname.replace('"', '""') + '"' -_SAFE_NAMES_WARNING = ( - "The spaces in these column names will not be changed. " - "In pandas versions < 0.14, spaces were converted to underscores." -) - - class SQLiteTable(SQLTable): """ Patch the SQLTable for fallback support. @@ -1883,12 +1877,6 @@ def _create_table_setup(self): statement while the rest will be CREATE INDEX statements. """ column_names_and_types = self._get_column_names_and_types(self._sql_type_name) - - pat = re.compile(r"\s+") - column_names = [col_name for col_name, _, _ in column_names_and_types] - if any(map(pat.search, column_names)): - warnings.warn(_SAFE_NAMES_WARNING, stacklevel=find_stack_level()) - escape = _get_valid_sqlite_name create_tbl_stmts = [ diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 386f11b3dd794..52c1fc51a4c8d 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1366,13 +1366,6 @@ def test_read_sql_delegate(self): with pytest.raises(sql.DatabaseError, match=msg): sql.read_sql("iris", self.conn) - def test_safe_names_warning(self): - # GH 6798 - df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space - # warns on create table with spaces in names - with tm.assert_produces_warning(UserWarning): - sql.to_sql(df, "test_frame3_legacy", self.conn, index=False) - def test_get_schema2(self, test_frame1): # without providing a connection object (available for backwards comp) create_sql = sql.get_schema(test_frame1, "test")
- [x] closes #44295 - [x] tests passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them Please let me know if I should improve/change something. Thank you very much!
https://api.github.com/repos/pandas-dev/pandas/pulls/44361
2021-11-08T23:06:17Z
2021-11-14T20:24:19Z
2021-11-14T20:24:19Z
2021-11-14T20:24:29Z
TYP: changed variable hashed to combined_hashed in dtype.py
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 21675ca0cdc7c..e20670893f71c 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -468,12 +468,8 @@ def _hash_categories(self) -> int: # error: Incompatible types in assignment (expression has type # "List[ndarray]", variable has type "ndarray") cat_array = [cat_array] # type: ignore[assignment] - # error: Incompatible types in assignment (expression has type "ndarray", - # variable has type "int") - hashed = combine_hash_arrays( # type: ignore[assignment] - iter(cat_array), num_items=len(cat_array) - ) - return np.bitwise_xor.reduce(hashed) + combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) + return np.bitwise_xor.reduce(combined_hashed) @classmethod def construct_array_type(cls) -> type_t[Categorical]:
xref #37715
https://api.github.com/repos/pandas-dev/pandas/pulls/44360
2021-11-08T22:51:37Z
2021-11-09T17:16:23Z
2021-11-09T17:16:22Z
2021-11-09T21:31:52Z
Update cython version for asv conf
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index df50f67432fbb..9ad856d5d03ed 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -39,7 +39,7 @@ // followed by the pip installed packages). "matrix": { "numpy": [], - "Cython": ["0.29.21"], + "Cython": ["0.29.24"], "matplotlib": [], "sqlalchemy": [], "scipy": [], diff --git a/environment.yml b/environment.yml index 7aa7bb0842eca..b4a8b977359cb 100644 --- a/environment.yml +++ b/environment.yml @@ -15,7 +15,7 @@ dependencies: # The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms. - c-compiler - cxx-compiler - - cython>=0.29.21 + - cython>=0.29.24 # code checks - black=21.5b2 diff --git a/requirements-dev.txt b/requirements-dev.txt index 6247b4e5a12b1..5673becbbe1cb 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.18.5 python-dateutil>=2.8.1 pytz asv -cython>=0.29.21 +cython>=0.29.24 black==21.5b2 cpplint flake8==3.9.2
- [x] closes #44067
https://api.github.com/repos/pandas-dev/pandas/pulls/44359
2021-11-08T22:50:32Z
2021-11-09T21:24:47Z
2021-11-09T21:24:46Z
2021-11-09T21:25:00Z
ENH: Use find_stack_level in pandas.core
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index c31368f179ef0..07fa5799fe371 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -9,6 +9,7 @@ import warnings from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level class DirNamesMixin: @@ -267,7 +268,7 @@ def decorator(accessor): f"{repr(name)} for type {repr(cls)} is overriding a preexisting " f"attribute with the same name.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) setattr(cls, name, CachedAccessor(name, accessor)) cls._accessors.add(name) diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index fe09a044566f8..11d32e8a159f3 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -11,6 +11,7 @@ import numpy as np from pandas._libs import lib +from pandas.util._exceptions import find_stack_level from pandas.core.construction import extract_array from pandas.core.ops import ( @@ -210,7 +211,7 @@ def _maybe_fallback(ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): "or align manually (eg 'df1, df2 = df1.align(df2)') before passing to " "the ufunc to obtain the future behaviour and silence this warning.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # keep the first dataframe of the inputs, other DataFrame/Series is diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9c43e3714c332..759c7fb65374d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -390,7 +390,7 @@ def __init__( "Allowing scalars in the Categorical constructor is deprecated " "and will raise in a future version. Use `[value]` instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) values = [values] @@ -945,7 +945,7 @@ def set_categories( "a future version. 
Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1045,7 +1045,7 @@ def rename_categories(self, new_categories, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1177,7 +1177,7 @@ def add_categories(self, new_categories, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1252,7 +1252,7 @@ def remove_categories(self, removals, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1327,7 +1327,7 @@ def remove_unused_categories(self, inplace=no_default): "remove_unused_categories is deprecated and " "will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1884,7 +1884,7 @@ def to_dense(self) -> np.ndarray: "Categorical.to_dense is deprecated and will be removed in " "a future version. Use np.asarray(cat) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return np.asarray(self) @@ -1901,7 +1901,7 @@ def _codes(self, value: np.ndarray): "Setting the codes on a Categorical is deprecated and will raise in " "a future version. 
Create a new Categorical object instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # GH#40606 NDArrayBacked.__init__(self, value, self.dtype) @@ -1924,7 +1924,7 @@ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None): warn( "Categorical.take_nd is deprecated, use Categorical.take instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value) @@ -2344,7 +2344,7 @@ def is_dtype_equal(self, other) -> bool: "Categorical.is_dtype_equal is deprecated and will be removed " "in a future version", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) try: return self._categories_match_up_to_permutation(other) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4fecbe4be9681..a0a7ef3501d7f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1206,7 +1206,7 @@ def to_perioddelta(self, freq) -> TimedeltaArray: "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex - stacklevel=3, + stacklevel=find_stack_level(), ) from pandas.core.arrays.timedeltas import TimedeltaArray @@ -1373,7 +1373,7 @@ def weekofyear(self): "weekofyear and return an Index, you may call " "pd.Int64Index(idx.isocalendar().week)", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) week_series = self.isocalendar().week if week_series.hasnans: diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 960544a2f89ea..c054710a01f75 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -467,7 +467,7 @@ def __init__( "loses timezone information. 
Cast to object before " "sparse to retain timezone information.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) data = np.asarray(data, dtype="datetime64[ns]") if fill_value is NaT: @@ -1089,7 +1089,7 @@ def searchsorted( ) -> npt.NDArray[np.intp] | np.intp: msg = "searchsorted requires high memory usage." - warnings.warn(msg, PerformanceWarning, stacklevel=2) + warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) if not is_scalar(v): v = np.asarray(v) v = np.asarray(v) diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 915e13bc3bbb2..d23e217e605c7 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -16,6 +16,7 @@ type_t, ) from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, @@ -389,7 +390,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: f"values: '{fill_values}'. Picking the first and " "converting the rest.", PerformanceWarning, - stacklevel=6, + stacklevel=find_stack_level(), ) np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] diff --git a/pandas/core/common.py b/pandas/core/common.py index 2bf925466e176..590296c4b12f5 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -36,6 +36,7 @@ Scalar, T, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -175,7 +176,7 @@ def cast_scalar_indexer(val, warn_float: bool = False): "Indexing with a float is deprecated, and will raise an IndexError " "in pandas 2.0. 
You can manually convert to an integer key instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return int(val) return val diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index a4bd0270f9451..f14882227ddd9 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -16,6 +16,7 @@ import numpy as np from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.generic import ( ABCDataFrame, @@ -126,7 +127,9 @@ def _align_core(terms): f"than an order of magnitude on term {repr(terms[i].name)}, " f"by more than {ordm:.4g}; performance may suffer." ) - warnings.warn(w, category=PerformanceWarning, stacklevel=6) + warnings.warn( + w, category=PerformanceWarning, stacklevel=find_stack_level() + ) f = partial(ti.reindex, reindexer, axis=axis, copy=False) diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 26748eadb4c85..d82cc37b90ad4 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -7,6 +7,7 @@ import warnings from pandas._libs.lib import no_default +from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.computation.engines import ENGINES @@ -308,7 +309,7 @@ def eval( "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) exprs: list[str | BinOp] diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 0081f8cd074b6..31c2ec8f0cbf9 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -25,6 +25,8 @@ is_text, ) +from pandas.util._exceptions import find_stack_level + # compute use_bottleneck_doc = """ @@ -373,7 +375,7 @@ def _deprecate_negative_int_max_colwidth(key): "will not be supported in future version. 
Instead, use None " "to not limit the column width.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) cf.register_option( diff --git a/pandas/core/construction.py b/pandas/core/construction.py index c6f131a9daba6..e3b41f2c7b8c2 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -25,6 +25,7 @@ DtypeObj, ) from pandas.errors import IntCastingNaNError +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, @@ -538,7 +539,7 @@ def sanitize_array( "if they cannot be cast losslessly (matching Series behavior). " "To retain the old behavior, use DataFrame(data).astype(dtype)", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # GH#40110 until the deprecation is enforced, we _dont_ # ignore the dtype for DataFrame, and _do_ cast even though @@ -777,7 +778,7 @@ def _try_cast( "passed to 'DataFrame', either all columns will be cast to that " "dtype, or a TypeError will be raised.", FutureWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) subarr = np.array(arr, dtype=object, copy=copy) return subarr diff --git a/pandas/core/describe.py b/pandas/core/describe.py index 2c4a340e8c8ea..8d88ce280d5c8 100644 --- a/pandas/core/describe.py +++ b/pandas/core/describe.py @@ -23,6 +23,7 @@ from pandas._libs.tslibs import Timestamp from pandas._typing import NDFrameT +from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( @@ -377,7 +378,7 @@ def select_describe_func( "version of pandas. 
Specify `datetime_is_numeric=True` to " "silence this warning and adopt the future behavior now.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) return describe_timestamp_as_categorical_1d elif is_timedelta64_dtype(data.dtype): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 432074a8dd699..2c26d6f838315 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -563,7 +563,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): "dtype is deprecated. In a future version, this will be cast " "to object dtype. Pass `fill_value=Timestamp(date_obj)` instead.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) return dtype, fv elif isinstance(fill_value, str): @@ -1133,7 +1133,7 @@ def astype_nansafe( "Use .view(...) instead.", FutureWarning, # stacklevel chosen to be correct when reached via Series.astype - stacklevel=7, + stacklevel=find_stack_level(), ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") @@ -1155,7 +1155,7 @@ def astype_nansafe( "Use .view(...) 
instead.", FutureWarning, # stacklevel chosen to be correct when reached via Series.astype - stacklevel=7, + stacklevel=find_stack_level(), ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") @@ -1651,7 +1651,7 @@ def maybe_cast_to_datetime( "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=8, + stacklevel=find_stack_level(), ) # equiv: dta.view(dtype) # Note: NOT equivalent to dta.astype(dtype) @@ -1691,7 +1691,7 @@ def maybe_cast_to_datetime( ".tz_localize('UTC').tz_convert(dtype.tz) " "or pd.Series(data.view('int64'), dtype=dtype)", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) value = dta.tz_localize("UTC").tz_convert(dtype.tz) @@ -1859,7 +1859,7 @@ def construct_2d_arraylike_from_scalar( shape = (length, width) if dtype.kind in ["m", "M"]: - value = maybe_unbox_datetimelike_tz_deprecation(value, dtype, stacklevel=4) + value = maybe_unbox_datetimelike_tz_deprecation(value, dtype) # error: Non-overlapping equality check (left operand type: "dtype[Any]", right # operand type: "Type[object]") elif dtype == object: # type: ignore[comparison-overlap] @@ -1932,9 +1932,7 @@ def construct_1d_arraylike_from_scalar( return subarr -def maybe_unbox_datetimelike_tz_deprecation( - value: Scalar, dtype: DtypeObj, stacklevel: int = 5 -): +def maybe_unbox_datetimelike_tz_deprecation(value: Scalar, dtype: DtypeObj): """ Wrap maybe_unbox_datetimelike with a check for a timezone-aware Timestamp along with a timezone-naive datetime64 dtype, which is deprecated. 
@@ -1963,7 +1961,7 @@ def maybe_unbox_datetimelike_tz_deprecation( "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=stacklevel, + stacklevel=find_stack_level(), ) new_value = value.tz_localize(None) return maybe_unbox_datetimelike(new_value, dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 815a0a2040ddb..7ac8e6c47158c 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -22,6 +22,7 @@ ArrayLike, DtypeObj, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( @@ -304,7 +305,7 @@ def is_categorical(arr) -> bool: "is_categorical is deprecated and will be removed in a future version. " "Use is_categorical_dtype instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) @@ -1378,7 +1379,7 @@ def is_extension_type(arr) -> bool: "'is_extension_type' is deprecated and will be removed in a future " "version. Use 'is_extension_array_dtype' instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if is_categorical_dtype(arr): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1f26b6d9ae6ae..b01de5dec610d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -83,6 +83,7 @@ doc, rewrite_axis_style_signature, ) +from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_axis_style_args, @@ -643,7 +644,7 @@ def __init__( "removed in a future version. 
Pass " "{name: data[name] for name in data.dtype.names} instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # a masked array @@ -1793,7 +1794,7 @@ def to_dict(self, orient: str = "dict", into=dict): warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # GH16122 into_c = com.standardize_mapping(into) @@ -1814,7 +1815,7 @@ def to_dict(self, orient: str = "dict", into=dict): "will be used in a future version. Use one of the above " "to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if orient.startswith("d"): @@ -2659,7 +2660,7 @@ def to_markdown( "'showindex' is deprecated. Only 'index' will be used " "in a future version. Use 'index' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) kwargs.setdefault("headers", "keys") @@ -3218,7 +3219,7 @@ def info( warnings.warn( "null_counts is deprecated. Use show_counts instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) show_counts = null_counts info = DataFrameInfo( @@ -3591,7 +3592,7 @@ def _getitem_bool_array(self, key): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( @@ -4637,7 +4638,7 @@ def lookup( "You can use DataFrame.melt and DataFrame.loc " "as a substitute." ) - warnings.warn(msg, FutureWarning, stacklevel=2) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) n = len(row_labels) if n != len(col_labels): @@ -7754,7 +7755,7 @@ def groupby( "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: squeeze = False @@ -9844,7 +9845,7 @@ def count( "deprecated and will be removed in a future version. Use groupby " "instead. 
df.count(level=1) should use df.groupby(level=1).count().", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._count_level(level, axis=axis, numeric_only=numeric_only) @@ -9944,7 +9945,7 @@ def _reduce( "will include datetime64 and datetime64tz columns in a " "future version.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) # Non-copy equivalent to # dt64_cols = self.dtypes.apply(is_datetime64_any_dtype) @@ -10019,7 +10020,7 @@ def _get_data() -> DataFrame: "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) return out @@ -10052,7 +10053,7 @@ def _get_data() -> DataFrame: "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) if hasattr(result, "dtype"): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 93bf70c27f8ff..23608cf0192df 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3647,7 +3647,7 @@ class max_speed "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) @@ -3781,7 +3781,7 @@ class animal locomotion "Passing lists as key for xs is deprecated and will be removed in a " "future version. 
Pass key as a tuple instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if level is not None: @@ -5556,7 +5556,7 @@ def __setattr__(self, name: str, value) -> None: "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", - stacklevel=2, + stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) @@ -7774,7 +7774,7 @@ def between_time( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) left = True if isinstance(include_start, lib.NoDefault) else include_start right = True if isinstance(include_end, lib.NoDefault) else include_end @@ -9190,7 +9190,7 @@ def where( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._where(cond, other, inplace, axis, level, errors=errors) @@ -9222,7 +9222,7 @@ def mask( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) # see gh-21891 @@ -9415,7 +9415,7 @@ def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: "and will be removed in a future version. " "You can use DataFrame/Series.shift instead." ) - warnings.warn(msg, FutureWarning, stacklevel=2) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) if periods == 0: return self @@ -9467,7 +9467,7 @@ def tshift(self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0) -> NDFra "Please use shift instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if freq is None: @@ -10282,7 +10282,7 @@ def _logical_func( "deprecated and will be removed in a future version. Use groupby " "instead. 
df.any(level=1) should use df.groupby(level=1).any()", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if bool_only is not None: raise NotImplementedError( @@ -10378,7 +10378,7 @@ def _stat_function_ddof( "deprecated and will be removed in a future version. Use groupby " "instead. df.var(level=1) should use df.groupby(level=1).var().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, ddof=ddof @@ -10431,7 +10431,7 @@ def _stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. df.median(level=1) should use df.groupby(level=1).median().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only @@ -10498,7 +10498,7 @@ def _min_count_stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. df.sum(level=1) should use df.groupby(level=1).sum().", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return self._agg_by_level( name, @@ -10582,7 +10582,7 @@ def mad(self, axis=None, skipna=None, level=None): "deprecated and will be removed in a future version. Use groupby " "instead. 
df.mad(level=1) should use df.groupby(level=1).mad()", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna) @@ -10980,7 +10980,7 @@ def expanding( warnings.warn( "The `center` argument on `expanding` will be removed in the future.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: center = False diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 8a330d08bef78..3c45f7263265c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -37,6 +37,7 @@ Substitution, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_int64, @@ -1330,7 +1331,7 @@ def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: "Indexing with multiple keys (implicitly converted to a tuple " "of keys) will be deprecated, use a list instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return super().__getitem__(key) diff --git a/pandas/core/index.py b/pandas/core/index.py index 13a687b1c27e3..00ca6f9048a40 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,5 +1,7 @@ import warnings +from pandas.util._exceptions import find_stack_level + from pandas.core.indexes.api import ( # noqa:F401 CategoricalIndex, DatetimeIndex, @@ -26,5 +28,5 @@ "pandas.core.index is deprecated and will be removed in a future version. " "The public classes are available in the top-level namespace.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index b1824413512c5..41920727c50fd 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -399,7 +399,7 @@ def unpack_1tuple(tup): "slice is deprecated and will raise in a future " "version. 
Pass a tuple instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return tup[0] diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index b8f4b5f9d3423..3aad1140294e5 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -8,6 +8,8 @@ import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_dtype, @@ -286,7 +288,7 @@ def weekofyear(self): "Series.dt.weekofyear and Series.dt.week have been deprecated. " "Please use Series.dt.isocalendar().week instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) week_series = self.isocalendar().week week_series.name = self.name diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2514702b036dd..9715bf8f61f3c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -399,7 +399,7 @@ def __new__( "'tupleize_cols' is deprecated and will raise TypeError in a " "future version. 
Use the specific Index subclass directly instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.arrays import PandasArray @@ -632,7 +632,7 @@ def asi8(self): warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return None @@ -746,7 +746,7 @@ def _get_attributes_dict(self) -> dict[str_t, Any]: "The Index._get_attributes_dict method is deprecated, and will be " "removed in a future version", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return {k: getattr(self, k, None) for k in self._attributes} @@ -919,7 +919,7 @@ def ravel(self, order="C"): "Index.ravel returning ndarray is deprecated; in a future version " "this will return a view on self.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if needs_i8_conversion(self.dtype): # Item "ndarray[Any, Any]" of "Union[ExtensionArray, ndarray[Any, Any]]" @@ -1191,7 +1191,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index @@ -1371,7 +1371,7 @@ def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: "The 'to_native_types' method is deprecated and will be removed in " "a future version. Use 'astype(str)' instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) values = self if slicer is not None: @@ -2503,7 +2503,7 @@ def is_mixed(self) -> bool: "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.inferred_type in ["mixed"] @@ -2538,7 +2538,7 @@ def is_all_dates(self) -> bool: "Index.is_all_dates is deprecated, will be removed in a future version. 
" "check index.inferred_type instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._is_all_dates @@ -2905,7 +2905,7 @@ def __and__(self, other): "in the future this will be a logical operation matching " "Series.__and__. Use index.intersection(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.intersection(other) @@ -2916,7 +2916,7 @@ def __or__(self, other): "in the future this will be a logical operation matching " "Series.__or__. Use index.union(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.union(other) @@ -2927,7 +2927,7 @@ def __xor__(self, other): "in the future this will be a logical operation matching " "Series.__xor__. Use index.symmetric_difference(other) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.symmetric_difference(other) @@ -3073,7 +3073,7 @@ def union(self, other, sort=None): "object dtype. To retain the old behavior, " "use `index.astype(object).union(other)`", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) dtype = self._find_common_type_compat(other) @@ -3524,7 +3524,7 @@ def get_loc(self, key, method=None, tolerance=None): "and will raise in a future version. Use " "index.get_indexer([item], method=...) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if is_scalar(key) and isna(key) and not self.hasnans: @@ -3958,7 +3958,7 @@ def is_int(v): "and will raise TypeError in a future version. 
" "Use .loc with labels or .iloc with positions instead.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) indexer = key else: @@ -4107,7 +4107,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and " "will raise in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) target = self._wrap_reindex_result(target, indexer, preserve_names) @@ -4848,7 +4848,7 @@ def is_type_compatible(self, kind: str_t) -> bool: "Index.is_type_compatible is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return kind == self.inferred_type @@ -5485,7 +5485,7 @@ def get_value(self, series: Series, key): "get_value is deprecated and will be removed in a future version. " "Use Series[key] instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self._check_indexing_error(key) @@ -5553,7 +5553,7 @@ def set_value(self, arr, key, value): "will be removed in a future version." 
), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) loc = self._engine.get_loc(key) validate_numeric_casting(arr.dtype, value) @@ -7023,7 +7023,7 @@ def _maybe_cast_data_without_dtype( "In a future version, the Index constructor will not infer numeric " "dtypes when passed object-dtype sequences (matching Series behavior)", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if result.dtype.kind in ["b", "c"]: return subarr @@ -7081,6 +7081,6 @@ def _maybe_try_sort(result, sort): warnings.warn( f"{err}, sort order is undefined for incomparable objects.", RuntimeWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e2dd5ecfde5a8..f26a24c38b19f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -17,6 +17,7 @@ npt, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -218,7 +219,7 @@ def __new__( "deprecated and will raise in a future version. " "Use CategoricalIndex([], ...) 
instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) data = [] @@ -431,7 +432,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and will " "raise in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if len(self) and indexer is not None: @@ -506,7 +507,7 @@ def take_nd(self, *args, **kwargs): "CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take " "instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.take(*args, **kwargs) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index a0902a5fb32fe..104bce0369d37 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -36,6 +36,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -403,7 +404,7 @@ def is_type_compatible(self, kind: str) -> bool: f"{type(self).__name__}.is_type_compatible is deprecated and will be " "removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return kind in self._data._infer_matches diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6078da3bedd8c..e283509206344 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -495,7 +495,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "is deprecated and will be removed in a future version. 
" "You can stop passing 'keep_tz' to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: warnings.warn( @@ -505,7 +505,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "can do 'idx.tz_convert(None)' before calling " "'to_series'.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: keep_tz = True @@ -752,7 +752,7 @@ def check_str_or_none(point): "with non-existing keys is deprecated and will raise a " "KeyError in a future Version.", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) indexer = mask.nonzero()[0][::step] if len(indexer) == len(self): @@ -1042,7 +1042,7 @@ def date_range( warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if closed is None: inclusive = "both" diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fe97d61be7548..128aa8e282a0d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -46,6 +46,7 @@ deprecate_nonkeyword_arguments, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ( @@ -893,7 +894,7 @@ def set_levels( warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1054,7 +1055,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = Tr warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) else: inplace = False @@ -1166,14 +1167,14 @@ def copy( "parameter levels is deprecated and will be removed in a future " "version. 
Use the set_levels method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if codes is not None: warnings.warn( "parameter codes is deprecated and will be removed in a future " "version. Use the set_codes method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if deep: @@ -1202,7 +1203,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index @@ -1802,7 +1803,7 @@ def is_lexsorted(self) -> bool: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._is_lexsorted() @@ -1846,7 +1847,7 @@ def lexsort_depth(self) -> int: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._lexsort_depth @@ -2212,7 +2213,7 @@ def drop(self, codes, level=None, errors="raise"): "dropping on a non-lexsorted multi-index " "without a level parameter may impact performance.", PerformanceWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) loc = loc.nonzero()[0] inds.extend(loc) @@ -2877,7 +2878,7 @@ def _maybe_to_slice(loc): warnings.warn( "indexing past lexsort depth may impact performance.", PerformanceWarning, - stacklevel=10, + stacklevel=find_stack_level(), ) loc = np.arange(start, stop, dtype=np.intp) @@ -3335,7 +3336,7 @@ def _update_indexer(idxr: Index, indexer: Index) -> Index: # TODO: how to handle IntervalIndex level? 
# (no test cases) FutureWarning, - stacklevel=7, + stacklevel=find_stack_level(), ) continue else: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 4d8c411478993..25b43c556b812 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -21,6 +21,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( @@ -421,7 +422,7 @@ def asi8(self) -> npt.NDArray[np.int64]: warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._values.view(self._default_dtype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fd5b5bb7396af..23851eff252b4 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -25,6 +25,7 @@ DtypeObj, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_datetime64_any_dtype, @@ -346,7 +347,7 @@ def astype(self, dtype, copy: bool = True, how=lib.no_default): "will be removed in a future version. 
" "Use index.to_timestamp(how=how) instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: how = "start" diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index aed7a7a467db3..fdb1ee754a7e6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -29,6 +29,7 @@ cache_readonly, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_platform_int, @@ -256,7 +257,7 @@ def _start(self) -> int: warnings.warn( self._deprecation_message.format("_start", "start"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.start @@ -279,7 +280,7 @@ def _stop(self) -> int: warnings.warn( self._deprecation_message.format("_stop", "stop"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.stop @@ -303,7 +304,7 @@ def _step(self) -> int: warnings.warn( self._deprecation_message.format("_step", "step"), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.step @@ -456,7 +457,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. 
Use the astype method instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) new_index = new_index.astype(dtype) return new_index diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 669274e034905..e773bf5ffb7f4 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -16,6 +16,7 @@ InvalidIndexError, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_array_like, @@ -1381,7 +1382,7 @@ def _has_valid_setitem_indexer(self, indexer) -> bool: "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if not isinstance(indexer, tuple): @@ -2298,7 +2299,7 @@ def convert_to_index_sliceable(obj: DataFrame, key): "and will be removed in a future version. Use `frame.loc[string]` " "instead.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) return res except (KeyError, ValueError, NotImplementedError): diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 6cbaae3fe12e0..75715bdc90003 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -44,12 +44,14 @@ def __getattr__(name: str): import warnings + from pandas.util._exceptions import find_stack_level + if name == "CategoricalBlock": warnings.warn( "CategoricalBlock is deprecated and will be removed in a future version. " "Use ExtensionBlock instead.", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.internals.blocks import CategoricalBlock diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 66a40b962e183..55e5b0d0439fa 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -190,7 +190,7 @@ def is_categorical(self) -> bool: "future version. Use isinstance(block.values, Categorical) " "instead. 
See https://github.com/pandas-dev/pandas/issues/40226", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return isinstance(self.values, Categorical) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 159c20382dcfb..e6d6b561803d6 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -23,6 +23,7 @@ DtypeObj, Manager, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, @@ -830,7 +831,7 @@ def to_arrays( "To retain the old behavior, pass as a dictionary " "DataFrame({col: categorical, ..})", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) if columns is None: columns = default_index(len(data)) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b4d6e0ace4223..cb0c3e05e955f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1192,7 +1192,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: "Consider joining all columns at once using pd.concat(axis=1) " "instead. 
To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: @@ -1637,7 +1637,7 @@ def __init__( "The `fastpath` keyword is deprecated and will be removed " "in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) self.axes = [axis] diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index ece5b21fa2f8e..540a557f7c7cc 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -14,6 +14,7 @@ from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401 from pandas._typing import Level from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_array_like, @@ -300,7 +301,7 @@ def to_series(right): "Do `left, right = left.align(right, axis=1, copy=False)` " "before e.g. `left == right`", FutureWarning, - stacklevel=5, + stacklevel=find_stack_level(), ) left, right = left.align( diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 1b217a592987f..7026e470df1c0 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -10,6 +10,7 @@ Appender, deprecate_kwarg, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_extension_array_dtype, @@ -58,7 +59,7 @@ def melt( "In the future this will raise an error, please set the 'value_name' " "parameter of DataFrame.melt to a unique name.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) if id_vars is not None: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index a88d1dce693f6..4dd15dd367581 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -35,6 +35,7 @@ Appender, Substitution, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import 
find_common_type from pandas.core.dtypes.common import ( @@ -676,7 +677,7 @@ def __init__( ) # stacklevel chosen to be correct when this is reached via pd.merge # (and not DataFrame.join) - warnings.warn(msg, FutureWarning, stacklevel=3) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) self._validate_specification() @@ -2297,7 +2298,7 @@ def _items_overlap_with_suffix( "unexpected results. Provide 'suffixes' as a tuple instead. In the " "future a 'TypeError' will be raised.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) to_rename = left.intersection(right) @@ -2347,7 +2348,7 @@ def renamer(x, suffix): f"Passing 'suffixes' which cause duplicate columns {set(dups)} in the " f"result is deprecated and will raise a MergeError in a future version.", FutureWarning, - stacklevel=4, + stacklevel=find_stack_level(), ) return llabels, rlabels diff --git a/pandas/core/series.py b/pandas/core/series.py index 996af80139458..b3c9167bfbbab 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,6 +54,7 @@ deprecate_nonkeyword_arguments, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, @@ -360,7 +361,7 @@ def __init__( "of 'float64' in a future version. Specify a dtype explicitly " "to silence this warning.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # uncomment the line below when removing the FutureWarning # dtype = np.dtype(object) @@ -886,7 +887,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series: "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) nv.validate_take((), kwargs) @@ -1078,7 +1079,7 @@ def __setitem__(self, key, value) -> None: "Series. 
Use `series.iloc[an_int] = val` to treat the " "key as positional.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(key, value) @@ -1887,7 +1888,7 @@ def groupby( "will be removed in a future version." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) else: squeeze = False @@ -1949,7 +1950,7 @@ def count(self, level=None): "deprecated and will be removed in a future version. Use groupby " "instead. ser.count(level=1) should use ser.groupby(level=1).count().", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") @@ -5135,7 +5136,7 @@ def between(self, left, right, inclusive="both") -> Series: "Boolean inputs to the `inclusive` argument are deprecated in " "favour of `both` or `neither`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) if inclusive: inclusive = "both" diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 1e27febab2af9..f82e1aa5d188c 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -19,6 +19,7 @@ F, ) from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -238,7 +239,7 @@ def __iter__(self): warnings.warn( "Columnar iteration over characters will be deprecated in future releases.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) i = 0 g = self.get(i) @@ -1214,7 +1215,7 @@ def contains(self, pat, case=True, flags=0, na=None, regex=True): "This pattern has match groups. 
To actually get the " "groups, use str.extract.", UserWarning, - stacklevel=3, + stacklevel=find_stack_level(), ) result = self._data.array._str_contains(pat, case, flags, na, regex) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 669a39fcb3a74..67a6975c21fdd 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -39,6 +39,7 @@ ArrayLike, Timezone, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -1109,7 +1110,7 @@ def to_time(arg, format=None, infer_time_format=False, errors="raise"): "`to_time` has been moved, should be imported from pandas.core.tools.times. " "This alias will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) from pandas.core.tools.times import to_time diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index c17af442fe2cc..f5f681d9de797 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -640,7 +640,7 @@ def vol(self, bias: bool = False, *args, **kwargs): "Use std instead." ), FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self.std(bias, *args, **kwargs) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b04aab3755b91..f7799912937b7 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -167,7 +167,7 @@ def win_type(self): "win_type will no longer return 'freq' in a future version. 
" "Check the type of self.window instead.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return "freq" return self._win_type @@ -177,7 +177,7 @@ def is_datetimelike(self) -> bool: warnings.warn( "is_datetimelike is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._win_freq_i8 is not None @@ -185,7 +185,7 @@ def validate(self) -> None: warnings.warn( "validate is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(), ) return self._validate() @@ -1763,6 +1763,7 @@ def count(self): "Specify min_periods=0 instead." ), FutureWarning, + stacklevel=find_stack_level(), ) self.min_periods = 0 result = super().count()
Part of #44347 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them This handles most of the stacklevels in core. A few more will take a bit more effort, separating out into a separate PR. After all stacklevels are replaced, will be removing `check_stacklevel=False` from the tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/44358
2021-11-08T22:47:15Z
2021-11-12T03:19:05Z
2021-11-12T03:19:05Z
2021-12-04T15:25:42Z
DOC: Clarify DST rounding behavior in Timestamp/DatetimeIndex
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 0ec0fb9e814c1..0cbae74ecadac 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -792,6 +792,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted + Notes + ----- + If the Timestamp has a timezone, rounding will take place relative to the + local ("wall") time and re-localized to the same timezone. When rounding + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- Create a timestamp object: @@ -826,6 +833,17 @@ timedelta}, default 'raise' >>> pd.NaT.round() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.round("H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.round("H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """, ) floor = _make_nat_func( @@ -863,6 +881,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted. + Notes + ----- + If the Timestamp has a timezone, flooring will take place relative to the + local ("wall") time and re-localized to the same timezone. When flooring + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- Create a timestamp object: @@ -897,6 +922,17 @@ timedelta}, default 'raise' >>> pd.NaT.floor() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. 
+ + >>> ts_tz = pd.Timestamp("2021-10-31 03:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.floor("2H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.floor("2H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """, ) ceil = _make_nat_func( @@ -934,6 +970,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted. + Notes + ----- + If the Timestamp has a timezone, ceiling will take place relative to the + local ("wall") time and re-localized to the same timezone. When ceiling + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- Create a timestamp object: @@ -968,6 +1011,17 @@ timedelta}, default 'raise' >>> pd.NaT.ceil() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.ceil("H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.ceil("H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """, ) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index bf3b3ed0264a0..9ea347594229f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1462,6 +1462,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted + Notes + ----- + If the Timestamp has a timezone, rounding will take place relative to the + local ("wall") time and re-localized to the same timezone. When rounding + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. 
+ Examples -------- Create a timestamp object: @@ -1496,6 +1503,17 @@ timedelta}, default 'raise' >>> pd.NaT.round() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.round("H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.round("H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """ return self._round( freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent @@ -1535,6 +1553,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted. + Notes + ----- + If the Timestamp has a timezone, flooring will take place relative to the + local ("wall") time and re-localized to the same timezone. When flooring + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- Create a timestamp object: @@ -1569,6 +1594,17 @@ timedelta}, default 'raise' >>> pd.NaT.floor() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> ts_tz = pd.Timestamp("2021-10-31 03:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.floor("2H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.floor("2H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """ return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) @@ -1606,6 +1642,13 @@ timedelta}, default 'raise' ------ ValueError if the freq cannot be converted. + Notes + ----- + If the Timestamp has a timezone, ceiling will take place relative to the + local ("wall") time and re-localized to the same timezone. 
When ceiling + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- Create a timestamp object: @@ -1640,6 +1683,17 @@ timedelta}, default 'raise' >>> pd.NaT.ceil() NaT + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> ts_tz = pd.Timestamp("2021-10-31 01:30:00").tz_localize("Europe/Amsterdam") + + >>> ts_tz.ceil("H", ambiguous=False) + Timestamp('2021-10-31 02:00:00+0100', tz='Europe/Amsterdam') + + >>> ts_tz.ceil("H", ambiguous=True) + Timestamp('2021-10-31 02:00:00+0200', tz='Europe/Amsterdam') """ return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a8cc07c8fd964..6f18db6caab7d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1636,6 +1636,13 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]: ------ ValueError if the `freq` cannot be converted. + Notes + ----- + If the timestamps have a timezone, {op}ing will take place relative to the + local ("wall") time and re-localized to the same timezone. When {op}ing + near daylight savings time, use ``nonexistent`` and ``ambiguous`` to + control the re-localization behavior. + Examples -------- **DatetimeIndex** @@ -1659,6 +1666,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]: 1 2018-01-01 12:00:00 2 2018-01-01 12:00:00 dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. 
+ + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) """ _floor_example = """>>> rng.floor('H') @@ -1673,6 +1693,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]: 1 2018-01-01 12:00:00 2 2018-01-01 12:00:00 dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.floor("2H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.floor("2H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) """ _ceil_example = """>>> rng.ceil('H') @@ -1687,6 +1720,19 @@ def strftime(self, date_format: str) -> npt.NDArray[np.object_]: 1 2018-01-01 12:00:00 2 2018-01-01 13:00:00 dtype: datetime64[ns] + + When rounding near a daylight savings time transition, use ``ambiguous`` or + ``nonexistent`` to control how the timestamp should be re-localized. + + >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam") + + >>> rng_tz.ceil("H", ambiguous=False) + DatetimeIndex(['2021-10-31 02:00:00+01:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) + + >>> rng_tz.ceil("H", ambiguous=True) + DatetimeIndex(['2021-10-31 02:00:00+02:00'], + dtype='datetime64[ns, Europe/Amsterdam]', freq=None) """
xref https://github.com/pandas-dev/pandas/issues/44287 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44357
2021-11-08T22:35:44Z
2021-11-17T13:52:38Z
2021-11-17T13:52:38Z
2021-11-17T17:48:56Z
Fixed regression in Series.duplicated for categorical dtype with bool categories
diff --git a/doc/source/whatsnew/v1.3.5.rst b/doc/source/whatsnew/v1.3.5.rst index 589092c0dd7e3..951b05b65c81b 100644 --- a/doc/source/whatsnew/v1.3.5.rst +++ b/doc/source/whatsnew/v1.3.5.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :meth:`Series.equals` when comparing floats with dtype object to None (:issue:`44190`) - Fixed performance regression in :func:`read_csv` (:issue:`44106`) +- Fixed regression in :meth:`Series.duplicated` and :meth:`Series.drop_duplicates` when Series has :class:`Categorical` dtype with boolean categories (:issue:`44351`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c1b587ce3a6b2..8c2c01b6aedc8 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -148,7 +148,7 @@ def _ensure_data(values: ArrayLike) -> np.ndarray: # i.e. all-bool Categorical, BooleanArray try: return np.asarray(values).astype("uint8", copy=False) - except TypeError: + except (TypeError, ValueError): # GH#42107 we have pd.NAs present return np.asarray(values) diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index 7eb51f8037792..f72d85337df8e 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -2,6 +2,7 @@ import pytest from pandas import ( + NA, Categorical, Series, ) @@ -224,6 +225,20 @@ def test_drop_duplicates_categorical_bool(self, ordered): assert return_value is None tm.assert_series_equal(sc, tc[~expected]) + def test_drop_duplicates_categorical_bool_na(self): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, NA], categories=[True, False], ordered=True + ) + ) + result = ser.drop_duplicates() + expected = Series( + Categorical([True, False, np.nan], categories=[True, False], ordered=True), + index=[0, 1, 4], + ) + 
tm.assert_series_equal(result, expected) + def test_drop_duplicates_pos_args_deprecation(): # GH#41485 diff --git a/pandas/tests/series/methods/test_duplicated.py b/pandas/tests/series/methods/test_duplicated.py index 5cc297913e851..c61492168da63 100644 --- a/pandas/tests/series/methods/test_duplicated.py +++ b/pandas/tests/series/methods/test_duplicated.py @@ -1,7 +1,11 @@ import numpy as np import pytest -from pandas import Series +from pandas import ( + NA, + Categorical, + Series, +) import pandas._testing as tm @@ -33,3 +37,15 @@ def test_duplicated_nan_none(keep, expected): result = ser.duplicated(keep=keep) tm.assert_series_equal(result, expected) + + +def test_duplicated_categorical_bool_na(): + # GH#44351 + ser = Series( + Categorical( + [True, False, True, False, NA], categories=[True, False], ordered=True + ) + ) + result = ser.duplicated() + expected = Series([False, False, True, True, False]) + tm.assert_series_equal(result, expected)
- [x] closes #44351 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44356
2021-11-08T22:10:59Z
2021-11-12T03:09:36Z
2021-11-12T03:09:35Z
2021-11-12T11:44:04Z
CLN: split giant dt accessor tests
diff --git a/pandas/tests/series/accessors/test_dt_accessor.py b/pandas/tests/series/accessors/test_dt_accessor.py index eb7e1d4268605..48a3ebd25c239 100644 --- a/pandas/tests/series/accessors/test_dt_accessor.py +++ b/pandas/tests/series/accessors/test_dt_accessor.py @@ -39,121 +39,136 @@ ) import pandas.core.common as com +ok_for_period = PeriodArray._datetimelike_ops +ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"] +ok_for_dt = DatetimeArray._datetimelike_ops +ok_for_dt_methods = [ + "to_period", + "to_pydatetime", + "tz_localize", + "tz_convert", + "normalize", + "strftime", + "round", + "floor", + "ceil", + "day_name", + "month_name", + "isocalendar", +] +ok_for_td = TimedeltaArray._datetimelike_ops +ok_for_td_methods = [ + "components", + "to_pytimedelta", + "total_seconds", + "round", + "floor", + "ceil", +] + + +def get_dir(ser): + # check limited display api + results = [r for r in ser.dt.__dir__() if not r.startswith("_")] + return sorted(set(results)) -class TestSeriesDatetimeValues: - def test_dt_namespace_accessor(self): +class TestSeriesDatetimeValues: + def _compare(self, ser, name): # GH 7207, 11128 # test .dt namespace accessor - ok_for_period = PeriodArray._datetimelike_ops - ok_for_period_methods = ["strftime", "to_timestamp", "asfreq"] - ok_for_dt = DatetimeArray._datetimelike_ops - ok_for_dt_methods = [ - "to_period", - "to_pydatetime", - "tz_localize", - "tz_convert", - "normalize", - "strftime", - "round", - "floor", - "ceil", - "day_name", - "month_name", - "isocalendar", - ] - ok_for_td = TimedeltaArray._datetimelike_ops - ok_for_td_methods = [ - "components", - "to_pytimedelta", - "total_seconds", - "round", - "floor", - "ceil", - ] - - def get_expected(s, name): - result = getattr(Index(s._values), prop) + def get_expected(ser, prop): + result = getattr(Index(ser._values), prop) if isinstance(result, np.ndarray): if is_integer_dtype(result): result = result.astype("int64") elif not is_list_like(result) or isinstance(result, 
DataFrame): return result - return Series(result, index=s.index, name=s.name) - - def compare(s, name): - a = getattr(s.dt, prop) - b = get_expected(s, prop) - if not (is_list_like(a) and is_list_like(b)): - assert a == b - elif isinstance(a, DataFrame): - tm.assert_frame_equal(a, b) - else: - tm.assert_series_equal(a, b) + return Series(result, index=ser.index, name=ser.name) + + left = getattr(ser.dt, name) + right = get_expected(ser, name) + if not (is_list_like(left) and is_list_like(right)): + assert left == right + elif isinstance(left, DataFrame): + tm.assert_frame_equal(left, right) + else: + tm.assert_series_equal(left, right) + + @pytest.mark.parametrize("freq", ["D", "s", "ms"]) + def test_dt_namespace_accessor_datetime64(self, freq): + # GH#7207, GH#11128 + # test .dt namespace accessor # datetimeindex - cases = [ - Series(date_range("20130101", periods=5), name="xxx"), - Series(date_range("20130101", periods=5, freq="s"), name="xxx"), - Series(date_range("20130101 00:00:00", periods=5, freq="ms"), name="xxx"), - ] - for s in cases: - for prop in ok_for_dt: - # we test freq below - # we ignore week and weekofyear because they are deprecated - if prop not in ["freq", "week", "weekofyear"]: - compare(s, prop) + dti = date_range("20130101", periods=5, freq=freq) + ser = Series(dti, name="xxx") - for prop in ok_for_dt_methods: - getattr(s.dt, prop) + for prop in ok_for_dt: + # we test freq below + # we ignore week and weekofyear because they are deprecated + if prop not in ["freq", "week", "weekofyear"]: + self._compare(ser, prop) - result = s.dt.to_pydatetime() - assert isinstance(result, np.ndarray) - assert result.dtype == object + for prop in ok_for_dt_methods: + getattr(ser.dt, prop) - result = s.dt.tz_localize("US/Eastern") - exp_values = DatetimeIndex(s.values).tz_localize("US/Eastern") - expected = Series(exp_values, index=s.index, name="xxx") - tm.assert_series_equal(result, expected) + result = ser.dt.to_pydatetime() + assert isinstance(result, 
np.ndarray) + assert result.dtype == object - tz_result = result.dt.tz - assert str(tz_result) == "US/Eastern" - freq_result = s.dt.freq - assert freq_result == DatetimeIndex(s.values, freq="infer").freq - - # let's localize, then convert - result = s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") - exp_values = ( - DatetimeIndex(s.values).tz_localize("UTC").tz_convert("US/Eastern") - ) - expected = Series(exp_values, index=s.index, name="xxx") - tm.assert_series_equal(result, expected) + result = ser.dt.tz_localize("US/Eastern") + exp_values = DatetimeIndex(ser.values).tz_localize("US/Eastern") + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + tz_result = result.dt.tz + assert str(tz_result) == "US/Eastern" + freq_result = ser.dt.freq + assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + + # let's localize, then convert + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") + exp_values = ( + DatetimeIndex(ser.values).tz_localize("UTC").tz_convert("US/Eastern") + ) + expected = Series(exp_values, index=ser.index, name="xxx") + tm.assert_series_equal(result, expected) + + def test_dt_namespace_accessor_datetime64tz(self): + # GH#7207, GH#11128 + # test .dt namespace accessor # datetimeindex with tz - s = Series(date_range("20130101", periods=5, tz="US/Eastern"), name="xxx") + dti = date_range("20130101", periods=5, tz="US/Eastern") + ser = Series(dti, name="xxx") for prop in ok_for_dt: # we test freq below # we ignore week and weekofyear because they are deprecated if prop not in ["freq", "week", "weekofyear"]: - compare(s, prop) + self._compare(ser, prop) for prop in ok_for_dt_methods: - getattr(s.dt, prop) + getattr(ser.dt, prop) - result = s.dt.to_pydatetime() + result = ser.dt.to_pydatetime() assert isinstance(result, np.ndarray) assert result.dtype == object - result = s.dt.tz_convert("CET") - expected = Series(s._values.tz_convert("CET"), index=s.index, name="xxx") + 
result = ser.dt.tz_convert("CET") + expected = Series(ser._values.tz_convert("CET"), index=ser.index, name="xxx") tm.assert_series_equal(result, expected) tz_result = result.dt.tz assert str(tz_result) == "CET" - freq_result = s.dt.freq - assert freq_result == DatetimeIndex(s.values, freq="infer").freq + freq_result = ser.dt.freq + assert freq_result == DatetimeIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_timedelta(self): + # GH#7207, GH#11128 + # test .dt namespace accessor # timedelta index cases = [ @@ -166,102 +181,115 @@ def compare(s, name): name="xxx", ), ] - for s in cases: + for ser in cases: for prop in ok_for_td: # we test freq below if prop != "freq": - compare(s, prop) + self._compare(ser, prop) for prop in ok_for_td_methods: - getattr(s.dt, prop) + getattr(ser.dt, prop) - result = s.dt.components + result = ser.dt.components assert isinstance(result, DataFrame) - tm.assert_index_equal(result.index, s.index) + tm.assert_index_equal(result.index, ser.index) - result = s.dt.to_pytimedelta() + result = ser.dt.to_pytimedelta() assert isinstance(result, np.ndarray) assert result.dtype == object - result = s.dt.total_seconds() + result = ser.dt.total_seconds() assert isinstance(result, Series) assert result.dtype == "float64" - freq_result = s.dt.freq - assert freq_result == TimedeltaIndex(s.values, freq="infer").freq + freq_result = ser.dt.freq + assert freq_result == TimedeltaIndex(ser.values, freq="infer").freq + + def test_dt_namespace_accessor_period(self): + # GH#7207, GH#11128 + # test .dt namespace accessor + + # periodindex + pi = period_range("20130101", periods=5, freq="D") + ser = Series(pi, name="xxx") + + for prop in ok_for_period: + # we test freq below + if prop != "freq": + self._compare(ser, prop) + + for prop in ok_for_period_methods: + getattr(ser.dt, prop) + + freq_result = ser.dt.freq + assert freq_result == PeriodIndex(ser.values).freq + + def test_dt_namespace_accessor_index_and_values(self): # both index = 
date_range("20130101", periods=3, freq="D") - s = Series(date_range("20140204", periods=3, freq="s"), index=index, name="xxx") + dti = date_range("20140204", periods=3, freq="s") + ser = Series(dti, index=index, name="xxx") exp = Series( np.array([2014, 2014, 2014], dtype="int64"), index=index, name="xxx" ) - tm.assert_series_equal(s.dt.year, exp) + tm.assert_series_equal(ser.dt.year, exp) exp = Series(np.array([2, 2, 2], dtype="int64"), index=index, name="xxx") - tm.assert_series_equal(s.dt.month, exp) + tm.assert_series_equal(ser.dt.month, exp) exp = Series(np.array([0, 1, 2], dtype="int64"), index=index, name="xxx") - tm.assert_series_equal(s.dt.second, exp) - - exp = Series([s[0]] * 3, index=index, name="xxx") - tm.assert_series_equal(s.dt.normalize(), exp) - - # periodindex - cases = [Series(period_range("20130101", periods=5, freq="D"), name="xxx")] - for s in cases: - for prop in ok_for_period: - # we test freq below - if prop != "freq": - compare(s, prop) - - for prop in ok_for_period_methods: - getattr(s.dt, prop) + tm.assert_series_equal(ser.dt.second, exp) - freq_result = s.dt.freq - assert freq_result == PeriodIndex(s.values).freq + exp = Series([ser[0]] * 3, index=index, name="xxx") + tm.assert_series_equal(ser.dt.normalize(), exp) - # test limited display api - def get_dir(s): - results = [r for r in s.dt.__dir__() if not r.startswith("_")] - return sorted(set(results)) + def test_dt_accessor_limited_display_api(self): + # tznaive + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + results = get_dir(ser) + tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) - s = Series(date_range("20130101", periods=5, freq="D"), name="xxx") - results = get_dir(s) + # tzaware + ser = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + results = get_dir(ser) tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) - s = 
Series( + # Period + ser = Series( period_range("20130101", periods=5, freq="D", name="xxx").astype(object) ) - results = get_dir(s) + results = get_dir(ser) tm.assert_almost_equal( results, sorted(set(ok_for_period + ok_for_period_methods)) ) - # 11295 + def test_dt_accessor_ambiguous_freq_conversions(self): + # GH#11295 # ambiguous time error on the conversions - s = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") - s = s.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") - results = get_dir(s) - tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods))) + ser = Series(date_range("2015-01-01", "2016-01-01", freq="T"), name="xxx") + ser = ser.dt.tz_localize("UTC").dt.tz_convert("America/Chicago") + exp_values = date_range( "2015-01-01", "2016-01-01", freq="T", tz="UTC" ).tz_convert("America/Chicago") # freq not preserved by tz_localize above exp_values = exp_values._with_freq(None) expected = Series(exp_values, name="xxx") - tm.assert_series_equal(s, expected) + tm.assert_series_equal(ser, expected) + def test_dt_accessor_not_writeable(self): # no setting allowed - s = Series(date_range("20130101", periods=5, freq="D"), name="xxx") + ser = Series(date_range("20130101", periods=5, freq="D"), name="xxx") with pytest.raises(ValueError, match="modifications"): - s.dt.hour = 5 + ser.dt.hour = 5 # trying to set a copy msg = "modifications to a property of a datetimelike.+not supported" with pd.option_context("chained_assignment", "raise"): with pytest.raises(com.SettingWithCopyError, match=msg): - s.dt.hour[0] = 5 + ser.dt.hour[0] = 5 @pytest.mark.parametrize( "method, dates", @@ -273,24 +301,24 @@ def get_dir(s): ) def test_dt_round(self, method, dates): # round - s = Series( + ser = Series( pd.to_datetime( ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] ), name="xxx", ) - result = getattr(s.dt, method)("D") + result = getattr(ser.dt, method)("D") expected = Series(pd.to_datetime(dates), name="xxx") 
tm.assert_series_equal(result, expected) def test_dt_round_tz(self): - s = Series( + ser = Series( pd.to_datetime( ["2012-01-01 13:00:00", "2012-01-01 12:01:00", "2012-01-01 08:00:00"] ), name="xxx", ) - result = s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D") + result = ser.dt.tz_localize("UTC").dt.tz_convert("US/Eastern").dt.round("D") exp_values = pd.to_datetime( ["2012-01-01", "2012-01-01", "2012-01-01"] @@ -339,23 +367,23 @@ def test_dt_round_tz_ambiguous(self, method): ) def test_dt_round_tz_nonexistent(self, method, ts_str, freq): # GH 23324 round near "spring forward" DST - s = Series([pd.Timestamp(ts_str, tz="America/Chicago")]) - result = getattr(s.dt, method)(freq, nonexistent="shift_forward") + ser = Series([pd.Timestamp(ts_str, tz="America/Chicago")]) + result = getattr(ser.dt, method)(freq, nonexistent="shift_forward") expected = Series([pd.Timestamp("2018-03-11 03:00:00", tz="America/Chicago")]) tm.assert_series_equal(result, expected) - result = getattr(s.dt, method)(freq, nonexistent="NaT") + result = getattr(ser.dt, method)(freq, nonexistent="NaT") expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz) tm.assert_series_equal(result, expected) with pytest.raises(pytz.NonExistentTimeError, match="2018-03-11 02:00:00"): - getattr(s.dt, method)(freq, nonexistent="raise") + getattr(ser.dt, method)(freq, nonexistent="raise") def test_dt_namespace_accessor_categorical(self): # GH 19468 dti = DatetimeIndex(["20171111", "20181212"]).repeat(2) - s = Series(pd.Categorical(dti), name="foo") - result = s.dt.year + ser = Series(pd.Categorical(dti), name="foo") + result = ser.dt.year expected = Series([2017, 2017, 2018, 2018], name="foo") tm.assert_series_equal(result, expected) @@ -394,9 +422,9 @@ def test_dt_other_accessors_categorical(self, accessor): def test_dt_accessor_no_new_attributes(self): # https://github.com/pandas-dev/pandas/issues/10673 - s = Series(date_range("20130101", periods=5, freq="D")) + ser = 
Series(date_range("20130101", periods=5, freq="D")) with pytest.raises(AttributeError, match="You cannot add any new attribute"): - s.dt.xlabel = "a" + ser.dt.xlabel = "a" @pytest.mark.parametrize( "time_locale", [None] if tm.get_locales() is None else [None] + tm.get_locales() @@ -434,7 +462,7 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): expected_days = calendar.day_name[:] expected_months = calendar.month_name[1:] - s = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365)) + ser = Series(date_range(freq="D", start=datetime(1998, 1, 1), periods=365)) english_days = [ "Monday", "Tuesday", @@ -446,13 +474,13 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): ] for day, name, eng_name in zip(range(4, 11), expected_days, english_days): name = name.capitalize() - assert s.dt.day_name(locale=time_locale)[day] == name - assert s.dt.day_name(locale=None)[day] == eng_name - s = s.append(Series([pd.NaT])) - assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1]) + assert ser.dt.day_name(locale=time_locale)[day] == name + assert ser.dt.day_name(locale=None)[day] == eng_name + ser = ser.append(Series([pd.NaT])) + assert np.isnan(ser.dt.day_name(locale=time_locale).iloc[-1]) - s = Series(date_range(freq="M", start="2012", end="2013")) - result = s.dt.month_name(locale=time_locale) + ser = Series(date_range(freq="M", start="2012", end="2013")) + result = ser.dt.month_name(locale=time_locale) expected = Series([month.capitalize() for month in expected_months]) # work around https://github.com/pandas-dev/pandas/issues/22342 @@ -461,7 +489,7 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): tm.assert_series_equal(result, expected) - for s_date, expected in zip(s, expected_months): + for s_date, expected in zip(ser, expected_months): result = s_date.month_name(locale=time_locale) expected = expected.capitalize() @@ -470,20 +498,20 @@ def test_dt_accessor_datetime_name_accessors(self, time_locale): assert 
result == expected - s = s.append(Series([pd.NaT])) - assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1]) + ser = ser.append(Series([pd.NaT])) + assert np.isnan(ser.dt.month_name(locale=time_locale).iloc[-1]) def test_strftime(self): # GH 10086 - s = Series(date_range("20130101", periods=5)) - result = s.dt.strftime("%Y/%m/%d") + ser = Series(date_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") expected = Series( ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) tm.assert_series_equal(result, expected) - s = Series(date_range("2015-02-03 11:22:33.4567", periods=5)) - result = s.dt.strftime("%Y/%m/%d %H-%M-%S") + ser = Series(date_range("2015-02-03 11:22:33.4567", periods=5)) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") expected = Series( [ "2015/02/03 11-22-33", @@ -495,15 +523,15 @@ def test_strftime(self): ) tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=5)) - result = s.dt.strftime("%Y/%m/%d") + ser = Series(period_range("20130101", periods=5)) + result = ser.dt.strftime("%Y/%m/%d") expected = Series( ["2013/01/01", "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) tm.assert_series_equal(result, expected) - s = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s")) - result = s.dt.strftime("%Y/%m/%d %H-%M-%S") + ser = Series(period_range("2015-02-03 11:22:33.4567", periods=5, freq="s")) + result = ser.dt.strftime("%Y/%m/%d %H-%M-%S") expected = Series( [ "2015/02/03 11-22-33", @@ -515,9 +543,10 @@ def test_strftime(self): ) tm.assert_series_equal(result, expected) - s = Series(date_range("20130101", periods=5)) - s.iloc[0] = pd.NaT - result = s.dt.strftime("%Y/%m/%d") + def test_strftime_dt64_days(self): + ser = Series(date_range("20130101", periods=5)) + ser.iloc[0] = pd.NaT + result = ser.dt.strftime("%Y/%m/%d") expected = Series( [np.nan, "2013/01/02", "2013/01/03", "2013/01/04", "2013/01/05"] ) @@ -533,6 +562,7 @@ def test_strftime(self): # 
dtype may be S10 or U10 depending on python version tm.assert_index_equal(result, expected) + def test_strftime_period_days(self): period_index = period_range("20150301", periods=5) result = period_index.strftime("%Y/%m/%d") expected = Index( @@ -541,13 +571,15 @@ def test_strftime(self): ) tm.assert_index_equal(result, expected) - s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) - result = s.dt.strftime("%Y-%m-%d %H:%M:%S") + def test_strftime_dt64_microsecond_resolution(self): + ser = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) + result = ser.dt.strftime("%Y-%m-%d %H:%M:%S") expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"]) tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=4, freq="H")) - result = s.dt.strftime("%Y/%m/%d %H:%M:%S") + def test_strftime_period_hours(self): + ser = Series(period_range("20130101", periods=4, freq="H")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S") expected = Series( [ "2013/01/01 00:00:00", @@ -556,9 +588,11 @@ def test_strftime(self): "2013/01/01 03:00:00", ] ) + tm.assert_series_equal(result, expected) - s = Series(period_range("20130101", periods=4, freq="L")) - result = s.dt.strftime("%Y/%m/%d %H:%M:%S.%l") + def test_strftime_period_minutes(self): + ser = Series(period_range("20130101", periods=4, freq="L")) + result = ser.dt.strftime("%Y/%m/%d %H:%M:%S.%l") expected = Series( [ "2013/01/01 00:00:00.000", @@ -578,8 +612,8 @@ def test_strftime(self): ) def test_strftime_nat(self, data): # GH 29578 - s = Series(data) - result = s.dt.strftime("%Y-%m-%d") + ser = Series(data) + result = ser.dt.strftime("%Y-%m-%d") expected = Series(["2019-01-01", np.nan]) tm.assert_series_equal(result, expected) @@ -591,16 +625,16 @@ def test_valid_dt_with_missing_values(self): ) # GH 8689 - s = Series(date_range("20130101", periods=5, freq="D")) - s.iloc[2] = pd.NaT + ser = Series(date_range("20130101", periods=5, freq="D")) + 
ser.iloc[2] = pd.NaT for attr in ["microsecond", "nanosecond", "second", "minute", "hour", "day"]: - expected = getattr(s.dt, attr).copy() + expected = getattr(ser.dt, attr).copy() expected.iloc[2] = np.nan - result = getattr(s.dt, attr) + result = getattr(ser.dt, attr) tm.assert_series_equal(result, expected) - result = s.dt.date + result = ser.dt.date expected = Series( [ date(2013, 1, 1), @@ -613,7 +647,7 @@ def test_valid_dt_with_missing_values(self): ) tm.assert_series_equal(result, expected) - result = s.dt.time + result = ser.dt.time expected = Series([time(0), time(0), np.nan, time(0), time(0)], dtype="object") tm.assert_series_equal(result, expected) @@ -626,8 +660,8 @@ def test_dt_accessor_api(self): assert Series.dt is CombinedDatetimelikeProperties - s = Series(date_range("2000-01-01", periods=3)) - assert isinstance(s.dt, DatetimeProperties) + ser = Series(date_range("2000-01-01", periods=3)) + assert isinstance(ser.dt, DatetimeProperties) @pytest.mark.parametrize( "ser", [Series(np.arange(5)), Series(list("abcde")), Series(np.random.randn(5))] @@ -639,11 +673,11 @@ def test_dt_accessor_invalid(self, ser): assert not hasattr(ser, "dt") def test_dt_accessor_updates_on_inplace(self): - s = Series(date_range("2018-01-01", periods=10)) - s[2] = None - return_value = s.fillna(pd.Timestamp("2018-01-01"), inplace=True) + ser = Series(date_range("2018-01-01", periods=10)) + ser[2] = None + return_value = ser.fillna(pd.Timestamp("2018-01-01"), inplace=True) assert return_value is None - result = s.dt.date + result = ser.dt.date assert result[0] == result[2] def test_date_tz(self): @@ -652,10 +686,10 @@ def test_date_tz(self): ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz="US/Eastern", ) - s = Series(rng) + ser = Series(rng) expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)]) - tm.assert_series_equal(s.dt.date, expected) - tm.assert_series_equal(s.apply(lambda x: x.date()), expected) + 
tm.assert_series_equal(ser.dt.date, expected) + tm.assert_series_equal(ser.apply(lambda x: x.date()), expected) def test_dt_timetz_accessor(self, tz_naive_fixture): # GH21358 @@ -664,11 +698,11 @@ def test_dt_timetz_accessor(self, tz_naive_fixture): dtindex = DatetimeIndex( ["2014-04-04 23:56", "2014-07-18 21:24", "2015-11-22 22:14"], tz=tz ) - s = Series(dtindex) + ser = Series(dtindex) expected = Series( [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)] ) - result = s.dt.timetz + result = ser.dt.timetz tm.assert_series_equal(result, expected) @pytest.mark.parametrize( @@ -731,9 +765,9 @@ def test_end_time_timevalues(self, input_vals): # when using the dt accessor on a Series input_vals = PeriodArray._from_sequence(np.asarray(input_vals)) - s = Series(input_vals) - result = s.dt.end_time - expected = s.apply(lambda x: x.end_time) + ser = Series(input_vals) + result = ser.dt.end_time + expected = ser.apply(lambda x: x.end_time) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("input_vals", [("2001"), ("NaT")]) @@ -755,7 +789,7 @@ def test_week_and_weekofyear_are_deprecated(): def test_normalize_pre_epoch_dates(): # GH: 36294 - s = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"])) - result = s.dt.normalize() + ser = pd.to_datetime(Series(["1969-01-01 09:00:00", "2016-01-01 09:00:00"])) + result = ser.dt.normalize() expected = pd.to_datetime(Series(["1969-01-01", "2016-01-01"])) tm.assert_series_equal(result, expected)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44355
2021-11-08T21:45:28Z
2021-11-10T01:43:48Z
2021-11-10T01:43:48Z
2021-11-10T02:14:25Z
changed shape argument for ndarray from int to tuple in ./core/strings/object_array.py
diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index ba2f56c79bdfe..2ce5c0cbea272 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -12,7 +12,7 @@ import pandas._libs.missing as libmissing import pandas._libs.ops as libops from pandas._typing import ( - Dtype, + NpDtype, Scalar, ) @@ -37,7 +37,7 @@ def __len__(self): raise NotImplementedError def _str_map( - self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True + self, f, na_value=None, dtype: NpDtype | None = None, convert: bool = True ): """ Map a callable over valid elements of the array. @@ -62,9 +62,7 @@ def _str_map( na_value = self._str_na_value if not len(self): - # error: Argument 1 to "ndarray" has incompatible type "int"; - # expected "Sequence[int]" - return np.ndarray(0, dtype=dtype) # type: ignore[arg-type] + return np.ndarray(0, dtype=dtype) arr = np.asarray(self, dtype=object) mask = isna(arr)
xref #37715
https://api.github.com/repos/pandas-dev/pandas/pulls/44352
2021-11-08T18:35:46Z
2021-11-16T00:57:31Z
2021-11-16T00:57:31Z
2022-01-05T18:42:34Z
TYP: misc typing in _libs
diff --git a/pandas/_libs/join.pyi b/pandas/_libs/join.pyi index 3a22aa439b7be..a5e91e2ce83eb 100644 --- a/pandas/_libs/join.pyi +++ b/pandas/_libs/join.pyi @@ -55,7 +55,7 @@ def asof_join_backward_on_X_by_Y( left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_forward_on_X_by_Y( left_values: np.ndarray, # asof_t[:] @@ -63,7 +63,7 @@ def asof_join_forward_on_X_by_Y( left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_nearest_on_X_by_Y( left_values: np.ndarray, # asof_t[:] @@ -71,23 +71,23 @@ def asof_join_nearest_on_X_by_Y( left_by_values: np.ndarray, # by_t[:] right_by_values: np.ndarray, # by_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_backward( left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_forward( left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def asof_join_nearest( left_values: np.ndarray, # asof_t[:] right_values: np.ndarray, # asof_t[:] allow_exact_matches: bool = ..., - tolerance=..., + tolerance: np.number | int | float | None = ..., ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... 
diff --git a/pandas/_libs/ops.pyi b/pandas/_libs/ops.pyi index 11ae3b852e97a..74a6ad87cd279 100644 --- a/pandas/_libs/ops.pyi +++ b/pandas/_libs/ops.pyi @@ -1,6 +1,7 @@ from typing import ( Any, Callable, + Iterable, Literal, overload, ) @@ -35,15 +36,15 @@ def vec_binop( @overload def maybe_convert_bool( arr: npt.NDArray[np.object_], - true_values=..., - false_values=..., + true_values: Iterable = ..., + false_values: Iterable = ..., convert_to_masked_nullable: Literal[False] = ..., ) -> tuple[np.ndarray, None]: ... @overload def maybe_convert_bool( arr: npt.NDArray[np.object_], - true_values=..., - false_values=..., + true_values: Iterable = ..., + false_values: Iterable = ..., *, convert_to_masked_nullable: Literal[True], ) -> tuple[np.ndarray, np.ndarray]: ... diff --git a/pandas/_libs/tslibs/fields.pyi b/pandas/_libs/tslibs/fields.pyi index cbf91f2bcaf76..415b4329310c0 100644 --- a/pandas/_libs/tslibs/fields.pyi +++ b/pandas/_libs/tslibs/fields.pyi @@ -9,7 +9,7 @@ def month_position_check(fields, weekdays) -> str | None: ... def get_date_name_field( dtindex: npt.NDArray[np.int64], # const int64_t[:] field: str, - locale=..., + locale: str | None = ..., ) -> npt.NDArray[np.object_]: ... def get_start_end_field( dtindex: npt.NDArray[np.int64], # const int64_t[:] @@ -31,7 +31,7 @@ def isleapyear_arr( def build_isocalendar_sarray( dtindex: npt.NDArray[np.int64], # const int64_t[:] ) -> np.ndarray: ... -def get_locale_names(name_type: str, locale: object = ...): ... +def get_locale_names(name_type: str, locale: str | None = ...): ... class RoundTo: @property diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi index 6a5555cfff030..1e29ef8940891 100644 --- a/pandas/_libs/tslibs/nattype.pyi +++ b/pandas/_libs/tslibs/nattype.pyi @@ -1,6 +1,7 @@ from datetime import ( datetime, timedelta, + tzinfo as _tzinfo, ) from typing import Any @@ -17,7 +18,7 @@ class NaTType(datetime): def asm8(self) -> np.datetime64: ... 
def to_datetime64(self) -> np.datetime64: ... def to_numpy( - self, dtype=..., copy: bool = ... + self, dtype: np.dtype | str | None = ..., copy: bool = ... ) -> np.datetime64 | np.timedelta64: ... @property def is_leap_year(self) -> bool: ... @@ -69,7 +70,20 @@ class NaTType(datetime): def ceil(self) -> NaTType: ... def tz_convert(self) -> NaTType: ... def tz_localize(self) -> NaTType: ... - def replace(self, *args, **kwargs) -> NaTType: ... + # error: Signature of "replace" incompatible with supertype "datetime" + def replace( # type: ignore[override] + self, + year: int | None = ..., + month: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., + microsecond: int | None = ..., + nanosecond: int | None = ..., + tzinfo: _tzinfo | None = ..., + fold: int | None = ..., + ) -> NaTType: ... # error: Return type "float" of "year" incompatible with return # type "int" in supertype "date" @property diff --git a/pandas/_libs/tslibs/period.pyi b/pandas/_libs/tslibs/period.pyi index 4f7505fd7e792..2f60df0ad888e 100644 --- a/pandas/_libs/tslibs/period.pyi +++ b/pandas/_libs/tslibs/period.pyi @@ -59,22 +59,22 @@ class Period: def __new__( # type: ignore[misc] cls, value=..., - freq=..., - ordinal=..., - year=..., - month=..., - quarter=..., - day=..., - hour=..., - minute=..., - second=..., + freq: int | str | None = ..., + ordinal: int | None = ..., + year: int | None = ..., + month: int | None = ..., + quarter: int | None = ..., + day: int | None = ..., + hour: int | None = ..., + minute: int | None = ..., + second: int | None = ..., ) -> Period | NaTType: ... @classmethod def _maybe_convert_freq(cls, freq) -> BaseOffset: ... @classmethod def _from_ordinal(cls, ordinal: int, freq) -> Period: ... @classmethod - def now(cls, freq=...) -> Period: ... + def now(cls, freq: BaseOffset = ...) -> Period: ... def strftime(self, fmt: str) -> str: ... 
def to_timestamp( self, @@ -82,7 +82,7 @@ class Period: how: str = ..., tz: Timezone | None = ..., ) -> Timestamp: ... - def asfreq(self, freq, how=...) -> Period: ... + def asfreq(self, freq: str, how: str = ...) -> Period: ... @property def freqstr(self) -> str: ... @property diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi index 7c0131cf28c9a..d8369f0cc90f9 100644 --- a/pandas/_libs/tslibs/timedeltas.pyi +++ b/pandas/_libs/tslibs/timedeltas.pyi @@ -14,7 +14,7 @@ from pandas._libs.tslibs import ( ) from pandas._typing import npt -_S = TypeVar("_S") +_S = TypeVar("_S", bound=timedelta) def ints_to_pytimedelta( arr: npt.NDArray[np.int64], # const int64_t[:] @@ -36,7 +36,10 @@ class Timedelta(timedelta): # error: "__new__" must return a class instance (got "Union[Timedelta, NaTType]") def __new__( # type: ignore[misc] - cls: Type[_S], value=..., unit=..., **kwargs + cls: Type[_S], + value=..., + unit: str = ..., + **kwargs: int | float | np.integer | np.floating, ) -> _S | NaTType: ... @property def days(self) -> int: ... @@ -50,9 +53,9 @@ class Timedelta(timedelta): @property def asm8(self) -> np.timedelta64: ... # TODO: round/floor/ceil could return NaT? - def round(self: _S, freq) -> _S: ... - def floor(self: _S, freq) -> _S: ... - def ceil(self: _S, freq) -> _S: ... + def round(self: _S, freq: str) -> _S: ... + def floor(self: _S, freq: str) -> _S: ... + def ceil(self: _S, freq: str) -> _S: ... @property def resolution_string(self) -> str: ... def __add__(self, other: timedelta) -> timedelta: ...
Reduced the list of partially typed functions in _libs a bit.
https://api.github.com/repos/pandas-dev/pandas/pulls/44349
2021-11-08T02:29:57Z
2021-12-14T02:05:20Z
2021-12-14T02:05:20Z
2022-03-09T02:56:34Z
CLN: address TODOs/FIXMEs
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ebf3428020652..1f26b6d9ae6ae 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3732,6 +3732,9 @@ def _setitem_array(self, key, value): self.iloc[indexer] = value else: + # Note: unlike self.iloc[:, indexer] = value, this will + # never try to overwrite values inplace + if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 05a9aab4a5554..b4d6e0ace4223 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1045,8 +1045,7 @@ def iset( self._rebuild_blknos_and_blklocs() # Note: we exclude DTA/TDA here - vdtype = getattr(value, "dtype", None) - value_is_extension_type = is_1d_only_ea_dtype(vdtype) + value_is_extension_type = is_1d_only_ea_dtype(value.dtype) # categorical/sparse/datetimetz if value_is_extension_type: diff --git a/pandas/core/series.py b/pandas/core/series.py index 7ee9a0bcdd9e1..996af80139458 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4538,6 +4538,7 @@ def rename( dtype: int64 """ if axis is not None: + # Make sure we raise if an invalid 'axis' is passed. 
axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index f341014110e18..2e6318955e119 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -617,7 +617,7 @@ def test_quantile_ea_with_na(self, obj, index): expected = type(obj)(expected) tm.assert_equal(result, expected) - # TODO: filtering can be removed after GH#39763 is fixed + # TODO(GH#39763): filtering can be removed after GH#39763 is fixed @pytest.mark.filterwarnings("ignore:Using .astype to convert:FutureWarning") def test_quantile_ea_all_na(self, obj, index, frame_or_series): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 1bb4b24266de0..f92bbe1c718ab 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2585,6 +2585,19 @@ def test_error_from_2darray(self, col_a, col_b): DataFrame({"a": col_a, "b": col_b}) +class TestDataFrameConstructorIndexInference: + def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self): + rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M") + s1 = Series(np.random.randn(len(rng1)), rng1) + + rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M") + s2 = Series(np.random.randn(len(rng2)), rng2) + df = DataFrame({"s1": s1, "s2": s2}) + + exp = pd.period_range("1/1/1980", "1/1/2012", freq="M") + tm.assert_index_equal(df.index, exp) + + class TestDataFrameConstructorWithDtypeCoercion: def test_floating_values_integer_dtype(self): # GH#40110 make DataFrame behavior with arraylike floating data and diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py index 7a4ba52cdfdd5..87ffe99896199 100644 --- a/pandas/tests/indexes/base_class/test_setops.py +++ b/pandas/tests/indexes/base_class/test_setops.py @@ -90,7 +90,7 @@ def 
test_union_sort_other_incomparable(self): @pytest.mark.xfail(reason="GH#25151 need to decide on True behavior") def test_union_sort_other_incomparable_true(self): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour # sort=True idx = Index([1, pd.Timestamp("2000")]) with pytest.raises(TypeError, match=".*"): @@ -98,7 +98,7 @@ def test_union_sort_other_incomparable_true(self): @pytest.mark.xfail(reason="GH#25151 need to decide on True behavior") def test_intersection_equal_sort_true(self): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour idx = Index(["c", "a", "b"]) sorted_ = Index(["a", "b", "c"]) tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 80c86e0103436..a99d2f590be97 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -746,7 +746,7 @@ def test_cached_range_bug(self): assert len(rng) == 50 assert rng[0] == datetime(2010, 9, 1, 5) - def test_timezone_comparaison_bug(self): + def test_timezone_comparison_bug(self): # smoke test start = Timestamp("20130220 10:00", tz="US/Eastern") result = date_range(start, periods=2, tz="US/Eastern") diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index 2a1fa8a015ccc..507449eabfb6e 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -203,7 +203,7 @@ def test_difference_sort_special(): @pytest.mark.xfail(reason="Not implemented.") def test_difference_sort_special_true(): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) result = idx.difference([], sort=True) expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) @@ -340,7 +340,7 @@ def test_intersect_equal_sort(): 
@pytest.mark.xfail(reason="Not implemented.") def test_intersect_equal_sort_true(): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) sorted_ = MultiIndex.from_product([[0, 1], ["a", "b"]]) tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) @@ -363,7 +363,7 @@ def test_union_sort_other_empty(slice_): @pytest.mark.xfail(reason="Not implemented.") def test_union_sort_other_empty_sort(slice_): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour # # sort=True idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) other = idx[:0] @@ -388,7 +388,7 @@ def test_union_sort_other_incomparable(): @pytest.mark.xfail(reason="Not implemented.") def test_union_sort_other_incomparable_sort(): - # TODO decide on True behaviour + # TODO(GH#25151): decide on True behaviour # # sort=True idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) with pytest.raises(TypeError, match="Cannot compare"): diff --git a/pandas/tests/indexes/numeric/test_setops.py b/pandas/tests/indexes/numeric/test_setops.py index 4045cc0b91313..72336d3e33b79 100644 --- a/pandas/tests/indexes/numeric/test_setops.py +++ b/pandas/tests/indexes/numeric/test_setops.py @@ -155,7 +155,7 @@ def test_union_sort_other_special(self, slice_): @pytest.mark.xfail(reason="Not implemented") @pytest.mark.parametrize("slice_", [slice(None), slice(0)]) def test_union_sort_special_true(self, slice_): - # TODO: decide on True behaviour + # TODO(GH#25151): decide on True behaviour # sort=True idx = Index([1, 0, 2]) # default, sort=None diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index ce5c46dd55c0d..bac231ef0085d 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -153,18 +153,6 @@ def test_union_misc(self, sort): expected = index.astype(object).union(index2.astype(object), sort=sort) 
tm.assert_index_equal(result, expected) - # TODO: belongs elsewhere - def test_union_dataframe_index(self): - rng1 = period_range("1/1/1999", "1/1/2012", freq="M") - s1 = pd.Series(np.random.randn(len(rng1)), rng1) - - rng2 = period_range("1/1/1980", "12/1/2001", freq="M") - s2 = pd.Series(np.random.randn(len(rng2)), rng2) - df = pd.DataFrame({"s1": s1, "s2": s2}) - - exp = period_range("1/1/1980", "1/1/2012", freq="M") - tm.assert_index_equal(df.index, exp) - def test_intersection(self, sort): index = period_range("1/1/2000", "1/20/2000", freq="D") diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 277f686a8487a..c45a4c771856c 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -204,10 +204,10 @@ def test_delete_preserves_rangeindex_list_middle(self): loc = [1, 2, 3, 4] result = idx.delete(loc) expected = RangeIndex(0, 6, 5) - tm.assert_index_equal(result, expected, exact="equiv") # TODO: retain! + tm.assert_index_equal(result, expected, exact=True) result = idx.delete(loc[::-1]) - tm.assert_index_equal(result, expected, exact="equiv") # TODO: retain! 
+ tm.assert_index_equal(result, expected, exact=True) def test_delete_all_preserves_rangeindex(self): idx = RangeIndex(0, 6, 1) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f1ece3e363bb6..50be69fb93d7c 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -525,20 +525,6 @@ def test_asof_numeric_vs_bool_raises(self): with pytest.raises(TypeError, match=msg): right.asof(left) - # TODO: this tests Series.asof - def test_asof_nanosecond_index_access(self): - s = Timestamp("20130101").value - r = DatetimeIndex([s + 50 + i for i in range(100)]) - ser = Series(np.random.randn(100), index=r) - - first_value = ser.asof(ser.index[0]) - - # this does not yet work, as parsing strings is done via dateutil - # assert first_value == x['2013-01-01 00:00:00.000000050+0000'] - - expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns") - assert first_value == ser[Timestamp(expected_ts)] - @pytest.mark.parametrize("index", ["string"], indirect=True) def test_booleanindex(self, index): bool_index = np.ones(len(index), dtype=bool) diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index a0e97223435e6..abe1c4fd03fcd 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -773,7 +773,7 @@ def test_difference_incomparable(self, opname): @pytest.mark.xfail(reason="Not implemented") @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) def test_difference_incomparable_true(self, opname): - # TODO: decide on True behaviour + # TODO(GH#25151): decide on True behaviour # # sort=True, raises a = Index([3, Timestamp("2000"), 1]) b = Index([2, Timestamp("1999"), 1]) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index d446d606d726f..7d2f68b00d95f 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -96,10 +96,7 @@ def 
test_iloc_setitem_fullcol_categorical(self, indexer, key, using_array_manage # check we dont have a view on cat (may be undesired GH#39986) df.iloc[0, 0] = "gamma" - if overwrite: - assert cat[0] != "gamma" - else: - assert cat[0] != "gamma" + assert cat[0] != "gamma" # TODO with mixed dataframe ("split" path), we always overwrite the column frame = DataFrame({0: np.array([0, 1, 2], dtype=object), 1: range(3)}) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index d6402e027be98..a10288b2091ca 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -129,6 +129,21 @@ def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli): with pytest.raises(err, match=msg): idxr[nd3] = 0 + def test_getitem_ndarray_0d(self): + # GH#24924 + key = np.array(0) + + # dataframe __getitem__ + df = DataFrame([[1, 2], [3, 4]]) + result = df[key] + expected = Series([1, 3], name=0) + tm.assert_series_equal(result, expected) + + # series __getitem__ + ser = Series([1, 2]) + result = ser[key] + assert result == 1 + def test_inf_upcast(self): # GH 16957 # We should be able to use np.inf as a key diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index bc08c53784e76..b0aa05371271b 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -6,7 +6,6 @@ time, timedelta, ) -from io import StringIO import re from dateutil.tz import gettz @@ -558,15 +557,27 @@ def test_loc_setitem_consistency_empty(self): def test_loc_setitem_consistency_slice_column_len(self): # .loc[:,column] setting with slice == len of the column # GH10408 - data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat -Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse -Region,Site,RespondentID,,,,, -Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes, -Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes 
-Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes, -Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No""" - - df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2]) + levels = [ + ["Region_1"] * 4, + ["Site_1", "Site_1", "Site_2", "Site_2"], + [3987227376, 3980680971, 3977723249, 3977723089], + ] + mi = MultiIndex.from_arrays(levels, names=["Region", "Site", "RespondentID"]) + + clevels = [ + ["Respondent", "Respondent", "Respondent", "OtherCat", "OtherCat"], + ["Something", "StartDate", "EndDate", "Yes/No", "SomethingElse"], + ] + cols = MultiIndex.from_arrays(clevels, names=["Level_0", "Level_1"]) + + values = [ + ["A", "5/25/2015 10:59", "5/25/2015 11:22", "Yes", np.nan], + ["A", "5/21/2015 9:40", "5/21/2015 9:52", "Yes", "Yes"], + ["A", "5/20/2015 8:27", "5/20/2015 8:41", "Yes", np.nan], + ["A", "5/20/2015 8:33", "5/20/2015 9:09", "Yes", "No"], + ] + df = DataFrame(values, index=mi, columns=cols) + df.loc[:, ("Respondent", "StartDate")] = to_datetime( df.loc[:, ("Respondent", "StartDate")] ) diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index 39611bce2b4fa..bf262e6755289 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -74,8 +74,8 @@ def _check(f, func, values=False): _check(f, "at") -class TestScalar2: - # TODO: Better name, just separating things that dont need Base class +class TestAtAndiAT: + # at and iat tests that don't need Base class def test_at_iat_coercion(self): @@ -214,19 +214,6 @@ def test_iat_setter_incompatible_assignment(self): expected = DataFrame({"a": [None, 1], "b": [4, 5]}) tm.assert_frame_equal(result, expected) - def test_getitem_zerodim_np_array(self): - # GH24924 - # dataframe __getitem__ - df = DataFrame([[1, 2], [3, 4]]) - result = df[np.array(0)] - expected = Series([1, 3], name=0) - tm.assert_series_equal(result, expected) - - # series __getitem__ - s = Series([1, 2]) - result = s[np.array(0)] - assert 
result == 1 - def test_iat_dont_wrap_object_datetimelike(): # GH#32809 .iat calls go through DataFrame._get_value, should not diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py index 39674db6916c1..5bd73e6045e32 100644 --- a/pandas/tests/io/formats/test_console.py +++ b/pandas/tests/io/formats/test_console.py @@ -5,7 +5,7 @@ from pandas._config import detect_console_encoding -class MockEncoding: # TODO(py27): replace with mock +class MockEncoding: """ Used to add a side effect when accessing the 'encoding' property. If the side effect is a str in nature, the value will be returned. Otherwise, the diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py index 2eef940ee9a40..be053a8f46051 100644 --- a/pandas/tests/plotting/test_backend.py +++ b/pandas/tests/plotting/test_backend.py @@ -71,7 +71,7 @@ def test_register_entrypoint(restore_backend): result = pandas.plotting._core._get_plot_backend("my_backend") assert result is mod - # TODO: https://github.com/pandas-dev/pandas/issues/27517 + # TODO(GH#27517): https://github.com/pandas-dev/pandas/issues/27517 # Remove the td.skip_if_no_mpl with pandas.option_context("plotting.backend", "my_backend"): result = pandas.plotting._core._get_plot_backend() diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index af9d6dd83bee3..8a83cdcbdefb0 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -671,13 +671,12 @@ def test_simple(self): tm.assert_frame_equal(result, expected) def test_stubs(self): - # GH9204 + # GH9204 wide_to_long call should not modify 'stubs' list df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) df.columns = ["id", "inc1", "inc2", "edu1", "edu2"] stubs = ["inc", "edu"] - # TODO: unused? 
- df_long = wide_to_long(df, stubs, i="id", j="age") # noqa + wide_to_long(df, stubs, i="id", j="age") assert stubs == ["inc", "edu"] diff --git a/pandas/tests/scalar/period/test_asfreq.py b/pandas/tests/scalar/period/test_asfreq.py index 9110352d33c26..386ab4150c6ff 100644 --- a/pandas/tests/scalar/period/test_asfreq.py +++ b/pandas/tests/scalar/period/test_asfreq.py @@ -428,9 +428,6 @@ def test_conv_daily(self): ival_D_saturday = Period(freq="D", year=2007, month=1, day=6) ival_D_sunday = Period(freq="D", year=2007, month=1, day=7) - # TODO: unused? - # ival_D_monday = Period(freq='D', year=2007, month=1, day=8) - ival_B_friday = Period(freq="B", year=2007, month=1, day=5) ival_B_monday = Period(freq="B", year=2007, month=1, day=8) diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py index 7a3f68fd3d990..8ddcf07934e21 100644 --- a/pandas/tests/series/methods/test_asof.py +++ b/pandas/tests/series/methods/test_asof.py @@ -2,8 +2,10 @@ import pytest from pandas._libs.tslibs import IncompatibleFrequency +from pandas.compat import np_datetime64_compat from pandas import ( + DatetimeIndex, Series, Timestamp, date_range, @@ -15,6 +17,20 @@ class TestSeriesAsof: + def test_asof_nanosecond_index_access(self): + ts = Timestamp("20130101").value + dti = DatetimeIndex([ts + 50 + i for i in range(100)]) + ser = Series(np.random.randn(100), index=dti) + + first_value = ser.asof(ser.index[0]) + + # this used to not work bc parsing was done by dateutil that didn't + # handle nanoseconds + assert first_value == ser["2013-01-01 00:00:00.000000050+0000"] + + expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns") + assert first_value == ser[Timestamp(expected_ts)] + def test_basic(self): # array or list or dates diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index ec060aa91e383..563c8f63df57d 100644 --- a/pandas/tests/series/test_logical_ops.py +++ 
b/pandas/tests/series/test_logical_ops.py @@ -49,9 +49,6 @@ def test_logical_operators_bool_dtype_with_empty(self): def test_logical_operators_int_dtype_with_int_dtype(self): # GH#9016: support bitwise op for integer types - # TODO: unused - # s_0101 = Series([0, 1, 0, 1]) - s_0123 = Series(range(4), dtype="int64") s_3333 = Series([3] * 4) s_4444 = Series([4] * 4)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44348
2021-11-07T23:01:47Z
2021-11-08T13:13:09Z
2021-11-08T13:13:09Z
2021-11-08T14:57:32Z
REF: re-remove _putmask_preserve
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 54324bf721945..77e38e6c6e3fc 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -126,7 +126,8 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd if values.dtype.kind == new.dtype.kind: # preserves dtype if possible - return _putmask_preserve(values, new, mask) + np.putmask(values, mask, new) + return values dtype = find_common_type([values.dtype, new.dtype]) # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type @@ -135,15 +136,8 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # List[Any], _DTypeDict, Tuple[Any, Any]]]" values = values.astype(dtype) # type: ignore[arg-type] - return _putmask_preserve(values, new, mask) - - -def _putmask_preserve(new_values: np.ndarray, new, mask: npt.NDArray[np.bool_]): - try: - new_values[mask] = new[mask] - except (IndexError, ValueError): - new_values[mask] = new - return new_values + np.putmask(values, mask, new) + return values def putmask_without_repeat(
Un-revert half of #44338
https://api.github.com/repos/pandas-dev/pandas/pulls/44346
2021-11-07T19:36:21Z
2021-11-10T01:45:18Z
2021-11-10T01:45:18Z
2021-11-10T02:14:07Z
BUG: frame.loc[2:, 'z'] not setting inplace when multi-block
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 8732e1c397ce5..85f0f16c44b89 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -543,6 +543,7 @@ Indexing - Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`) - Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`) - Bug in :meth:`Series.__setitem__` with a boolean mask indexer setting a listlike value of length 1 incorrectly broadcasting that value (:issue:`44265`) +- Bug in :meth:`DataFrame.loc.__setitem__` and :meth:`DataFrame.iloc.__setitem__` with mixed dtypes sometimes failing to operate in-place (:issue:`44345`) - Missing diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 669274e034905..4aa9d251b04c7 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1859,10 +1859,19 @@ def _setitem_single_column(self, loc: int, value, plane_indexer): # in case of slice ser = value[pi] else: - # set the item, possibly having a dtype change - ser = ser.copy() - ser._mgr = ser._mgr.setitem(indexer=(pi,), value=value) - ser._maybe_update_cacher(clear=True, inplace=True) + # set the item, first attempting to operate inplace, then + # falling back to casting if necessary; see + # _whatsnew_130.notable_bug_fixes.setitem_column_try_inplace + + orig_values = ser._values + ser._mgr = ser._mgr.setitem((pi,), value) + + if ser._values is orig_values: + # The setitem happened inplace, so the DataFrame's values + # were modified inplace. 
+ return + self.obj._iset_item(loc, ser, inplace=True) + return # reset the sliced object if unique self.obj._iset_item(loc, ser, inplace=True) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index f6c93e6f751c8..9803a2e4e3309 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -600,6 +600,8 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: # Cast from unsupported types to supported types is_nullable_int = isinstance(data[col].dtype, (_IntegerDtype, BooleanDtype)) orig = data[col] + # We need to find orig_missing before altering data below + orig_missing = orig.isna() if is_nullable_int: missing_loc = data[col].isna() if missing_loc.any(): @@ -650,11 +652,10 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: f"supported by Stata ({float64_max})" ) if is_nullable_int: - missing = orig.isna() - if missing.any(): + if orig_missing.any(): # Replace missing by Stata sentinel value sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name] - data.loc[missing, col] = sentinel + data.loc[orig_missing, col] = sentinel if ws: warnings.warn(ws, PossiblePrecisionLoss) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index d735f0dbec8a5..389bf56ab6035 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -384,7 +384,7 @@ def test_setitem_frame_length_0_str_key(self, indexer): expected["A"] = expected["A"].astype("object") tm.assert_frame_equal(df, expected) - def test_setitem_frame_duplicate_columns(self, using_array_manager): + def test_setitem_frame_duplicate_columns(self, using_array_manager, request): # GH#15695 cols = ["A", "B", "C"] * 2 df = DataFrame(index=range(3), columns=cols) @@ -407,6 +407,11 @@ def test_setitem_frame_duplicate_columns(self, using_array_manager): expected["C"] = expected["C"].astype("int64") # TODO(ArrayManager) .loc still overwrites expected["B"] = expected["B"].astype("int64") + + mark = 
pytest.mark.xfail( + reason="Both 'A' columns get set with 3 instead of 0 and 3" + ) + request.node.add_marker(mark) else: # set these with unique columns to be extra-unambiguous expected[2] = expected[2].astype(np.int64) @@ -995,22 +1000,37 @@ def test_setitem_always_copy(self, float_frame): float_frame["E"][5:10] = np.nan assert notna(s[5:10]).all() - def test_setitem_clear_caches(self): - # see GH#304 + @pytest.mark.parametrize("consolidate", [True, False]) + def test_setitem_partial_column_inplace(self, consolidate, using_array_manager): + # This setting should be in-place, regardless of whether frame is + # single-block or multi-block + # GH#304 this used to be incorrectly not-inplace, in which case + # we needed to ensure _item_cache was cleared. + df = DataFrame( {"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3] ) df.insert(2, "z", np.nan) + if not using_array_manager: + if consolidate: + df._consolidate_inplace() + assert len(df._mgr.blocks) == 1 + else: + assert len(df._mgr.blocks) == 2 - # cache it - foo = df["z"] - df.loc[df.index[2:], "z"] = 42 + zvals = df["z"]._values - expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z") + df.loc[2:, "z"] = 42 - assert df["z"] is not foo + expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z") tm.assert_series_equal(df["z"], expected) + # check setting occurred in-place + tm.assert_numpy_array_equal(zvals, expected.values) + assert np.shares_memory(zvals, df["z"]._values) + if not consolidate: + assert df["z"]._values is zvals + def test_setitem_duplicate_columns_not_inplace(self): # GH#39510 cols = ["A", "B"] * 2 diff --git a/pandas/tests/frame/indexing/test_xs.py b/pandas/tests/frame/indexing/test_xs.py index d2704876c31c5..c6938abb57d64 100644 --- a/pandas/tests/frame/indexing/test_xs.py +++ b/pandas/tests/frame/indexing/test_xs.py @@ -366,12 +366,7 @@ def test_xs_droplevel_false_view(self, using_array_manager): assert np.shares_memory(result.iloc[:, 
0]._values, df.iloc[:, 0]._values) # modifying original df also modifies result when having a single block df.iloc[0, 0] = 2 - if not using_array_manager: - expected = DataFrame({"a": [2]}) - else: - # TODO(ArrayManager) iloc does not update the array inplace using - # "split" path - expected = DataFrame({"a": [1]}) + expected = DataFrame({"a": [2]}) tm.assert_frame_equal(result, expected) # with mixed dataframe, modifying the parent doesn't modify result @@ -379,7 +374,13 @@ def test_xs_droplevel_false_view(self, using_array_manager): df = DataFrame([[1, 2.5, "a"]], columns=Index(["a", "b", "c"])) result = df.xs("a", axis=1, drop_level=False) df.iloc[0, 0] = 2 - expected = DataFrame({"a": [1]}) + if using_array_manager: + # Here the behavior is consistent + expected = DataFrame({"a": [2]}) + else: + # FIXME: iloc does not update the array inplace using + # "split" path + expected = DataFrame({"a": [1]}) tm.assert_frame_equal(result, expected) def test_xs_list_indexer_droplevel_false(self): diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 919d8ab14778e..fc2c138538ac9 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -789,6 +789,10 @@ def test_std_timedelta64_skipna_false(self): # GH#37392 tdi = pd.timedelta_range("1 Day", periods=10) df = DataFrame({"A": tdi, "B": tdi}) + # Copy is needed for ArrayManager case, otherwise setting df.iloc + # below edits tdi, alterting both df['A'] and df['B'] + # FIXME: passing copy=True to constructor does not fix this + df = df.copy() df.iloc[-2, -1] = pd.NaT result = df.std(skipna=False) @@ -1017,7 +1021,9 @@ def test_idxmax_mixed_dtype(self): # don't cast to object, which would raise in nanops dti = date_range("2016-01-01", periods=3) - df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti}) + # Copying dti is needed for ArrayManager otherwise when we set + # df.loc[0, 3] = pd.NaT below it edits dti + df = DataFrame({1: [0, 2, 1], 
2: range(3)[::-1], 3: dti.copy(deep=True)}) result = df.idxmax() expected = Series([1, 0, 2], index=[1, 2, 3]) @@ -1074,6 +1080,10 @@ def test_idxmax_idxmin_convert_dtypes(self, op, expected_value): def test_idxmax_dt64_multicolumn_axis1(self): dti = date_range("2016-01-01", periods=3) df = DataFrame({3: dti, 4: dti[::-1]}) + # FIXME: copy needed for ArrayManager, otherwise setting with iloc + # below also sets df.iloc[-1, 1]; passing copy=True to DataFrame + # does not solve this. + df = df.copy() df.iloc[0, 0] = pd.NaT df._consolidate_inplace()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44345
2021-11-07T19:04:39Z
2021-11-13T00:28:44Z
2021-11-13T00:28:44Z
2021-11-13T00:32:18Z
CI: Fix import of coo_matrix after scipy dev upgrade
diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 96021bfa18fb7..cc48918981338 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -1255,7 +1255,7 @@ def test_to_coo( A, rows, cols = ss.sparse.to_coo( row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels ) - assert isinstance(A, scipy.sparse.coo.coo_matrix) + assert isinstance(A, scipy.sparse.coo_matrix) tm.assert_numpy_array_equal(A.toarray(), expected_A) assert rows == expected_rows assert cols == expected_cols
New scipy_dev renamed ``coo`` to ``_coo`` to make it private
https://api.github.com/repos/pandas-dev/pandas/pulls/44344
2021-11-07T15:49:57Z
2021-11-09T20:08:21Z
null
2021-11-13T19:32:29Z
TYP: timestamps.pyi
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index 17df594a39c44..9693f18e2e05d 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -8,7 +8,6 @@ from datetime import ( from time import struct_time from typing import ( ClassVar, - Type, TypeVar, overload, ) @@ -22,9 +21,9 @@ from pandas._libs.tslibs import ( Timedelta, ) -_S = TypeVar("_S") +_S = TypeVar("_S", bound=datetime) -def integer_op_not_supported(obj) -> TypeError: ... +def integer_op_not_supported(obj: object) -> TypeError: ... class Timestamp(datetime): min: ClassVar[Timestamp] @@ -35,7 +34,7 @@ class Timestamp(datetime): # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") def __new__( # type: ignore[misc] - cls: Type[_S], + cls: type[_S], ts_input: int | np.integer | float @@ -43,9 +42,9 @@ class Timestamp(datetime): | _date | datetime | np.datetime64 = ..., - freq=..., + freq: int | None | str | BaseOffset = ..., tz: str | _tzinfo | None | int = ..., - unit=..., + unit: str | int | None = ..., year: int | None = ..., month: int | None = ..., day: int | None = ..., @@ -80,24 +79,28 @@ class Timestamp(datetime): @property def fold(self) -> int: ... @classmethod - def fromtimestamp(cls: Type[_S], t: float, tz: _tzinfo | None = ...) -> _S: ... + def fromtimestamp(cls: type[_S], t: float, tz: _tzinfo | None = ...) -> _S: ... @classmethod - def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ... + def utcfromtimestamp(cls: type[_S], t: float) -> _S: ... @classmethod - def today(cls: Type[_S]) -> _S: ... + def today(cls: type[_S], tz: _tzinfo | str | None = ...) -> _S: ... @classmethod - def fromordinal(cls: Type[_S], n: int) -> _S: ... + def fromordinal( + cls: type[_S], + ordinal: int, + freq: str | BaseOffset | None = ..., + tz: _tzinfo | str | None = ..., + ) -> _S: ... @classmethod - def now(cls: Type[_S], tz: _tzinfo | str | None = ...) -> _S: ... 
+ def now(cls: type[_S], tz: _tzinfo | str | None = ...) -> _S: ... @classmethod - def utcnow(cls: Type[_S]) -> _S: ... + def utcnow(cls: type[_S]) -> _S: ... + # error: Signature of "combine" incompatible with supertype "datetime" @classmethod - def combine( - cls, date: _date, time: _time, tzinfo: _tzinfo | None = ... - ) -> datetime: ... + def combine(cls, date: _date, time: _time) -> datetime: ... # type: ignore[override] @classmethod - def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... - def strftime(self, fmt: str) -> str: ... + def fromisoformat(cls: type[_S], date_string: str) -> _S: ... + def strftime(self, format: str) -> str: ... def __format__(self, fmt: str) -> str: ... def toordinal(self) -> int: ... def timetuple(self) -> struct_time: ... @@ -116,12 +119,12 @@ class Timestamp(datetime): second: int = ..., microsecond: int = ..., tzinfo: _tzinfo | None = ..., - *, fold: int = ..., ) -> datetime: ... def astimezone(self: _S, tz: _tzinfo | None = ...) -> _S: ... def ctime(self) -> str: ... - def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... + # error: Signature of "isoformat" incompatible with supertype "datetime" + def isoformat(self, sep: str = ...) -> str: ... # type: ignore[override] @classmethod def strptime(cls, date_string: str, format: str) -> datetime: ... def utcoffset(self) -> timedelta | None: ... @@ -131,12 +134,18 @@ class Timestamp(datetime): def __lt__(self, other: datetime) -> bool: ... # type: ignore def __ge__(self, other: datetime) -> bool: ... # type: ignore def __gt__(self, other: datetime) -> bool: ... # type: ignore - def __add__(self: _S, other: timedelta) -> _S: ... + # error: Signature of "__add__" incompatible with supertype "date"/"datetime" + @overload # type: ignore[override] + def __add__(self, other: np.ndarray) -> np.ndarray: ... + @overload + # TODO: other can also be Tick (but it cannot be resolved) + def __add__(self: _S, other: timedelta | np.timedelta64) -> _S: ... 
def __radd__(self: _S, other: timedelta) -> _S: ... @overload # type: ignore def __sub__(self, other: datetime) -> timedelta: ... @overload - def __sub__(self, other: timedelta) -> datetime: ... + # TODO: other can also be Tick (but it cannot be resolved) + def __sub__(self, other: timedelta | np.timedelta64) -> datetime: ... def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... @@ -157,23 +166,38 @@ class Timestamp(datetime): def is_year_end(self) -> bool: ... def to_pydatetime(self, warn: bool = ...) -> datetime: ... def to_datetime64(self) -> np.datetime64: ... - def to_period(self, freq) -> Period: ... + def to_period(self, freq: BaseOffset | str | None = ...) -> Period: ... def to_julian_date(self) -> np.float64: ... @property def asm8(self) -> np.datetime64: ... - def tz_convert(self: _S, tz) -> _S: ... + def tz_convert(self: _S, tz: _tzinfo | str | None) -> _S: ... # TODO: could return NaT? def tz_localize( - self: _S, tz, ambiguous: str = ..., nonexistent: str = ... + self: _S, tz: _tzinfo | str | None, ambiguous: str = ..., nonexistent: str = ... ) -> _S: ... def normalize(self: _S) -> _S: ... # TODO: round/floor/ceil could return NaT? def round( - self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + self: _S, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... ) -> _S: ... def floor( - self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + self: _S, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... ) -> _S: ... def ceil( - self: _S, freq, ambiguous: bool | str = ..., nonexistent: str = ... + self: _S, freq: str, ambiguous: bool | str = ..., nonexistent: str = ... ) -> _S: ... + def day_name(self, locale: str | None = ...) -> str: ... + def month_name(self, locale: str | None = ...) -> str: ... + @property + def day_of_week(self) -> int: ... + @property + def day_of_month(self) -> int: ... + @property + def day_of_year(self) -> int: ... 
+ @property + def quarter(self) -> int: ... + @property + def week(self) -> int: ... + def to_numpy( + self, dtype: np.dtype | None = ..., copy: bool = ... + ) -> np.datetime64: ...
Should type annotations from a pyi files also be added to the pyx file (if possible)?
https://api.github.com/repos/pandas-dev/pandas/pulls/44339
2021-11-06T23:59:15Z
2021-12-19T22:41:15Z
2021-12-19T22:41:15Z
2021-12-20T16:20:29Z
Revert "REF: remove putmask_preserve, putmask_without_repeat"
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index b17e86e774f60..54324bf721945 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -126,8 +126,7 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd if values.dtype.kind == new.dtype.kind: # preserves dtype if possible - np.putmask(values, mask, new) - return values + return _putmask_preserve(values, new, mask) dtype = find_common_type([values.dtype, new.dtype]) # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type @@ -136,8 +135,51 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # List[Any], _DTypeDict, Tuple[Any, Any]]]" values = values.astype(dtype) # type: ignore[arg-type] - np.putmask(values, mask, new) - return values + return _putmask_preserve(values, new, mask) + + +def _putmask_preserve(new_values: np.ndarray, new, mask: npt.NDArray[np.bool_]): + try: + new_values[mask] = new[mask] + except (IndexError, ValueError): + new_values[mask] = new + return new_values + + +def putmask_without_repeat( + values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any +) -> None: + """ + np.putmask will truncate or repeat if `new` is a listlike with + len(new) != len(values). We require an exact match. + + Parameters + ---------- + values : np.ndarray + mask : np.ndarray[bool] + new : Any + """ + if getattr(new, "ndim", 0) >= 1: + new = new.astype(values.dtype, copy=False) + + # TODO: this prob needs some better checking for 2D cases + nlocs = mask.sum() + if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1: + if nlocs == len(new): + # GH#30567 + # If length of ``new`` is less than the length of ``values``, + # `np.putmask` would first repeat the ``new`` array and then + # assign the masked values hence produces incorrect result. 
+ # `np.place` on the other hand uses the ``new`` values at it is + # to place in the masked locations of ``values`` + np.place(values, mask, new) + # i.e. values[mask] = new + elif mask.shape[-1] == len(new) or len(new) == 1: + np.putmask(values, mask, new) + else: + raise ValueError("cannot assign mismatch length to masked array") + else: + np.putmask(values, mask, new) def validate_putmask( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a452eabd4ea6f..2589015e0f0b1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -51,7 +51,6 @@ is_extension_array_dtype, is_interval_dtype, is_list_like, - is_object_dtype, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( @@ -77,6 +76,7 @@ extract_bool_array, putmask_inplace, putmask_smart, + putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) @@ -960,7 +960,10 @@ def putmask(self, mask, new) -> list[Block]: new = self.fill_value if self._can_hold_element(new): - np.putmask(self.values.T, mask, new) + + # error: Argument 1 to "putmask_without_repeat" has incompatible type + # "Union[ndarray, ExtensionArray]"; expected "ndarray" + putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type] return [self] elif noop: @@ -1412,16 +1415,15 @@ def putmask(self, mask, new) -> list[Block]: new_values = self.values + if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): + new = new[mask] + if mask.ndim == new_values.ndim + 1: # TODO(EA2D): unnecessary with 2D EAs mask = mask.reshape(new_values.shape) try: - if isinstance(new, (np.ndarray, ExtensionArray)): - # Caller is responsible for ensuring matching lengths - new_values[mask] = new[mask] - else: - new_values[mask] = new + new_values[mask] = new except TypeError: if not is_interval_dtype(self.dtype): # Discussion about what we want to support in the general @@ -1479,14 +1481,7 @@ def setitem(self, indexer, value): # we are always 1-D indexer = 
indexer[0] - try: - check_setitem_lengths(indexer, value, self.values) - except ValueError: - # If we are object dtype (e.g. PandasDtype[object]) then - # we can hold nested data, so can ignore this mismatch. - if not is_object_dtype(self.dtype): - raise - + check_setitem_lengths(indexer, value, self.values) self.values[indexer] = value return self diff --git a/pandas/core/series.py b/pandas/core/series.py index 579c16613ec2e..7ee9a0bcdd9e1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1101,6 +1101,7 @@ def __setitem__(self, key, value) -> None: is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) + and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index df424e649fbe9..e60f7769270bd 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -363,11 +363,6 @@ def test_concat(self, data, in_frame): class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): - @skip_nested - def test_setitem_sequence_mismatched_length_raises(self, data, as_array): - # doesn't raise bc object dtype holds nested data - super().test_setitem_sequence_mismatched_length_raises(data, as_array) - @skip_nested def test_setitem_invalid(self, data, invalid_scalar): # object dtype can hold anything, so doesn't raise
Reverts pandas-dev/pandas#44328 cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/44338
2021-11-06T21:26:42Z
2021-11-06T22:28:05Z
2021-11-06T22:28:05Z
2021-11-06T22:28:11Z
Remove aiobotocore pin
diff --git a/ci/deps/actions-38-db.yaml b/ci/deps/actions-38-db.yaml index 7b73f43b7ba03..3e959f9b7e992 100644 --- a/ci/deps/actions-38-db.yaml +++ b/ci/deps/actions-38-db.yaml @@ -12,7 +12,6 @@ dependencies: - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies - - aiobotocore<2.0.0 - beautifulsoup4 - botocore>=1.11 - dask diff --git a/environment.yml b/environment.yml index 7aa7bb0842eca..f5f495bed4d78 100644 --- a/environment.yml +++ b/environment.yml @@ -105,7 +105,7 @@ dependencies: - pytables>=3.6.1 # pandas.read_hdf, DataFrame.to_hdf - s3fs>=0.4.0 # file IO when using 's3://...' path - - aiobotocore<2.0.0 + - aiobotocore - fsspec>=0.7.4 # for generic remote file operations - gcsfs>=0.6.0 # file IO when using 'gcs://...' path - sqlalchemy # pandas.read_sql, DataFrame.to_sql diff --git a/requirements-dev.txt b/requirements-dev.txt index 6247b4e5a12b1..b384d3b6af5b8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -69,7 +69,7 @@ pyarrow>=1.0.1 python-snappy tables>=3.6.1 s3fs>=0.4.0 -aiobotocore<2.0.0 +aiobotocore fsspec>=0.7.4 gcsfs>=0.6.0 sqlalchemy
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44337
2021-11-06T20:39:42Z
2021-11-06T21:38:04Z
null
2021-11-13T19:32:31Z
ENH: option to change the number in `_dir_additions_for_owner`
diff --git a/doc/source/user_guide/options.rst b/doc/source/user_guide/options.rst index a65bb774b9df8..93448dae578c9 100644 --- a/doc/source/user_guide/options.rst +++ b/doc/source/user_guide/options.rst @@ -430,6 +430,10 @@ display.html.use_mathjax True When True, Jupyter notebook table contents using MathJax, rendering mathematical expressions enclosed by the dollar symbol. +display.max_dir_items 100 The number of columns from a dataframe that + are added to dir. These columns can then be + suggested by tab completion. 'None' value means + unlimited. io.excel.xls.writer xlwt The default Excel writer engine for 'xls' files. diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index ad6bf64dcdfa8..64bdbca9c1f27 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -213,7 +213,7 @@ Other enhancements - :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`) - :meth:`Timestamp.isoformat`, now handles the ``timespec`` argument from the base :class:``datetime`` class (:issue:`26131`) - :meth:`NaT.to_numpy` ``dtype`` argument is now respected, so ``np.timedelta64`` can be returned (:issue:`44460`) -- +- New option ``display.max_dir_items`` customizes the number of columns added to :meth:`Dataframe.__dir__` and suggested for tab completion (:issue:`37996`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 31c2ec8f0cbf9..bf2d770ee1e7f 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -238,6 +238,16 @@ def use_numba_cb(key): (default: True) """ +pc_max_dir_items = """\ +: int + The number of items that will be added to `dir(...)`. 'None' value means + unlimited. Because dir is cached, changing this option will not immediately + affect already existing dataframes until a column is deleted or added. 
+ + This is for instance used to suggest columns from a dataframe to tab + completion. +""" + pc_width_doc = """ : int Width of the display in characters. In case python/IPython is running in @@ -451,6 +461,9 @@ def _deprecate_negative_int_max_colwidth(key): cf.register_option( "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool ) + cf.register_option( + "max_dir_items", 100, pc_max_dir_items, validator=is_nonnegative_int + ) tc_sim_interactive_doc = """ : boolean diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 220b43f323a5f..5f7bc718215be 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -846,7 +846,7 @@ def _dir_additions_for_owner(self) -> set[str_t]: """ return { c - for c in self.unique(level=0)[:100] + for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 2e276f4f27a67..3adc4ebceaad5 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -5,6 +5,8 @@ import numpy as np import pytest +from pandas._config.config import option_context + import pandas.util._test_decorators as td from pandas.util._test_decorators import ( async_mark, @@ -87,6 +89,25 @@ def test_tab_completion(self): assert key not in dir(df) assert isinstance(df.__getitem__("A"), DataFrame) + def test_display_max_dir_items(self): + # display.max_dir_items increaes the number of columns that are in __dir__. 
+ columns = ["a" + str(i) for i in range(420)] + values = [range(420), range(420)] + df = DataFrame(values, columns=columns) + + # The default value for display.max_dir_items is 100 + assert "a99" in dir(df) + assert "a100" not in dir(df) + + with option_context("display.max_dir_items", 300): + df = DataFrame(values, columns=columns) + assert "a299" in dir(df) + assert "a300" not in dir(df) + + with option_context("display.max_dir_items", None): + df = DataFrame(values, columns=columns) + assert "a419" in dir(df) + def test_not_hashable(self): empty_frame = DataFrame()
- [x] closes #37996 - [x] tests added / passed - [x] Allows to optionally change the number of columns from a dataframe that are added to `dir`, which is frequently also the number of columns suggested by tab completion, by using the option `pandas.set_option('display.max_dir_additions', 100)` to a higher number than 100 or None for an unlimited number of columns. Changing the value of `display.max_dir_additions` will only take effect for existing dataframes after a column is added or deleted, as the result of `dir` is cached.
https://api.github.com/repos/pandas-dev/pandas/pulls/44335
2021-11-06T16:39:28Z
2021-11-26T15:02:54Z
2021-11-26T15:02:53Z
2021-11-26T15:02:57Z
TST: use custom parametrization for consistency in base extension array tests
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index c52f20255eb81..1d3d736ca7ee2 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -162,13 +162,12 @@ def test_compare_array(self, data, comparison_op): other = pd.Series([data[0]] * len(data)) self._compare_other(ser, data, comparison_op, other) - def test_direct_arith_with_ndframe_returns_not_implemented( - self, data, frame_or_series - ): + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) + def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. other = pd.Series(data) - if frame_or_series is pd.DataFrame: + if box is pd.DataFrame: other = other.to_frame() if hasattr(data, "__eq__"):
Follow-up on https://github.com/pandas-dev/pandas/pull/44242, as that broke geopandas CI. This adds back the custom parametrization, which also makes it consistent with the other occurrence of this test (for a different op) which is still using this: https://github.com/pandas-dev/pandas/blob/057c6f81b464c6bbb667d326e203ad0dbd17cbde/pandas/tests/extension/base/ops.py#L116-L127 Since this is the only occurrence of `frame_or_series` in the base extension tests, I think it's not warranted to make it a fixture. If we would rather want this, we should add it to `tests/extension/conftest.py` instead (can always be done in a follow-up).
https://api.github.com/repos/pandas-dev/pandas/pulls/44332
2021-11-06T10:16:12Z
2021-11-11T20:10:23Z
2021-11-11T20:10:23Z
2021-11-11T20:10:27Z
DOC: df.to_html documentation incorrectly contains min_rows optional param
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e7f6329bb73b..b97175cc57fd3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1065,11 +1065,11 @@ def to_string( index_names: bool = True, justify: str | None = None, max_rows: int | None = None, - min_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, + min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: @@ -1078,6 +1078,9 @@ def to_string( %(shared_params)s line_width : int, optional Width to wrap a line in characters. + min_rows : int, optional + The number of rows to display in the console in a truncated repr + (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. @@ -2838,15 +2841,14 @@ def to_html( border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. - encoding : str, default "utf-8" - Set character encoding. - - .. versionadded:: 1.0 - table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. + encoding : str, default "utf-8" + Set character encoding. + + .. versionadded:: 1.0 %(returns)s See Also -------- diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ba85a1b340d05..ca53bfb7d5e08 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -164,9 +164,6 @@ * unset. max_rows : int, optional Maximum number of rows to display in the console. - min_rows : int, optional - The number of rows to display in the console in a truncated repr - (when number of rows is above `max_rows`). max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False
- [x] closes #44304 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [NA] whatsnew entry Moved min_rows parameter out of the common docstring that is used by both DataFrame.to_html and DataFrame.to_string and reintroduced the parameter in DataFrame.to_string only
https://api.github.com/repos/pandas-dev/pandas/pulls/44331
2021-11-06T03:41:55Z
2021-11-14T02:40:08Z
2021-11-14T02:40:07Z
2021-11-15T10:50:07Z
CLN: remove _AXIS_REVERSED
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e7f6329bb73b..ebf3428020652 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1686,9 +1686,7 @@ def to_numpy( self._consolidate_inplace() if dtype is not None: dtype = np.dtype(dtype) - result = self._mgr.as_array( - transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value - ) + result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) @@ -10715,7 +10713,6 @@ def isin(self, values) -> DataFrame: 1: 1, "columns": 1, } - _AXIS_REVERSED = True _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number = 1 _info_axis_name = "columns" @@ -10840,7 +10837,7 @@ def values(self) -> np.ndarray: ['monkey', nan, None]], dtype=object) """ self._consolidate_inplace() - return self._mgr.as_array(transpose=True) + return self._mgr.as_array() @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) def ffill( diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 732508f9b7fb6..13e1c6e7e20b1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -479,7 +479,6 @@ def _data(self): _stat_axis_name = "index" _AXIS_ORDERS: list[str] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0} - _AXIS_REVERSED: bool_t _info_axis_number: int _info_axis_name: str _AXIS_LEN: int @@ -566,9 +565,10 @@ def _get_axis(self, axis: Axis) -> Index: def _get_block_manager_axis(cls, axis: Axis) -> int: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) - if cls._AXIS_REVERSED: - m = cls._AXIS_LEN - 1 - return m - axis + ndim = cls._AXIS_LEN + if ndim == 2: + # i.e. 
DataFrame + return 1 - axis return axis @final diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index fc7d2168c1c79..543b2ea26f750 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -1082,7 +1082,6 @@ def unstack(self, unstacker, fill_value) -> ArrayManager: def as_array( self, - transpose: bool = False, dtype=None, copy: bool = False, na_value=lib.no_default, @@ -1092,8 +1091,6 @@ def as_array( Parameters ---------- - transpose : bool, default False - If True, transpose the return array. dtype : object, default None Data type of the return array. copy : bool, default False @@ -1109,7 +1106,7 @@ def as_array( """ if len(self.arrays) == 0: empty_arr = np.empty(self.shape, dtype=float) - return empty_arr.transpose() if transpose else empty_arr + return empty_arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object @@ -1137,8 +1134,6 @@ def as_array( result[isna(result)] = na_value return result - # FIXME: don't leave commented-out - # return arr.transpose() if transpose else arr class SingleArrayManager(BaseArrayManager, SingleDataManager): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 7db19eda0f2fb..05a9aab4a5554 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1465,7 +1465,6 @@ def to_dict(self, copy: bool = True): def as_array( self, - transpose: bool = False, dtype: np.dtype | None = None, copy: bool = False, na_value=lib.no_default, @@ -1475,8 +1474,6 @@ def as_array( Parameters ---------- - transpose : bool, default False - If True, transpose the return array. dtype : np.dtype or None, default None Data type of the return array. 
copy : bool, default False @@ -1492,7 +1489,7 @@ def as_array( """ if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) - return arr.transpose() if transpose else arr + return arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object @@ -1524,7 +1521,7 @@ def as_array( if na_value is not lib.no_default: arr[isna(arr)] = na_value - return arr.transpose() if transpose else arr + return arr.transpose() def _interleave( self, diff --git a/pandas/core/series.py b/pandas/core/series.py index cfa3e90f8bc73..7ee9a0bcdd9e1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5498,7 +5498,6 @@ def mask( # ---------------------------------------------------------------------- # Add index _AXIS_ORDERS = ["index"] - _AXIS_REVERSED = False _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number = 0 _info_axis_name = "index" diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 54706dc24fc42..b577bc7e436df 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -851,7 +851,7 @@ def test_validate_bool_args(self, value): def _as_array(mgr): if mgr.ndim == 1: return mgr.external_values() - return mgr.as_array() + return mgr.as_array().T class TestIndexing:
Motivated by the FIXME in ArrayManager.as_array
https://api.github.com/repos/pandas-dev/pandas/pulls/44330
2021-11-06T02:03:17Z
2021-11-06T19:41:13Z
2021-11-06T19:41:13Z
2021-11-06T19:41:52Z
REF: remove putmask_preserve, putmask_without_repeat
diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 54324bf721945..b17e86e774f60 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -126,7 +126,8 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd if values.dtype.kind == new.dtype.kind: # preserves dtype if possible - return _putmask_preserve(values, new, mask) + np.putmask(values, mask, new) + return values dtype = find_common_type([values.dtype, new.dtype]) # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type @@ -135,51 +136,8 @@ def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.nd # List[Any], _DTypeDict, Tuple[Any, Any]]]" values = values.astype(dtype) # type: ignore[arg-type] - return _putmask_preserve(values, new, mask) - - -def _putmask_preserve(new_values: np.ndarray, new, mask: npt.NDArray[np.bool_]): - try: - new_values[mask] = new[mask] - except (IndexError, ValueError): - new_values[mask] = new - return new_values - - -def putmask_without_repeat( - values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any -) -> None: - """ - np.putmask will truncate or repeat if `new` is a listlike with - len(new) != len(values). We require an exact match. - - Parameters - ---------- - values : np.ndarray - mask : np.ndarray[bool] - new : Any - """ - if getattr(new, "ndim", 0) >= 1: - new = new.astype(values.dtype, copy=False) - - # TODO: this prob needs some better checking for 2D cases - nlocs = mask.sum() - if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1: - if nlocs == len(new): - # GH#30567 - # If length of ``new`` is less than the length of ``values``, - # `np.putmask` would first repeat the ``new`` array and then - # assign the masked values hence produces incorrect result. - # `np.place` on the other hand uses the ``new`` values at it is - # to place in the masked locations of ``values`` - np.place(values, mask, new) - # i.e. 
values[mask] = new - elif mask.shape[-1] == len(new) or len(new) == 1: - np.putmask(values, mask, new) - else: - raise ValueError("cannot assign mismatch length to masked array") - else: - np.putmask(values, mask, new) + np.putmask(values, mask, new) + return values def validate_putmask( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 33c78f396b80b..758633a7ab956 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -51,6 +51,7 @@ is_extension_array_dtype, is_interval_dtype, is_list_like, + is_object_dtype, is_string_dtype, ) from pandas.core.dtypes.dtypes import ( @@ -76,7 +77,6 @@ extract_bool_array, putmask_inplace, putmask_smart, - putmask_without_repeat, setitem_datetimelike_compat, validate_putmask, ) @@ -960,10 +960,7 @@ def putmask(self, mask, new) -> list[Block]: new = self.fill_value if self._can_hold_element(new): - - # error: Argument 1 to "putmask_without_repeat" has incompatible type - # "Union[ndarray, ExtensionArray]"; expected "ndarray" - putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type] + np.putmask(self.values.T, mask, new) return [self] elif noop: @@ -1407,15 +1404,16 @@ def putmask(self, mask, new) -> list[Block]: new_values = self.values - if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): - new = new[mask] - if mask.ndim == new_values.ndim + 1: # TODO(EA2D): unnecessary with 2D EAs mask = mask.reshape(new_values.shape) try: - new_values[mask] = new + if isinstance(new, (np.ndarray, ExtensionArray)): + # Caller is responsible for ensuring matching lengths + new_values[mask] = new[mask] + else: + new_values[mask] = new except TypeError: if not is_interval_dtype(self.dtype): # Discussion about what we want to support in the general @@ -1473,7 +1471,14 @@ def setitem(self, indexer, value): # we are always 1-D indexer = indexer[0] - check_setitem_lengths(indexer, value, self.values) + try: + check_setitem_lengths(indexer, value, 
self.values) + except ValueError: + # If we are object dtype (e.g. PandasDtype[object]) then + # we can hold nested data, so can ignore this mismatch. + if not is_object_dtype(self.dtype): + raise + self.values[indexer] = value return self diff --git a/pandas/core/series.py b/pandas/core/series.py index 02f4810bb1e6b..77789fd6f7d68 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1101,7 +1101,6 @@ def __setitem__(self, key, value) -> None: is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) - and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index e60f7769270bd..df424e649fbe9 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -363,6 +363,11 @@ def test_concat(self, data, in_frame): class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): + @skip_nested + def test_setitem_sequence_mismatched_length_raises(self, data, as_array): + # doesn't raise bc object dtype holds nested data + super().test_setitem_sequence_mismatched_length_raises(data, as_array) + @skip_nested def test_setitem_invalid(self, data, invalid_scalar): # object dtype can hold anything, so doesn't raise
No longer needed following #44275
https://api.github.com/repos/pandas-dev/pandas/pulls/44328
2021-11-05T18:51:14Z
2021-11-06T19:42:42Z
2021-11-06T19:42:42Z
2021-11-06T21:10:02Z
BUG: is_dtype_equal(dtype, "string[pyarrow]") raises if pyarrow not installed
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 4f7eb7c87b260..1765b22dda05f 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -655,7 +655,7 @@ Conversion Strings ^^^^^^^ -- +- Fixed bug in checking for ``string[pyarrow]`` dtype incorrectly raising an ImportError when pyarrow is not installed (:issue:`44327`) - Interval diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 72cc28c1dd66d..75689b8dba8f5 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -613,7 +613,7 @@ def is_dtype_equal(source, target) -> bool: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target - except (TypeError, AttributeError): + except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) @@ -622,7 +622,7 @@ def is_dtype_equal(source, target) -> bool: source = get_dtype(source) target = get_dtype(target) return source == target - except (TypeError, AttributeError): + except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index be5cb81506efd..7f1bcecc31d26 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -115,6 +115,7 @@ def test_period_dtype(self, dtype): "float": np.dtype(np.float64), "object": np.dtype(object), "category": com.pandas_dtype("category"), + "string": pd.StringDtype(), } @@ -128,6 +129,12 @@ def test_dtype_equal(name1, dtype1, name2, dtype2): assert not com.is_dtype_equal(dtype1, dtype2) +@pytest.mark.parametrize("name,dtype", list(dtypes.items()), ids=lambda x: str(x)) +def test_pyarrow_string_import_error(name, dtype): + # GH-44276 + assert not com.is_dtype_equal(dtype, "string[pyarrow]") + + @pytest.mark.parametrize( "dtype1,dtype2", [
- [x] closes #44276 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Added instance to check string preventing import error
https://api.github.com/repos/pandas-dev/pandas/pulls/44327
2021-11-05T16:56:31Z
2021-12-15T07:52:51Z
2021-12-15T07:52:50Z
2021-12-15T08:27:03Z
BUG: closes #44312: fixes unwanted TypeError with nullable nested metadata in json_normalize
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 4d0dee01f05c1..17993b8712019 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -569,6 +569,7 @@ I/O - Bug in :func:`read_csv`, changed exception class when expecting a file path name or file-like object from ``OSError`` to ``TypeError`` (:issue:`43366`) - Bug in :func:`read_json` not handling non-numpy dtypes correctly (especially ``category``) (:issue:`21892`, :issue:`33205`) - Bug in :func:`json_normalize` where multi-character ``sep`` parameter is incorrectly prefixed to every key (:issue:`43831`) +- Bug in :func:`json_normalize` where reading data with missing multi-level metadata would not respect errors="ignore" (:issue:`44312`) - Bug in :func:`read_csv` with :code:`float_precision="round_trip"` which did not skip initial/trailing whitespace (:issue:`43713`) - Bug in dumping/loading a :class:`DataFrame` with ``yaml.dump(frame)`` (:issue:`42748`) - diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 90fd5d077d031..2c2c127394fb6 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -389,6 +389,8 @@ def _pull_field( try: if isinstance(spec, list): for field in spec: + if result is None: + raise KeyError(field) result = result[field] else: result = result[spec] diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index a2b90f607e918..272a4aa6723dd 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -634,6 +634,33 @@ def test_missing_meta(self, missing_metadata): expected = DataFrame(ex_data, columns=columns) tm.assert_frame_equal(result, expected) + def test_missing_nested_meta(self): + # GH44312 + # If errors="ignore" and nested metadata is null, we should return nan + data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]} + result = json_normalize( + data, + record_path="value", + 
meta=["meta", ["nested_meta", "leaf"]], + errors="ignore", + ) + ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]] + columns = ["rec", "meta", "nested_meta.leaf"] + expected = DataFrame(ex_data, columns=columns).astype( + {"nested_meta.leaf": object} + ) + tm.assert_frame_equal(result, expected) + + # If errors="raise" and nested metadata is null, we should raise with the + # key of the first missing level + with pytest.raises(KeyError, match="'leaf' not found"): + json_normalize( + data, + record_path="value", + meta=["meta", ["nested_meta", "leaf"]], + errors="raise", + ) + def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata): # GH41876 # Ensure errors='raise' works as intended even when a record_path of length
- [x] closes #44312 Fixes TypeError crash in json_normalize when using errors="ignore" and extracting nullable nested metadata.
https://api.github.com/repos/pandas-dev/pandas/pulls/44325
2021-11-05T14:40:05Z
2021-11-14T02:34:45Z
2021-11-14T02:34:45Z
2021-11-14T02:34:49Z
DOC: remove note about groupby.apply side-effect
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ac3808d9ee590..00c4d2778e545 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -163,11 +163,6 @@ class providing the base-class of operations. Notes ----- - In the current implementation ``apply`` calls ``func`` twice on the - first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if ``func`` has - side-effects, as they will take effect twice for the first - group. .. versionchanged:: 1.3.0
The described behavior has been fixed in 0.25 - [x] closes #44318 - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry, _comment:_ wouldn't say this is applicable here
https://api.github.com/repos/pandas-dev/pandas/pulls/44321
2021-11-05T11:04:47Z
2021-11-05T13:04:15Z
2021-11-05T13:04:15Z
2021-11-05T13:20:28Z
Backport PR #44315 on branch 1.3.x (Pin aiobotocore to lower than 2.0 )
diff --git a/ci/deps/actions-37-db.yaml b/ci/deps/actions-37-db.yaml index 73d3bf2dcc70a..ed6086832cf71 100644 --- a/ci/deps/actions-37-db.yaml +++ b/ci/deps/actions-37-db.yaml @@ -12,6 +12,7 @@ dependencies: - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies + - aiobotocore<2.0.0 - beautifulsoup4 - botocore>=1.11 - dask diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 5ca449d537df3..c9249b272829f 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -12,6 +12,7 @@ dependencies: - hypothesis>=3.58.0 # pandas dependencies + - aiobotocore<2.0.0 - beautifulsoup4 - bottleneck - fsspec>=0.8.0, <2021.6.0 diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 52a5de70f85dc..e455563ef9e9c 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -13,6 +13,7 @@ dependencies: - pytest-azurepipelines # pandas dependencies + - aiobotocore<2.0.0 - blosc - bottleneck - fastparquet>=0.4.0 diff --git a/environment.yml b/environment.yml index 13d022c3f0e72..2843d5302b404 100644 --- a/environment.yml +++ b/environment.yml @@ -106,7 +106,7 @@ dependencies: - pyqt>=5.9.2 # pandas.read_clipboard - pytables>=3.5.1 # pandas.read_hdf, DataFrame.to_hdf - s3fs>=0.4.0 # file IO when using 's3://...' path - - aiobotocore + - aiobotocore<2.0.0 - fsspec>=0.7.4, <2021.6.0 # for generic remote file operations - gcsfs>=0.6.0 # file IO when using 'gcs://...' path - sqlalchemy # pandas.read_sql, DataFrame.to_sql diff --git a/requirements-dev.txt b/requirements-dev.txt index 0341f645e595b..c801aba57b201 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -70,7 +70,7 @@ python-snappy pyqt5>=5.9.2 tables>=3.5.1 s3fs>=0.4.0 -aiobotocore +aiobotocore<2.0.0 fsspec>=0.7.4, <2021.6.0 gcsfs>=0.6.0 sqlalchemy
(cherry picked from commit 28c7f7628dd6b862e1eed29ff71e2e8363ca1eee) - [x] xref #44315
https://api.github.com/repos/pandas-dev/pandas/pulls/44319
2021-11-05T08:31:32Z
2021-11-09T09:21:59Z
2021-11-09T09:21:59Z
2021-11-13T19:32:27Z
ENH: Add decimal parameter to read_excel
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 1b3be65ee66f2..a1bb3261d0445 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -182,6 +182,7 @@ Other enhancements - Added :meth:`.ExponentialMovingWindow.sum` (:issue:`13297`) - :meth:`Series.str.split` now supports a ``regex`` argument that explicitly specifies whether the pattern is a regular expression. Default is ``None`` (:issue:`43563`, :issue:`32835`, :issue:`25549`) - :meth:`DataFrame.dropna` now accepts a single label as ``subset`` along with array-like (:issue:`41021`) +- :meth:`read_excel` now accepts a ``decimal`` argument that allow the user to specify the decimal point when parsing string columns to numeric (:issue:`14403`) - :meth:`.GroupBy.mean` now supports `Numba <http://numba.pydata.org/>`_ execution with the ``engine`` keyword (:issue:`43731`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 5dd85707220ba..e543c9161a26e 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -234,6 +234,14 @@ this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format. +decimal : str, default '.' + Character to recognize as decimal point for parsing string columns to numeric. + Note that this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format.(e.g. use ',' for European data). + + .. versionadded:: 1.4.0 + comment : str, default None Comments out remainder of line. Pass a character or characters to this argument to indicate comments in the input file. 
Any data between the @@ -356,6 +364,7 @@ def read_excel( parse_dates=False, date_parser=None, thousands=None, + decimal=".", comment=None, skipfooter=0, convert_float=None, @@ -394,6 +403,7 @@ def read_excel( parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, + decimal=decimal, comment=comment, skipfooter=skipfooter, convert_float=convert_float, @@ -498,6 +508,7 @@ def parse( parse_dates=False, date_parser=None, thousands=None, + decimal=".", comment=None, skipfooter=0, convert_float=None, @@ -624,6 +635,7 @@ def parse( parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, + decimal=decimal, comment=comment, skipfooter=skipfooter, usecols=usecols, diff --git a/pandas/tests/io/data/excel/test_decimal.ods b/pandas/tests/io/data/excel/test_decimal.ods new file mode 100644 index 0000000000000..308a851809dde Binary files /dev/null and b/pandas/tests/io/data/excel/test_decimal.ods differ diff --git a/pandas/tests/io/data/excel/test_decimal.xls b/pandas/tests/io/data/excel/test_decimal.xls new file mode 100644 index 0000000000000..ce34667873cb5 Binary files /dev/null and b/pandas/tests/io/data/excel/test_decimal.xls differ diff --git a/pandas/tests/io/data/excel/test_decimal.xlsb b/pandas/tests/io/data/excel/test_decimal.xlsb new file mode 100644 index 0000000000000..addfd1480a190 Binary files /dev/null and b/pandas/tests/io/data/excel/test_decimal.xlsb differ diff --git a/pandas/tests/io/data/excel/test_decimal.xlsm b/pandas/tests/io/data/excel/test_decimal.xlsm new file mode 100644 index 0000000000000..7dd6b1e7da036 Binary files /dev/null and b/pandas/tests/io/data/excel/test_decimal.xlsm differ diff --git a/pandas/tests/io/data/excel/test_decimal.xlsx b/pandas/tests/io/data/excel/test_decimal.xlsx new file mode 100644 index 0000000000000..0cedf3899a566 Binary files /dev/null and b/pandas/tests/io/data/excel/test_decimal.xlsx differ diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 
657e64bd01809..60302928420d0 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1289,6 +1289,19 @@ def test_ignore_chartsheets_by_int(self, request, read_ext): ): pd.read_excel("chartsheet" + read_ext, sheet_name=1) + def test_euro_decimal_format(self, request, read_ext): + # copied from read_csv + result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1) + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], + [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], + [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) + class TestExcelFileRead: @pytest.fixture(autouse=True)
- [ ] closes #14403 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44317
2021-11-05T02:03:51Z
2021-11-05T13:05:22Z
2021-11-05T13:05:22Z
2022-12-28T06:34:43Z
BUG: failure to cast all-int floating dtypes when setting into int dtypes
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 3f0744abd1d59..9ad53cd189348 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -535,6 +535,7 @@ Indexing - Bug in :meth:`DataFrame.nlargest` and :meth:`Series.nlargest` where sorted result did not count indexes containing ``np.nan`` (:issue:`28984`) - Bug in indexing on a non-unique object-dtype :class:`Index` with an NA scalar (e.g. ``np.nan``) (:issue:`43711`) - Bug in :meth:`DataFrame.__setitem__` incorrectly writing into an existing column's array rather than setting a new array when the new dtype and the old dtype match (:issue:`43406`) +- Bug in setting floating-dtype values into a :class:`Series` with integer dtype failing to set inplace when those values can be losslessly converted to integers (:issue:`44316`) - Bug in :meth:`Series.__setitem__` with object dtype when setting an array with matching size and dtype='datetime64[ns]' or dtype='timedelta64[ns]' incorrectly converting the datetime/timedeltas to integers (:issue:`43868`) - Bug in :meth:`DataFrame.sort_index` where ``ignore_index=True`` was not being respected when the index was already sorted (:issue:`43591`) - Bug in :meth:`Index.get_indexer_non_unique` when index contains multiple ``np.datetime64("NaT")`` and ``np.timedelta64("NaT")`` (:issue:`43869`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 261359767cf60..432074a8dd699 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2205,6 +2205,14 @@ def can_hold_element(arr: ArrayLike, element: Any) -> bool: if tipo.kind not in ["i", "u"]: if is_float(element) and element.is_integer(): return True + + if isinstance(element, np.ndarray) and element.dtype.kind == "f": + # If all can be losslessly cast to integers, then we can hold them + # We do something similar in putmask_smart + casted = element.astype(dtype) + comp = casted == element + return comp.all() + # Anything other than 
integer we cannot hold return False elif dtype.itemsize < tipo.itemsize: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 40aa70a2ada2f..4d8c411478993 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -425,6 +425,18 @@ def asi8(self) -> npt.NDArray[np.int64]: ) return self._values.view(self._default_dtype) + def _validate_fill_value(self, value): + # e.g. np.array([1.0]) we want np.array([1], dtype=self.dtype) + # see TestSetitemFloatNDarrayIntoIntegerSeries + super()._validate_fill_value(value) + if hasattr(value, "dtype") and is_float_dtype(value.dtype): + converted = value.astype(self.dtype) + if (converted == value).all(): + # See also: can_hold_element + return converted + raise TypeError + return value + class Int64Index(IntegerIndex): _index_descr_args = { diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 33c78f396b80b..2589015e0f0b1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1193,6 +1193,14 @@ def where(self, other, cond) -> list[Block]: values, icond.sum(), other # type: ignore[arg-type] ) if alt is not other: + if is_list_like(other) and len(other) < len(values): + # call np.where with other to get the appropriate ValueError + np.where(~icond, values, other) + raise NotImplementedError( + "This should not be reached; call to np.where above is " + "expected to raise ValueError. 
Please report a bug at " + "github.com/pandas-dev/pandas" + ) result = values.copy() np.putmask(result, icond, alt) else: diff --git a/pandas/tests/dtypes/cast/test_can_hold_element.py b/pandas/tests/dtypes/cast/test_can_hold_element.py index c4776f2a1e143..3a486f795f23e 100644 --- a/pandas/tests/dtypes/cast/test_can_hold_element.py +++ b/pandas/tests/dtypes/cast/test_can_hold_element.py @@ -40,3 +40,16 @@ def test_can_hold_element_range(any_int_numpy_dtype): rng = range(10 ** 10, 10 ** 10) assert len(rng) == 0 assert can_hold_element(arr, rng) + + +def test_can_hold_element_int_values_float_ndarray(): + arr = np.array([], dtype=np.int64) + + element = np.array([1.0, 2.0]) + assert can_hold_element(arr, element) + + assert not can_hold_element(arr, element + 0.5) + + # integer but not losslessly castable to int64 + element = np.array([3, 2 ** 65], dtype=np.float64) + assert not can_hold_element(arr, element) diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index 5d0aeba4aebbc..b97aaf6c551d8 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -196,16 +196,35 @@ def test_multiindex_assignment(self): df.loc[4, "d"] = arr tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d")) + def test_multiindex_assignment_single_dtype(self, using_array_manager): + # GH3777 part 2b # single dtype + arr = np.array([0.0, 1.0]) + df = DataFrame( np.random.randint(5, 10, size=9).reshape(3, 3), columns=list("abc"), index=[[4, 4, 8], [8, 10, 12]], + dtype=np.int64, ) + view = df["c"].iloc[:2].values + # arr can be losslessly cast to int, so this setitem is inplace df.loc[4, "c"] = arr - exp = Series(arr, index=[8, 10], name="c", dtype="float64") - tm.assert_series_equal(df.loc[4, "c"], exp) + exp = Series(arr, index=[8, 10], name="c", dtype="int64") + result = df.loc[4, "c"] + tm.assert_series_equal(result, exp) + if not 
using_array_manager: + # FIXME(ArrayManager): this correctly preserves dtype, + # but incorrectly is not inplace. + # extra check for inplace-ness + tm.assert_numpy_array_equal(view, exp.values) + + # arr + 0.5 cannot be cast losslessly to int, so we upcast + df.loc[4, "c"] = arr + 0.5 + result = df.loc[4, "c"] + exp = exp + 0.5 + tm.assert_series_equal(result, exp) # scalar ok df.loc[4, "c"] = 10 diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 046d349b92f3f..d446d606d726f 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -515,9 +515,18 @@ def test_iloc_setitem_frame_duplicate_columns_multiple_blocks( # but on a DataFrame with multiple blocks df = DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) + # setting float values that can be held by existing integer arrays + # is inplace df.iloc[:, 0] = df.iloc[:, 0].astype("f8") + if not using_array_manager: + assert len(df._mgr.blocks) == 1 + + # if the assigned values cannot be held by existing integer arrays, + # we cast + df.iloc[:, 0] = df.iloc[:, 0] + 0.5 if not using_array_manager: assert len(df._mgr.blocks) == 2 + expected = df.copy() # assign back to self @@ -892,7 +901,7 @@ def test_iloc_with_boolean_operation(self): tm.assert_frame_equal(result, expected) result.iloc[[False, False, True, True]] /= 2 - expected = DataFrame([[0.0, 4.0], [8.0, 12.0], [4.0, 5.0], [6.0, np.nan]]) + expected = DataFrame([[0, 4.0], [8, 12.0], [4, 5.0], [6, np.nan]]) tm.assert_frame_equal(result, expected) def test_iloc_getitem_singlerow_slice_categoricaldtype_gives_series(self): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 4706025b70db6..ea754127b98e9 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -620,23 +620,27 @@ def test_mask_key(self, obj, key, expected, val, indexer_sli): mask[key] = True obj = obj.copy() + + if 
is_list_like(val) and len(val) < mask.sum(): + msg = "boolean index did not match indexed array along dimension" + with pytest.raises(IndexError, match=msg): + indexer_sli(obj)[mask] = val + return + indexer_sli(obj)[mask] = val tm.assert_series_equal(obj, expected) def test_series_where(self, obj, key, expected, val, is_inplace): - if is_list_like(val) and len(val) < len(obj): - # Series.where is not valid here - if isinstance(val, range): - return - - # FIXME: The remaining TestSetitemDT64IntoInt that go through here - # are relying on technically-incorrect behavior because Block.where - # uses np.putmask instead of expressions.where in those cases, - # which has different length-checking semantics. - mask = np.zeros(obj.shape, dtype=bool) mask[key] = True + if is_list_like(val) and len(val) < len(obj): + # Series.where is not valid here + msg = "operands could not be broadcast together with shapes" + with pytest.raises(ValueError, match=msg): + obj.where(~mask, val) + return + orig = obj obj = obj.copy() arr = obj._values @@ -1014,6 +1018,39 @@ def inplace(self): return True +@pytest.mark.parametrize( + "val", + [ + np.array([2.0, 3.0]), + np.array([2.5, 3.5]), + np.array([2 ** 65, 2 ** 65 + 1], dtype=np.float64), # all ints, but can't cast + ], +) +class TestSetitemFloatNDarrayIntoIntegerSeries(SetitemCastingEquivalents): + @pytest.fixture + def obj(self): + return Series(range(5), dtype=np.int64) + + @pytest.fixture + def key(self): + return slice(0, 2) + + @pytest.fixture + def inplace(self, val): + # NB: this condition is based on currently-harcoded "val" cases + return val[0] == 2 + + @pytest.fixture + def expected(self, val, inplace): + if inplace: + dtype = np.int64 + else: + dtype = np.float64 + res_values = np.array(range(5), dtype=dtype) + res_values[:2] = val + return Series(res_values) + + def test_setitem_int_as_positional_fallback_deprecation(): # GH#42215 deprecated falling back to positional on __setitem__ with an # int not contained in the index
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Overlap with (but not rebased on top of) #44275
https://api.github.com/repos/pandas-dev/pandas/pulls/44316
2021-11-04T22:38:05Z
2021-11-05T20:56:22Z
2021-11-05T20:56:22Z
2021-11-05T21:07:29Z
Pin aiobotocore to lower than 2.0
diff --git a/ci/deps/actions-38-db.yaml b/ci/deps/actions-38-db.yaml index 3e959f9b7e992..7b73f43b7ba03 100644 --- a/ci/deps/actions-38-db.yaml +++ b/ci/deps/actions-38-db.yaml @@ -12,6 +12,7 @@ dependencies: - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies + - aiobotocore<2.0.0 - beautifulsoup4 - botocore>=1.11 - dask diff --git a/environment.yml b/environment.yml index f5f495bed4d78..7aa7bb0842eca 100644 --- a/environment.yml +++ b/environment.yml @@ -105,7 +105,7 @@ dependencies: - pytables>=3.6.1 # pandas.read_hdf, DataFrame.to_hdf - s3fs>=0.4.0 # file IO when using 's3://...' path - - aiobotocore + - aiobotocore<2.0.0 - fsspec>=0.7.4 # for generic remote file operations - gcsfs>=0.6.0 # file IO when using 'gcs://...' path - sqlalchemy # pandas.read_sql, DataFrame.to_sql diff --git a/requirements-dev.txt b/requirements-dev.txt index b384d3b6af5b8..6247b4e5a12b1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -69,7 +69,7 @@ pyarrow>=1.0.1 python-snappy tables>=3.6.1 s3fs>=0.4.0 -aiobotocore +aiobotocore<2.0.0 fsspec>=0.7.4 gcsfs>=0.6.0 sqlalchemy
- [x] xref #44311
https://api.github.com/repos/pandas-dev/pandas/pulls/44315
2021-11-04T22:27:32Z
2021-11-05T00:02:19Z
2021-11-05T00:02:19Z
2021-11-05T08:32:43Z
BUG: PeriodIndex in pytables
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f85128ea0ca4a..8c8e9b9feeb80 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2074,6 +2074,14 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): factory: type[Index] | type[DatetimeIndex] = Index if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype): factory = DatetimeIndex + elif values.dtype == "i8" and "freq" in kwargs: + # PeriodIndex data is stored as i8 + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type + # "Union[Type[Index], Type[DatetimeIndex]]") + factory = lambda x, **kwds: PeriodIndex( # type: ignore[assignment] + ordinal=x, **kwds + ) # making an Index instance could throw a number of different errors try: diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py index 3707079d03d64..f4b70bc6f238a 100644 --- a/pandas/tests/io/pytables/test_put.py +++ b/pandas/tests/io/pytables/test_put.py @@ -243,10 +243,8 @@ def check(format, index): check("table", index) check("fixed", index) - # period index currently broken for table - # seee GH7796 FIXME check("fixed", tm.makePeriodIndex) - # check('table',tm.makePeriodIndex) + check("table", tm.makePeriodIndex) # GH#7796 # unicode index = tm.makeUnicodeIndex
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry I've ONLY checked that the edit here fixes the commented-out test, not sure if thats sufficient to close #7796
https://api.github.com/repos/pandas-dev/pandas/pulls/44314
2021-11-04T21:29:29Z
2021-11-06T19:43:42Z
2021-11-06T19:43:41Z
2021-11-06T19:45:02Z
CLN/TYP: address TODOs, ignores
diff --git a/pandas/_libs/tslibs/dtypes.pyi b/pandas/_libs/tslibs/dtypes.pyi index 9dbf9d082d8cc..8c510b05de4ce 100644 --- a/pandas/_libs/tslibs/dtypes.pyi +++ b/pandas/_libs/tslibs/dtypes.pyi @@ -14,6 +14,8 @@ class PeriodDtypeBase: def date_offset(self) -> BaseOffset: ... @classmethod def from_date_offset(cls, offset: BaseOffset) -> PeriodDtypeBase: ... + @property + def resolution(self) -> Resolution: ... class FreqGroup(Enum): FR_ANN: int = ... diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 46b505e7384b4..2e2880cced85a 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1012,10 +1012,8 @@ def factorize(self, na_sentinel: int = -1) -> tuple[np.ndarray, ExtensionArray]: arr, na_sentinel=na_sentinel, na_value=na_value ) - uniques = self._from_factorized(uniques, self) - # error: Incompatible return value type (got "Tuple[ndarray, ndarray]", - # expected "Tuple[ndarray, ExtensionArray]") - return codes, uniques # type: ignore[return-value] + uniques_ea = self._from_factorized(uniques, self) + return codes, uniques_ea _extension_array_shared_docs[ "repeat" diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 0247cd717edec..b11b11ded2f22 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -574,14 +574,8 @@ def factorize(self, na_sentinel: int = -1) -> tuple[np.ndarray, ExtensionArray]: # the hashtables don't handle all different types of bits uniques = uniques.astype(self.dtype.numpy_dtype, copy=False) - # error: Incompatible types in assignment (expression has type - # "BaseMaskedArray", variable has type "ndarray") - uniques = type(self)( # type: ignore[assignment] - uniques, np.zeros(len(uniques), dtype=bool) - ) - # error: Incompatible return value type (got "Tuple[ndarray, ndarray]", - # expected "Tuple[ndarray, ExtensionArray]") - return codes, uniques # type: ignore[return-value] + uniques_ea = type(self)(uniques, np.zeros(len(uniques), dtype=bool)) + 
return codes, uniques_ea def value_counts(self, dropna: bool = True) -> Series: """ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 16512305e07ec..261359767cf60 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1792,6 +1792,7 @@ def ensure_nanosecond_dtype(dtype: DtypeObj) -> DtypeObj: return dtype +# TODO: overload to clarify that if all types are np.dtype then result is np.dtype def find_common_type(types: list[DtypeObj]) -> DtypeObj: """ Find a common data type among the given dtypes. diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index e80a88826e4ed..fd5b5bb7396af 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -151,6 +151,7 @@ class PeriodIndex(DatetimeIndexOpsMixin): _data: PeriodArray freq: BaseOffset + dtype: PeriodDtype _data_cls = PeriodArray _engine_type = libindex.PeriodEngine @@ -434,9 +435,7 @@ def get_loc(self, key, method=None, tolerance=None): # TODO: pass if method is not None, like DTI does? raise KeyError(key) from err - # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], - # ExtensionDtype]" has no attribute "resolution" - if reso == self.dtype.resolution: # type: ignore[union-attr] + if reso == self.dtype.resolution: # the reso < self.dtype.resolution case goes through _get_string_slice key = Period(parsed, freq=self.freq) loc = self.get_loc(key, method=method, tolerance=tolerance) @@ -489,9 +488,7 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): def _can_partial_date_slice(self, reso: Resolution) -> bool: assert isinstance(reso, Resolution), (type(reso), reso) # e.g. 
test_getitem_setitem_periodindex - # error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any], - # ExtensionDtype]" has no attribute "resolution" - return reso > self.dtype.resolution # type: ignore[union-attr] + return reso > self.dtype.resolution def period_range( diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 29edb80f473fa..fc7d2168c1c79 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -1108,8 +1108,8 @@ def as_array( arr : ndarray """ if len(self.arrays) == 0: - arr = np.empty(self.shape, dtype=float) - return arr.transpose() if transpose else arr + empty_arr = np.empty(self.shape, dtype=float) + return empty_arr.transpose() if transpose else empty_arr # We want to copy when na_value is provided to avoid # mutating the original object @@ -1129,9 +1129,7 @@ def as_array( result = np.empty(self.shape_proper, dtype=dtype) - # error: Incompatible types in assignment (expression has type "Union[ndarray, - # ExtensionArray]", variable has type "ndarray") - for i, arr in enumerate(self.arrays): # type: ignore[assignment] + for i, arr in enumerate(self.arrays): arr = arr.astype(dtype, copy=copy) result[:, i] = arr @@ -1139,6 +1137,7 @@ def as_array( result[isna(result)] = na_value return result + # FIXME: don't leave commented-out # return arr.transpose() if transpose else arr diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py index 080796e7957a3..74d8b20332fff 100644 --- a/pandas/core/internals/base.py +++ b/pandas/core/internals/base.py @@ -157,10 +157,11 @@ class SingleDataManager(DataManager): @final @property - def array(self): + def array(self) -> ArrayLike: """ Quick access to the backing array of the Block or SingleArrayManager. 
""" + # error: "SingleDataManager" has no attribute "arrays"; maybe "array" return self.arrays[0] # type: ignore[attr-defined] def setitem_inplace(self, indexer, value) -> None: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 74388e0b2b91e..159c20382dcfb 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -617,7 +617,7 @@ def _extract_index(data) -> Index: index = None if len(data) == 0: index = Index([]) - elif len(data) > 0: + else: raw_lengths = [] indexes: list[list[Hashable] | Index] = [] @@ -641,7 +641,7 @@ def _extract_index(data) -> Index: if not indexes and not raw_lengths: raise ValueError("If using all scalar values, you must pass an index") - if have_series: + elif have_series: index = union_indexes(indexes) elif have_dicts: index = union_indexes(indexes, sort=False) diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 39c6fa13f79a4..2f695200e486b 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -5,7 +5,10 @@ import datetime from functools import partial import operator -from typing import Any +from typing import ( + Any, + cast, +) import numpy as np @@ -91,11 +94,9 @@ def _masked_arith_op(x: np.ndarray, y, op): assert isinstance(x, np.ndarray), type(x) if isinstance(y, np.ndarray): dtype = find_common_type([x.dtype, y.dtype]) - # error: Argument "dtype" to "empty" has incompatible type - # "Union[dtype, ExtensionDtype]"; expected "Union[dtype, None, type, - # _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, - # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]" - result = np.empty(x.size, dtype=dtype) # type: ignore[arg-type] + # x and y are both ndarrays -> common_dtype is np.dtype + dtype = cast(np.dtype, dtype) + result = np.empty(x.size, dtype=dtype) if len(x) != len(y): raise ValueError(x.shape, y.shape) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py 
index 813c4282e543a..9c7107ab40644 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1000,7 +1000,7 @@ def _get_dummies_1d( codes, levels = factorize_from_iterable(Series(data)) if dtype is None: - dtype = np.uint8 + dtype = np.dtype(np.uint8) # error: Argument 1 to "dtype" has incompatible type "Union[ExtensionDtype, str, # dtype[Any], Type[object]]"; expected "Type[Any]" dtype = np.dtype(dtype) # type: ignore[arg-type] @@ -1046,9 +1046,7 @@ def get_empty_frame(data) -> DataFrame: fill_value: bool | float | int if is_integer_dtype(dtype): fill_value = 0 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[bool]") - elif dtype == bool: # type: ignore[comparison-overlap] + elif dtype == np.dtype(bool): fill_value = False else: fill_value = 0.0 diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 9f163f77a2ae8..1e27febab2af9 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -419,7 +419,9 @@ def _get_series_list(self, others): ) @forbid_nonstring_types(["bytes", "mixed", "mixed-integer"]) - def cat(self, others=None, sep=None, na_rep=None, join="left"): + def cat( + self, others=None, sep=None, na_rep=None, join="left" + ) -> str | Series | Index: """ Concatenate strings in the Series/Index with given separator. @@ -628,30 +630,22 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): # no NaNs - can just concatenate result = cat_safe(all_cols, sep) + out: Index | Series if isinstance(self._orig, ABCIndex): # add dtype for case that result is all-NA - # error: Incompatible types in assignment (expression has type - # "Index", variable has type "ndarray") - result = Index( # type: ignore[assignment] - result, dtype=object, name=self._orig.name - ) + out = Index(result, dtype=object, name=self._orig.name) else: # Series if is_categorical_dtype(self._orig.dtype): # We need to infer the new categories. 
dtype = None else: dtype = self._orig.dtype - # error: Incompatible types in assignment (expression has type - # "Series", variable has type "ndarray") - result = Series( # type: ignore[assignment] + res_ser = Series( result, dtype=dtype, index=data.index, name=self._orig.name ) - # error: "ndarray" has no attribute "__finalize__" - result = result.__finalize__( # type: ignore[attr-defined] - self._orig, method="str_cat" - ) - return result + out = res_ser.__finalize__(self._orig, method="str_cat") + return out _shared_docs[ "str_split" diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index 3081575f50700..ba2f56c79bdfe 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -71,7 +71,7 @@ def _str_map( map_convert = convert and not np.all(mask) try: result = lib.map_infer_mask(arr, f, mask.view(np.uint8), map_convert) - except (TypeError, AttributeError) as e: + except (TypeError, AttributeError) as err: # Reraise the exception if callable `f` got wrong number of args. # The user may want to be warned by this, instead of getting NaN p_err = ( @@ -79,9 +79,9 @@ def _str_map( r"(?(3)required )positional arguments?" 
) - if len(e.args) >= 1 and re.search(p_err, e.args[0]): + if len(err.args) >= 1 and re.search(p_err, err.args[0]): # FIXME: this should be totally avoidable - raise e + raise err def g(x): # This type of fallback behavior can be removed once diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index aca751362c915..669a39fcb3a74 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -888,11 +888,9 @@ def to_datetime( result = arg if tz is not None: if arg.tz is not None: - # error: Too many arguments for "tz_convert" of "NaTType" - result = result.tz_convert(tz) # type: ignore[call-arg] + result = arg.tz_convert(tz) else: - # error: Too many arguments for "tz_localize" of "NaTType" - result = result.tz_localize(tz) # type: ignore[call-arg] + result = arg.tz_localize(tz) elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 34bcc6687e902..f6c93e6f751c8 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -934,25 +934,15 @@ def __eq__(self, other: Any) -> bool: @classmethod def get_base_missing_value(cls, dtype: np.dtype) -> int | float: - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - if dtype == np.int8: # type: ignore[comparison-overlap] + if dtype.type is np.int8: value = cls.BASE_MISSING_VALUES["int8"] - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int16: # type: ignore[comparison-overlap] + elif dtype.type is np.int16: value = cls.BASE_MISSING_VALUES["int16"] - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int32: # type: ignore[comparison-overlap] + elif dtype.type is np.int32: value = 
cls.BASE_MISSING_VALUES["int32"] - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float32: # type: ignore[comparison-overlap] + elif dtype.type is np.float32: value = cls.BASE_MISSING_VALUES["float32"] - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float64: # type: ignore[comparison-overlap] + elif dtype.type is np.float64: value = cls.BASE_MISSING_VALUES["float64"] else: raise ValueError("Unsupported dtype") @@ -2120,30 +2110,20 @@ def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int: type inserted. """ # TODO: expand to handle datetime to integer conversion - if dtype.type == np.object_: # try to coerce it to the biggest string + if dtype.type is np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(ensure_object(column._values)) return max(itemsize, 1) - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float64: # type: ignore[comparison-overlap] + elif dtype.type is np.float64: return 255 - # Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float32: # type: ignore[comparison-overlap] + elif dtype.type is np.float32: return 254 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int32: # type: ignore[comparison-overlap] + elif dtype.type is np.int32: return 253 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int16: # type: ignore[comparison-overlap] + elif dtype.type is np.int16: return 252 - # error: Non-overlapping equality 
check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int8: # type: ignore[comparison-overlap] + elif dtype.type is np.int8: return 251 else: # pragma : no cover raise NotImplementedError(f"Data type {dtype} not supported.") @@ -2174,7 +2154,7 @@ def _dtype_to_default_stata_fmt( max_str_len = 2045 if force_strl: return "%9s" - if dtype.type == np.object_: + if dtype.type is np.object_: itemsize = max_len_string_array(ensure_object(column._values)) if itemsize > max_str_len: if dta_version >= 117: @@ -2400,11 +2380,11 @@ def _prepare_categoricals(self, data: DataFrame) -> DataFrame: # Upcast if needed so that correct missing values can be set if values.max() >= get_base_missing_value(dtype): if dtype == np.int8: - dtype = np.int16 + dtype = np.dtype(np.int16) elif dtype == np.int16: - dtype = np.int32 + dtype = np.dtype(np.int32) else: - dtype = np.float64 + dtype = np.dtype(np.float64) values = np.array(values, dtype=dtype) # Replace missing values with Stata missing value for type @@ -2624,7 +2604,7 @@ def _encode_strings(self) -> None: continue column = self.data[col] dtype = column.dtype - if dtype.type == np.object_: + if dtype.type is np.object_: inferred_dtype = infer_dtype(column, skipna=True) if not ((inferred_dtype == "string") or len(column) == 0): col = column.name @@ -2912,7 +2892,7 @@ def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) # TODO: expand to handle datetime to integer conversion if force_strl: return 32768 - if dtype.type == np.object_: # try to coerce it to the biggest string + if dtype.type is np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? 
itemsize = max_len_string_array(ensure_object(column._values)) @@ -2920,25 +2900,15 @@ def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) if itemsize <= 2045: return itemsize return 32768 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float64: # type: ignore[comparison-overlap] + elif dtype.type is np.float64: return 65526 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[floating[Any]]") - elif dtype == np.float32: # type: ignore[comparison-overlap] + elif dtype.type is np.float32: return 65527 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") [comparison-overlap] - elif dtype == np.int32: # type: ignore[comparison-overlap] + elif dtype.type is np.int32: return 65528 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int16: # type: ignore[comparison-overlap] + elif dtype.type is np.int16: return 65529 - # error: Non-overlapping equality check (left operand type: "dtype[Any]", right - # operand type: "Type[signedinteger[Any]]") - elif dtype == np.int8: # type: ignore[comparison-overlap] + elif dtype.type is np.int8: return 65530 else: # pragma : no cover raise NotImplementedError(f"Data type {dtype} not supported.") diff --git a/pandas/tests/frame/test_reductions.py b/pandas/tests/frame/test_reductions.py index 258e4e6eb0cc9..919d8ab14778e 100644 --- a/pandas/tests/frame/test_reductions.py +++ b/pandas/tests/frame/test_reductions.py @@ -1718,7 +1718,7 @@ def test_mad_nullable_integer_all_na(any_signed_int_ea_dtype): df2 = df.astype(any_signed_int_ea_dtype) # case with all-NA row/column - df2.iloc[:, 1] = pd.NA # FIXME: this doesn't operate in-place + df2.iloc[:, 1] = pd.NA # FIXME(GH#44199): this doesn't operate 
in-place df2.iloc[:, 1] = pd.array([pd.NA] * len(df2), dtype=any_signed_int_ea_dtype) result = df2.mad() expected = df.mad() diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index f43e3104c64d7..2a1fa8a015ccc 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -253,12 +253,14 @@ def test_union(idx, sort): the_union = idx.union(idx[:0], sort=sort) tm.assert_index_equal(the_union, idx) - # FIXME: dont leave commented-out - # won't work in python 3 - # tuples = _index.values - # result = _index[:4] | tuples[4:] - # assert result.equals(tuples) + tuples = idx.values + result = idx[:4].union(tuples[4:], sort=sort) + if sort is None: + tm.equalContents(result, idx) + else: + assert result.equals(idx) + # FIXME: don't leave commented-out # not valid for python 3 # def test_union_with_regular_index(self): # other = Index(['A', 'B', 'C']) @@ -290,11 +292,9 @@ def test_intersection(idx, sort): expected = idx[:0] assert empty.equals(expected) - # FIXME: dont leave commented-out - # can't do in python 3 - # tuples = _index.values - # result = _index & tuples - # assert result.equals(tuples) + tuples = idx.values + result = idx.intersection(tuples) + assert result.equals(idx) @pytest.mark.parametrize(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44308
2021-11-04T05:32:51Z
2021-11-05T09:09:57Z
2021-11-05T09:09:57Z
2021-11-05T15:54:04Z
CLN: libparsers
diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi index 9e3c163fb54d9..01f5d5802ccd5 100644 --- a/pandas/_libs/parsers.pyi +++ b/pandas/_libs/parsers.pyi @@ -16,7 +16,6 @@ STR_NA_VALUES: set[str] def sanitize_objects( values: npt.NDArray[np.object_], na_values: set, - convert_empty: bool = ..., ) -> int: ... class TextReader: diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index c9f3e1f01a55c..d2975f83b97d7 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -454,7 +454,7 @@ cdef class TextReader: # usecols into TextReader. self.usecols = usecols - # XXX + # TODO: XXX? if skipfooter > 0: self.parser.on_bad_lines = SKIP @@ -842,7 +842,7 @@ cdef class TextReader: cdef _read_rows(self, rows, bint trim): cdef: int64_t buffered_lines - int64_t irows, footer = 0 + int64_t irows self._start_clock() @@ -866,16 +866,13 @@ cdef class TextReader: if status < 0: raise_parser_error('Error tokenizing data', self.parser) - footer = self.skipfooter if self.parser_start >= self.parser.lines: raise StopIteration self._end_clock('Tokenization') self._start_clock() - columns = self._convert_column_data(rows=rows, - footer=footer, - upcast_na=True) + columns = self._convert_column_data(rows) self._end_clock('Type conversion') self._start_clock() if len(columns) > 0: @@ -904,10 +901,7 @@ cdef class TextReader: def remove_noconvert(self, i: int) -> None: self.noconvert.remove(i) - # TODO: upcast_na only ever False, footer never passed - def _convert_column_data( - self, rows: int | None = None, upcast_na: bool = False, footer: int = 0 - ) -> dict[int, "ArrayLike"]: + def _convert_column_data(self, rows: int | None) -> dict[int, "ArrayLike"]: cdef: int64_t i int nused @@ -925,11 +919,6 @@ cdef class TextReader: else: end = min(start + rows, self.parser.lines) - # FIXME: dont leave commented-out - # # skip footer - # if footer > 0: - # end -= footer - num_cols = -1 # Py_ssize_t cast prevents build warning for i in 
range(<Py_ssize_t>self.parser.lines): @@ -1031,8 +1020,7 @@ cdef class TextReader: self._free_na_set(na_hashset) # don't try to upcast EAs - try_upcast = upcast_na and na_count > 0 - if try_upcast and not is_extension_array_dtype(col_dtype): + if na_count > 0 and not is_extension_array_dtype(col_dtype): col_res = _maybe_upcast(col_res) if col_res is None: @@ -1985,18 +1973,14 @@ cdef list _maybe_encode(list values): return [x.encode('utf-8') if isinstance(x, str) else x for x in values] -# TODO: only ever called with convert_empty=False -def sanitize_objects(ndarray[object] values, set na_values, - bint convert_empty=True) -> int: +def sanitize_objects(ndarray[object] values, set na_values) -> int: """ - Convert specified values, including the given set na_values and empty - strings if convert_empty is True, to np.nan. + Convert specified values, including the given set na_values to np.nan. Parameters ---------- values : ndarray[object] na_values : set - convert_empty : bool, default True Returns ------- @@ -2013,7 +1997,7 @@ def sanitize_objects(ndarray[object] values, set na_values, for i in range(n): val = values[i] - if (convert_empty and val == '') or (val in na_values): + if val in na_values: values[i] = onan na_count += 1 elif val in memo: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e8248eeb07395..0ac515f8b0a79 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1858,7 +1858,8 @@ cdef class YearOffset(SingleConstructorOffset): """ _attributes = tuple(["n", "normalize", "month"]) - # _default_month: int # FIXME: python annotation here breaks things + # FIXME(cython#4446): python annotation here gives compile-time errors + # _default_month: int cdef readonly: int month @@ -2009,7 +2010,7 @@ cdef class QuarterOffset(SingleConstructorOffset): # point. 
Also apply_index, is_on_offset, rule_code if # startingMonth vs month attr names are resolved - # FIXME: python annotations here breaks things + # FIXME(cython#4446): python annotation here gives compile-time errors # _default_starting_month: int # _from_name_starting_month: int diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 983f7b6a20a48..8cdcc05f60266 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -713,13 +713,13 @@ def _infer_types(self, values, na_values, try_num_bool=True): # e.g. encountering datetime string gets ValueError # TypeError can be raised in floatify result = values - na_count = parsers.sanitize_objects(result, na_values, False) + na_count = parsers.sanitize_objects(result, na_values) else: na_count = isna(result).sum() else: result = values if values.dtype == np.object_: - na_count = parsers.sanitize_objects(values, na_values, False) + na_count = parsers.sanitize_objects(values, na_values) if result.dtype == np.object_ and try_num_bool: result, _ = libops.maybe_convert_bool(
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44307
2021-11-04T05:30:26Z
2021-11-05T13:03:31Z
2021-11-05T13:03:30Z
2021-11-05T15:55:17Z
CLN: address TODOs
diff --git a/pandas/_libs/hashtable.pyi b/pandas/_libs/hashtable.pyi index 9c1de67a7ba2a..f4b90648a8dc8 100644 --- a/pandas/_libs/hashtable.pyi +++ b/pandas/_libs/hashtable.pyi @@ -194,21 +194,6 @@ class StringHashTable(HashTable): ... class PyObjectHashTable(HashTable): ... class IntpHashTable(HashTable): ... -def duplicated_int64( - values: np.ndarray, # const int64_t[:] values - keep: Literal["last", "first", False] = ..., -) -> npt.NDArray[np.bool_]: ... - -# TODO: Is it actually bool or is it uint8? - -def mode_int64( - values: np.ndarray, # const int64_t[:] values - dropna: bool, -) -> npt.NDArray[np.int64]: ... -def value_count_int64( - values: np.ndarray, # const int64_t[:] - dropna: bool, -) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ... def duplicated( values: np.ndarray, keep: Literal["last", "first", False] = ..., diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e8248eeb07395..6588b0435d6f0 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -777,7 +777,7 @@ cdef class Tick(SingleConstructorOffset): "Tick offset with `normalize=True` are not allowed." 
) - # FIXME: Without making this cpdef, we get AttributeError when calling + # Note: Without making this cpdef, we get AttributeError when calling # from __mul__ cpdef Tick _next_higher_resolution(Tick self): if type(self) is Day: diff --git a/pandas/_libs/writers.pyi b/pandas/_libs/writers.pyi index c188dc2bd9048..930322fcbeb77 100644 --- a/pandas/_libs/writers.pyi +++ b/pandas/_libs/writers.pyi @@ -1,8 +1,11 @@ +from __future__ import annotations + import numpy as np -# TODO: can make this more specific +from pandas._typing import ArrayLike + def write_csv_rows( - data: list, + data: list[ArrayLike], data_index: np.ndarray, nlevels: int, cols: np.ndarray, diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 46f04cf8e15b3..eac6ee4366e33 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -30,7 +30,7 @@ def write_csv_rows( Parameters ---------- - data : list + data : list[ArrayLike] data_index : ndarray nlevels : int cols : ndarray diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index ac27aaa42d151..54324bf721945 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -65,7 +65,7 @@ def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) np.putmask(values, mask, value) -def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray: +def putmask_smart(values: np.ndarray, mask: npt.NDArray[np.bool_], new) -> np.ndarray: """ Return a new ndarray, try to preserve dtype if possible. 
@@ -84,12 +84,11 @@ def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray: See Also -------- - ndarray.putmask + np.putmask """ # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings - # n should be the length of the mask or a scalar here if not is_list_like(new): new = np.broadcast_to(new, mask.shape) @@ -139,7 +138,7 @@ def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray: return _putmask_preserve(values, new, mask) -def _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray): +def _putmask_preserve(new_values: np.ndarray, new, mask: npt.NDArray[np.bool_]): try: new_values[mask] = new[mask] except (IndexError, ValueError): @@ -147,7 +146,9 @@ def _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray): return new_values -def putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> None: +def putmask_without_repeat( + values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any +) -> None: """ np.putmask will truncate or repeat if `new` is a listlike with len(new) != len(values). We require an exact match. @@ -181,7 +182,9 @@ def putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> No np.putmask(values, mask, new) -def validate_putmask(values: ArrayLike, mask: np.ndarray) -> tuple[np.ndarray, bool]: +def validate_putmask( + values: ArrayLike, mask: np.ndarray +) -> tuple[npt.NDArray[np.bool_], bool]: """ Validate mask and check if this putmask operation is a no-op. """ @@ -193,7 +196,7 @@ def validate_putmask(values: ArrayLike, mask: np.ndarray) -> tuple[np.ndarray, b return mask, noop -def extract_bool_array(mask: ArrayLike) -> np.ndarray: +def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]: """ If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. 
""" diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 005c5f75e6cfa..e2dd5ecfde5a8 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -390,6 +390,7 @@ def fillna(self, value, downcast=None): return type(self)._simple_new(cat, name=self.name) + # TODO(2.0): remove reindex once non-unique deprecation is enforced def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index f0d01f8727d5a..9b757e9cacdf3 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -2132,7 +2132,6 @@ def test_float_series_rdiv_td64arr(self, box_with_array, names): result = ser.__rtruediv__(tdi) if box is DataFrame: - # TODO: Should we skip this case sooner or test something else? assert result is NotImplemented else: tm.assert_equal(result, expected) diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py index 007c4bdea17f8..b5e1fe030ce1c 100644 --- a/pandas/tests/arrays/categorical/test_replace.py +++ b/pandas/tests/arrays/categorical/test_replace.py @@ -29,14 +29,14 @@ ([1, 2, "3"], "5", ["5", "5", 3], True), ], ) -def test_replace(to_replace, value, expected, flip_categories): +def test_replace_categorical_series(to_replace, value, expected, flip_categories): # GH 31720 stays_categorical = not isinstance(value, list) or len(pd.unique(value)) == 1 - s = pd.Series([1, 2, 3], dtype="category") - result = s.replace(to_replace, value) + ser = pd.Series([1, 2, 3], dtype="category") + result = ser.replace(to_replace, value) expected = pd.Series(expected, dtype="category") - s.replace(to_replace, value, inplace=True) + ser.replace(to_replace, value, inplace=True) if flip_categories: expected = 
expected.cat.set_categories(expected.cat.categories[::-1]) @@ -46,7 +46,7 @@ def test_replace(to_replace, value, expected, flip_categories): expected = pd.Series(np.asarray(expected)) tm.assert_series_equal(expected, result, check_category_order=False) - tm.assert_series_equal(expected, s, check_category_order=False) + tm.assert_series_equal(expected, ser, check_category_order=False) @pytest.mark.parametrize( @@ -59,8 +59,7 @@ def test_replace(to_replace, value, expected, flip_categories): ("b", None, ["a", None], "Categorical.categories length are different"), ], ) -def test_replace2(to_replace, value, result, expected_error_msg): - # TODO: better name +def test_replace_categorical(to_replace, value, result, expected_error_msg): # GH#26988 cat = Categorical(["a", "b"]) expected = Categorical(result) diff --git a/pandas/tests/arrays/timedeltas/test_reductions.py b/pandas/tests/arrays/timedeltas/test_reductions.py index 5f278b09dc818..9e854577f7e3c 100644 --- a/pandas/tests/arrays/timedeltas/test_reductions.py +++ b/pandas/tests/arrays/timedeltas/test_reductions.py @@ -84,19 +84,8 @@ def test_sum(self): assert isinstance(result, Timedelta) assert result == expected - # TODO: de-duplicate with test_npsum below - def test_np_sum(self): - # GH#25282 - vals = np.arange(5, dtype=np.int64).view("m8[h]").astype("m8[ns]") - arr = TimedeltaArray(vals) - result = np.sum(arr) - assert result == vals.sum() - - result = np.sum(pd.TimedeltaIndex(arr)) - assert result == vals.sum() - def test_npsum(self): - # GH#25335 np.sum should return a Timedelta, not timedelta64 + # GH#25282, GH#25335 np.sum should return a Timedelta, not timedelta64 tdi = pd.TimedeltaIndex(["3H", "3H", "2H", "5H", "4H"]) arr = tdi.array diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index bca87bb8ec2aa..53416b6a3e9db 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -174,11 +174,12 
@@ def test_series_repr(self, data): assert "Decimal: " in repr(ser) -# TODO(extension) @pytest.mark.xfail( reason=( - "raising AssertionError as this is not implemented, though easy enough to do" - ) + "DecimalArray constructor raises bc _from_sequence wants Decimals, not ints." + "Easy to fix, just need to do it." + ), + raises=TypeError, ) def test_series_constructor_coerce_data_to_extension_dtype_raises(): xpr = ( diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py index 6f8b18f449779..2297f8cf87209 100644 --- a/pandas/tests/indexes/categorical/test_indexing.py +++ b/pandas/tests/indexes/categorical/test_indexing.py @@ -300,8 +300,8 @@ def test_get_indexer_same_categories_different_order(self): class TestWhere: - def test_where(self, listlike_box_with_tuple): - klass = listlike_box_with_tuple + def test_where(self, listlike_box): + klass = listlike_box i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) cond = [True] * len(i) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index e34620d4caf17..33d2558613baf 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -375,8 +375,8 @@ def test_numpy_repeat(self, simple_index): with pytest.raises(ValueError, match=msg): np.repeat(idx, rep, axis=0) - def test_where(self, listlike_box_with_tuple, simple_index): - klass = listlike_box_with_tuple + def test_where(self, listlike_box, simple_index): + klass = listlike_box idx = simple_index if isinstance(idx, (DatetimeIndex, TimedeltaIndex)): diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index 2eae51c62aa0d..1e701945c79a0 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -33,19 +33,9 @@ def freq_sample(request): return request.param -@pytest.fixture(params=[list, np.array, array, Series]) +@pytest.fixture(params=[list, tuple, np.array, array, Series]) def 
listlike_box(request): """ Types that may be passed as the indexer to searchsorted. """ return request.param - - -# TODO: not clear if this _needs_ to be different from listlike_box or -# if that is just a historical artifact -@pytest.fixture(params=[list, tuple, np.array, Series]) -def listlike_box_with_tuple(request): - """ - Types that may be passed as the indexer to searchsorted. - """ - return request.param diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 411e76ca5d8b7..5418f3a5964d9 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -43,8 +43,8 @@ def test_take(self, closed): expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed) tm.assert_index_equal(result, expected) - def test_where(self, simple_index, listlike_box_with_tuple): - klass = listlike_box_with_tuple + def test_where(self, simple_index, listlike_box): + klass = listlike_box idx = simple_index cond = [True] * len(idx) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 99322f474dd9e..34722ad388ae0 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -720,12 +720,12 @@ def test_where(self): with pytest.raises(NotImplementedError, match=msg): i.where(True) - def test_where_array_like(self, listlike_box_with_tuple): + def test_where_array_like(self, listlike_box): mi = MultiIndex.from_tuples([("A", 1), ("A", 2)]) cond = [False, True] msg = r"\.where is not supported for MultiIndex operations" with pytest.raises(NotImplementedError, match=msg): - mi.where(listlike_box_with_tuple(cond)) + mi.where(listlike_box(cond)) class TestContains: diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py index cc309beef92d6..7f7239828f9cf 100644 --- a/pandas/tests/indexes/numeric/test_indexing.py +++ 
b/pandas/tests/indexes/numeric/test_indexing.py @@ -397,14 +397,14 @@ class TestWhere: UInt64Index(np.arange(5, dtype="uint64")), ], ) - def test_where(self, listlike_box_with_tuple, index): + def test_where(self, listlike_box, index): cond = [True] * len(index) expected = index - result = index.where(listlike_box_with_tuple(cond)) + result = index.where(listlike_box(cond)) cond = [False] + [True] * (len(index) - 1) expected = Float64Index([index._na_value] + index[1:].tolist()) - result = index.where(listlike_box_with_tuple(cond)) + result = index.where(listlike_box(cond)) tm.assert_index_equal(result, expected) def test_where_uint64(self): diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 78afcf2fdc78a..dfa750bf933a0 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -602,16 +602,16 @@ def test_get_indexer2(self): class TestWhere: - def test_where(self, listlike_box_with_tuple): + def test_where(self, listlike_box): i = period_range("20130101", periods=5, freq="D") cond = [True] * len(i) expected = i - result = i.where(listlike_box_with_tuple(cond)) + result = i.where(listlike_box(cond)) tm.assert_index_equal(result, expected) cond = [False] + [True] * (len(i) - 1) expected = PeriodIndex([NaT] + i[1:].tolist(), freq="D") - result = i.where(listlike_box_with_tuple(cond)) + result = i.where(listlike_box(cond)) tm.assert_index_equal(result, expected) def test_where_other(self): diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py index e15283e558479..fc5db59db8336 100644 --- a/pandas/tests/io/formats/style/test_html.py +++ b/pandas/tests/io/formats/style/test_html.py @@ -488,8 +488,8 @@ def test_replaced_css_class_names(styler_mi): uuid_len=0, ).set_table_styles(css_class_names=css) styler_mi.index.names = ["n1", "n2"] - styler_mi.hide_index(styler_mi.index[1:]) - 
styler_mi.hide_columns(styler_mi.columns[1:]) + styler_mi.hide(styler_mi.index[1:], axis=0) + styler_mi.hide(styler_mi.columns[1:], axis=1) styler_mi.applymap_index(lambda v: "color: red;", axis=0) styler_mi.applymap_index(lambda v: "color: green;", axis=1) styler_mi.applymap(lambda v: "color: blue;") @@ -611,9 +611,9 @@ def test_hiding_index_columns_multiindex_alignment(): ) df = DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx) styler = Styler(df, uuid_len=0) - styler.hide_index(level=1).hide_columns(level=0) - styler.hide_index([("j0", "i1", "j2")]) - styler.hide_columns([("c0", "d1", "d2")]) + styler.hide(level=1, axis=0).hide(level=0, axis=1) + styler.hide([("j0", "i1", "j2")], axis=0) + styler.hide([("c0", "d1", "d2")], axis=1) result = styler.to_html() expected = dedent( """\ diff --git a/pandas/tests/io/formats/style/test_style.py b/pandas/tests/io/formats/style/test_style.py index 8ac0dd03c9fd6..c5e0985205c76 100644 --- a/pandas/tests/io/formats/style/test_style.py +++ b/pandas/tests/io/formats/style/test_style.py @@ -1530,7 +1530,7 @@ def test_hiding_headers_over_index_no_sparsify(): df = DataFrame(9, index=midx, columns=[0]) ctx = df.style._translate(False, False) assert len(ctx["body"]) == 6 - ctx = df.style.hide_index((1, "a"))._translate(False, False) + ctx = df.style.hide((1, "a"), axis=0)._translate(False, False) assert len(ctx["body"]) == 4 assert "row2" in ctx["body"][0][0]["class"] diff --git a/pandas/tests/io/formats/style/test_to_latex.py b/pandas/tests/io/formats/style/test_to_latex.py index 63658e9bf60d7..9c2a364b396b8 100644 --- a/pandas/tests/io/formats/style/test_to_latex.py +++ b/pandas/tests/io/formats/style/test_to_latex.py @@ -803,7 +803,7 @@ def test_css_convert_apply_index(styler, axis): def test_hide_index_latex(styler): # GH 43637 - styler.hide_index([0]) + styler.hide([0], axis=0) result = styler.to_latex() expected = dedent( """\ @@ -826,9 +826,9 @@ def test_latex_hiding_index_columns_multiindex_alignment(): ) df = 
DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx) styler = Styler(df, uuid_len=0) - styler.hide_index(level=1).hide_columns(level=0) - styler.hide_index([("i0", "i1", "i2")]) - styler.hide_columns([("c0", "c1", "c2")]) + styler.hide(level=1, axis=0).hide(level=0, axis=1) + styler.hide([("i0", "i1", "i2")], axis=0) + styler.hide([("c0", "c1", "c2")], axis=1) styler.applymap(lambda x: "color:{red};" if x == 5 else "") styler.applymap_index(lambda x: "color:{blue};" if "j" in x else "") result = styler.to_latex()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44305
2021-11-03T17:06:55Z
2021-11-05T13:07:28Z
2021-11-05T13:07:28Z
2021-11-05T15:55:58Z
ENH: Implement multi-column `DataFrame.quantiles`
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a1a2149da7cf6..ce7fa940d018a 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -293,6 +293,7 @@ Other enhancements - :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`) - :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`) - :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`) +- :meth:`DataFrame.quantile` gained a ``method`` argument that can accept ``table`` to evaluate multi-column quantiles (:issue:`43881`) - :class:`Interval` now supports checking whether one interval is contained by another interval (:issue:`46613`) - :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`) - :meth:`DataFrame.set_index` now supports a ``copy`` keyword. 
If ``False``, the underlying data is not copied when a new :class:`DataFrame` is returned (:issue:`48043`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6cfca4ebdc612..74d4184fe985d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -83,7 +83,10 @@ npt, ) from pandas.compat._optional import import_optional_dependency -from pandas.compat.numpy import function as nv +from pandas.compat.numpy import ( + function as nv, + np_percentile_argname, +) from pandas.util._decorators import ( Appender, Substitution, @@ -11129,6 +11132,7 @@ def quantile( axis: Axis = 0, numeric_only: bool | lib.NoDefault = no_default, interpolation: QuantileInterpolation = "linear", + method: Literal["single", "table"] = "single", ) -> Series | DataFrame: """ Return values at the given quantile over requested axis. @@ -11157,6 +11161,10 @@ def quantile( * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. + method : {'single', 'table'}, default 'single' + Whether to compute quantiles per-column ('single') or over all columns + ('table'). When 'table', the only allowed interpolation methods are + 'nearest', 'lower', and 'higher'. Returns ------- @@ -11186,6 +11194,17 @@ def quantile( 0.1 1.3 3.7 0.5 2.5 55.0 + Specifying `method='table'` will compute the quantile over all columns. + + >>> df.quantile(.1, method="table", interpolation="nearest") + a 1 + b 1 + Name: 0.1, dtype: int64 + >>> df.quantile([.1, .5], method="table", interpolation="nearest") + a b + 0.1 1 1 + 0.5 3 100 + Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. 
@@ -11212,13 +11231,18 @@ def quantile( # error: List item 0 has incompatible type "Union[float, Union[Union[ # ExtensionArray, ndarray[Any, Any]], Index, Series], Sequence[float]]"; # expected "float" - res_df = self.quantile( - [q], # type: ignore[list-item] + res_df = self.quantile( # type: ignore[call-overload] + [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation, + method=method, ) - res = res_df.iloc[0] + if method == "single": + res = res_df.iloc[0] + else: + # cannot directly iloc over sparse arrays + res = res_df.T.iloc[:, 0] if axis == 1 and len(self) == 0: # GH#41544 try to get an appropriate dtype dtype = find_common_type(list(self.dtypes)) @@ -11246,11 +11270,47 @@ def quantile( res = self._constructor([], index=q, columns=cols, dtype=dtype) return res.__finalize__(self, method="quantile") - # error: Argument "qs" to "quantile" of "BlockManager" has incompatible type - # "Index"; expected "Float64Index" - res = data._mgr.quantile( - qs=q, axis=1, interpolation=interpolation # type: ignore[arg-type] - ) + valid_method = {"single", "table"} + if method not in valid_method: + raise ValueError( + f"Invalid method: {method}. Method must be in {valid_method}." + ) + if method == "single": + # error: Argument "qs" to "quantile" of "BlockManager" has incompatible type + # "Index"; expected "Float64Index" + res = data._mgr.quantile( + qs=q, axis=1, interpolation=interpolation # type: ignore[arg-type] + ) + elif method == "table": + valid_interpolation = {"nearest", "lower", "higher"} + if interpolation not in valid_interpolation: + raise ValueError( + f"Invalid interpolation: {interpolation}. 
" + f"Interpolation must be in {valid_interpolation}" + ) + # handle degenerate case + if len(data) == 0: + if data.ndim == 2: + dtype = find_common_type(list(self.dtypes)) + else: + dtype = self.dtype + return self._constructor([], index=q, columns=data.columns, dtype=dtype) + + q_idx = np.quantile( # type: ignore[call-overload] + np.arange(len(data)), q, **{np_percentile_argname: interpolation} + ) + + by = data.columns + if len(by) > 1: + keys = [data._get_label_or_level_values(x) for x in by] + indexer = lexsort_indexer(keys) + else: + by = by[0] + k = data._get_label_or_level_values(by) # type: ignore[arg-type] + indexer = nargsort(k) + + res = data._mgr.take(indexer[q_idx], verify=False) + res.axes[1] = q result = self._constructor(res) return result.__finalize__(self, method="quantile") diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 16b82727fd069..14b416011b956 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -16,6 +16,14 @@ import pandas._testing as tm +@pytest.fixture( + params=[["linear", "single"], ["nearest", "table"]], ids=lambda x: "-".join(x) +) +def interp_method(request): + """(interpolation, method) arguments for quantile""" + return request.param + + class TestDataFrameQuantile: @pytest.mark.parametrize( "non_num_col", @@ -25,8 +33,11 @@ class TestDataFrameQuantile: [DataFrame, Series, Timestamp], ], ) - def test_numeric_only_default_false_warning(self, non_num_col): + def test_numeric_only_default_false_warning( + self, non_num_col, interp_method, request, using_array_manager + ): # GH #7308 + interpolation, method = interp_method df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}) df["C"] = non_num_col @@ -35,8 +46,14 @@ def test_numeric_only_default_false_warning(self, non_num_col): index=["A", "B"], name=0.5, ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and 
using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) with tm.assert_produces_warning(FutureWarning, match="numeric_only"): - result = df.quantile(0.5) + result = df.quantile(0.5, interpolation=interpolation, method=method) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( @@ -64,66 +81,142 @@ def test_quantile_sparse(self, df, expected): tm.assert_series_equal(result, expected) - def test_quantile(self, datetime_frame): - from numpy import percentile - + def test_quantile( + self, datetime_frame, interp_method, using_array_manager, request + ): + interpolation, method = interp_method df = datetime_frame - q = df.quantile(0.1, axis=0, numeric_only=True) - assert q["A"] == percentile(df["A"], 10) - tm.assert_index_equal(q.index, df.columns) - - q = df.quantile(0.9, axis=1, numeric_only=True) - assert q["2000-01-17"] == percentile(df.loc["2000-01-17"], 90) - tm.assert_index_equal(q.index, df.index) - - # test degenerate case - q = DataFrame({"x": [], "y": []}).quantile(0.1, axis=0, numeric_only=True) + result = df.quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df[col], 10) for col in df.columns], + index=df.columns, + name=0.1, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.node.add_marker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + result = df.quantile( + 0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df.loc[date], 90) for date in df.index], + index=df.index, + name=0.9, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + 
tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.node.add_marker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + def test_empty(self, interp_method): + interpolation, method = interp_method + q = DataFrame({"x": [], "y": []}).quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) assert np.isnan(q["x"]) and np.isnan(q["y"]) - # non-numeric exclusion + def test_non_numeric_exclusion(self, interp_method, request, using_array_manager): + interpolation, method = interp_method df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]}) - rs = df.quantile(0.5, numeric_only=True) + rs = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) with tm.assert_produces_warning(FutureWarning, match="Select only valid"): xp = df.median().rename(0.5) + if interpolation == "nearest": + xp = (xp + 0.5).astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_series_equal(rs, xp) + def test_axis(self, interp_method, request, using_array_manager): # axis + interpolation, method = interp_method df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) - result = df.quantile(0.5, axis=1) + result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_series_equal(result, expected) - result = df.quantile([0.5, 0.75], axis=1) + result = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) expected = DataFrame( {1: [1.5, 1.75], 
2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75] ) + if interpolation == "nearest": + expected.iloc[0, :] -= 0.5 + expected.iloc[1, :] += 0.25 + expected = expected.astype(np.int64) tm.assert_frame_equal(result, expected, check_index_type=True) + def test_axis_numeric_only_true(self, interp_method, request, using_array_manager): # We may want to break API in the future to change this # so that we exclude non-numeric along the same axis # See GH #7312 + interpolation, method = interp_method df = DataFrame([[1, 2, 3], ["a", "b", 4]]) - result = df.quantile(0.5, axis=1, numeric_only=True) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) expected = Series([3.0, 4.0], index=[0, 1], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_series_equal(result, expected) - def test_quantile_date_range(self): + def test_quantile_date_range(self, interp_method, request, using_array_manager): # GH 2460 - + interpolation, method = interp_method dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") ser = Series(dti) df = DataFrame(ser) - result = df.quantile(numeric_only=False) + result = df.quantile( + numeric_only=False, interpolation=interpolation, method=method + ) expected = Series( ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" ) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_series_equal(result, expected) - def test_quantile_axis_mixed(self): + def test_quantile_axis_mixed(self, interp_method, request, using_array_manager): # mixed on axis=1 + interpolation, method = interp_method df = DataFrame( { "A": [1, 2, 3], @@ -132,8 +225,16 @@ def test_quantile_axis_mixed(self): "D": ["foo", "bar", "baz"], } ) - result = 
df.quantile(0.5, axis=1, numeric_only=True) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) expected = Series([1.5, 2.5, 3.5], name=0.5) + if interpolation == "nearest": + expected -= 0.5 + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_series_equal(result, expected) # must raise @@ -141,30 +242,44 @@ def test_quantile_axis_mixed(self): with pytest.raises(TypeError, match=msg): df.quantile(0.5, axis=1, numeric_only=False) - def test_quantile_axis_parameter(self): + def test_quantile_axis_parameter(self, interp_method, request, using_array_manager): # GH 9543/9544 - + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) - result = df.quantile(0.5, axis=0) + result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method) expected = Series([2.0, 3.0], index=["A", "B"], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) - expected = df.quantile(0.5, axis="index") + expected = df.quantile( + 0.5, axis="index", interpolation=interpolation, method=method + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) - result = df.quantile(0.5, axis=1) + result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) - result = df.quantile(0.5, axis="columns") + result = df.quantile( + 0.5, axis="columns", interpolation=interpolation, method=method + ) tm.assert_series_equal(result, expected) msg = "No axis named -1 
for object type DataFrame" with pytest.raises(ValueError, match=msg): - df.quantile(0.1, axis=-1) + df.quantile(0.1, axis=-1, interpolation=interpolation, method=method) msg = "No axis named column for object type DataFrame" with pytest.raises(ValueError, match=msg): df.quantile(0.1, axis="column") @@ -247,24 +362,45 @@ def test_quantile_interpolation_int(self, int_frame): assert q1["A"] == np.percentile(df["A"], 10) tm.assert_series_equal(q, q1) - def test_quantile_multi(self): + def test_quantile_multi(self, interp_method, request, using_array_manager): + interpolation, method = interp_method df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) - result = df.quantile([0.25, 0.5]) + result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method) expected = DataFrame( [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=["a", "b", "c"], ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) tm.assert_frame_equal(result, expected) - # axis = 1 - result = df.quantile([0.25, 0.5], axis=1) + def test_quantile_multi_axis_1(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile( + [0.25, 0.5], axis=1, interpolation=interpolation, method=method + ) expected = DataFrame( - [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=[0, 1, 2] + [[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2] ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) + tm.assert_frame_equal(result, expected) - # empty - result = DataFrame({"x": [], "y": []}).quantile([0.1, 0.9], axis=0) + def 
test_quantile_multi_empty(self, interp_method): + interpolation, method = interp_method + result = DataFrame({"x": [], "y": []}).quantile( + [0.1, 0.9], axis=0, interpolation=interpolation, method=method + ) expected = DataFrame( {"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9] ) @@ -275,7 +411,8 @@ def test_quantile_datetime(self): # exclude datetime result = df.quantile(0.5, numeric_only=True) - expected = Series([2.5], index=["b"]) + expected = Series([2.5], index=["b"], name=0.5) + tm.assert_series_equal(result, expected) # datetime result = df.quantile(0.5, numeric_only=False) @@ -327,26 +464,41 @@ def test_quantile_datetime(self): "Period[D]", ], ) - def test_quantile_dt64_empty(self, dtype): + def test_quantile_dt64_empty(self, dtype, interp_method): # GH#41544 + interpolation, method = interp_method df = DataFrame(columns=["a", "b"], dtype=dtype) - res = df.quantile(0.5, axis=1, numeric_only=False) + res = df.quantile( + 0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method + ) expected = Series([], index=[], name=0.5, dtype=dtype) tm.assert_series_equal(res, expected) # no columns in result, so no dtype preservation - res = df.quantile([0.5], axis=1, numeric_only=False) + res = df.quantile( + [0.5], + axis=1, + numeric_only=False, + interpolation=interpolation, + method=method, + ) expected = DataFrame(index=[0.5]) tm.assert_frame_equal(res, expected) - def test_quantile_invalid(self, datetime_frame): + @pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]]) + def test_quantile_invalid(self, invalid, datetime_frame, interp_method): msg = "percentiles should all be in the interval \\[0, 1\\]" - for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: - with pytest.raises(ValueError, match=msg): - datetime_frame.quantile(invalid) - - def test_quantile_box(self): + interpolation, method = interp_method + with pytest.raises(ValueError, match=msg): + datetime_frame.quantile(invalid, interpolation=interpolation, method=method) 
+ + def test_quantile_box(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) df = DataFrame( { "A": [ @@ -367,7 +519,9 @@ def test_quantile_box(self): } ) - res = df.quantile(0.5, numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) exp = Series( [ @@ -380,7 +534,9 @@ def test_quantile_box(self): ) tm.assert_series_equal(res, exp) - res = df.quantile([0.5], numeric_only=False) + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) exp = DataFrame( [ [ @@ -394,6 +550,7 @@ def test_quantile_box(self): ) tm.assert_frame_equal(res, exp) + def test_quantile_box_nat(self): # DatetimeLikeBlock may be consolidated and contain NaT in different loc df = DataFrame( { @@ -469,49 +626,73 @@ def test_quantile_box(self): ) tm.assert_frame_equal(res, exp) - def test_quantile_nan(self): - + def test_quantile_nan(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) # GH 14357 - float block where some cols have missing values df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)}) df.iloc[-1, 1] = np.nan - res = df.quantile(0.5) - exp = Series([3.0, 2.5], index=["a", "b"], name=0.5) + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series( + [3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5 + ) tm.assert_series_equal(res, exp) - res = df.quantile([0.5, 0.75]) - exp = DataFrame({"a": [3.0, 4.0], "b": [2.5, 3.25]}, index=[0.5, 0.75]) + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) + exp = DataFrame( + { + "a": [3.0, 4.0], + "b": [2.5, 3.25] if 
interpolation == "linear" else [3.0, 4.0], + }, + index=[0.5, 0.75], + ) tm.assert_frame_equal(res, exp) - res = df.quantile(0.5, axis=1) + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) exp = Series(np.arange(1.0, 6.0), name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5, 0.75], axis=1) + res = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75]) + if interpolation == "nearest": + exp.iloc[1, -1] = np.nan tm.assert_frame_equal(res, exp) # full-nan column df["b"] = np.nan - res = df.quantile(0.5) + res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([3.0, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5, 0.75]) + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75]) tm.assert_frame_equal(res, exp) - def test_quantile_nat(self): - + def test_quantile_nat(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.node.add_marker( + pytest.mark.xfail(reason="Axis name incorrectly set.") + ) # full NaT column df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]}) - res = df.quantile(0.5, numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) exp = Series([pd.NaT], index=["a"], name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5], numeric_only=False) + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) exp = DataFrame({"a": [pd.NaT]}, index=[0.5]) tm.assert_frame_equal(res, exp) @@ -527,50 +708,57 @@ def test_quantile_nat(self): } ) - res = df.quantile(0.5, numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + 
) exp = Series([Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5], numeric_only=False) + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) exp = DataFrame( [[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"] ) tm.assert_frame_equal(res, exp) - def test_quantile_empty_no_rows_floats(self): + def test_quantile_empty_no_rows_floats(self, interp_method): + interpolation, method = interp_method - # floats df = DataFrame(columns=["a", "b"], dtype="float64") - res = df.quantile(0.5) + res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5]) + res = df.quantile([0.5], interpolation=interpolation, method=method) exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) tm.assert_frame_equal(res, exp) - res = df.quantile(0.5, axis=1) + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) exp = Series([], index=[], dtype="float64", name=0.5) tm.assert_series_equal(res, exp) - res = df.quantile([0.5], axis=1) + res = df.quantile([0.5], axis=1, interpolation=interpolation, method=method) exp = DataFrame(columns=[], index=[0.5]) tm.assert_frame_equal(res, exp) - def test_quantile_empty_no_rows_ints(self): - # ints + def test_quantile_empty_no_rows_ints(self, interp_method): + interpolation, method = interp_method df = DataFrame(columns=["a", "b"], dtype="int64") - res = df.quantile(0.5) + res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) - def test_quantile_empty_no_rows_dt64(self): + def test_quantile_empty_no_rows_dt64(self, interp_method): + interpolation, method = interp_method # datetimes df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") - res = df.quantile(0.5, 
numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) exp = Series( [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 ) @@ -578,43 +766,61 @@ def test_quantile_empty_no_rows_dt64(self): # Mixed dt64/dt64tz df["a"] = df["a"].dt.tz_localize("US/Central") - res = df.quantile(0.5, numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) exp = exp.astype(object) tm.assert_series_equal(res, exp) # both dt64tz df["b"] = df["b"].dt.tz_localize("US/Central") - res = df.quantile(0.5, numeric_only=False) + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) exp = exp.astype(df["b"].dtype) tm.assert_series_equal(res, exp) - def test_quantile_empty_no_columns(self): + def test_quantile_empty_no_columns(self, interp_method): # GH#23925 _get_numeric_data may drop all columns + interpolation, method = interp_method df = DataFrame(pd.date_range("1/1/18", periods=5)) df.columns.name = "captain tightpants" - result = df.quantile(0.5, numeric_only=True) + result = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) expected = Series([], index=[], name=0.5, dtype=np.float64) expected.index.name = "captain tightpants" tm.assert_series_equal(result, expected) - result = df.quantile([0.5], numeric_only=True) + result = df.quantile( + [0.5], numeric_only=True, interpolation=interpolation, method=method + ) expected = DataFrame([], index=[0.5], columns=[]) expected.columns.name = "captain tightpants" tm.assert_frame_equal(result, expected) - def test_quantile_item_cache(self, using_array_manager): + def test_quantile_item_cache(self, using_array_manager, interp_method): # previous behavior incorrect retained an invalid _item_cache entry + interpolation, method = interp_method df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"]) df["D"] = df["A"] * 2 ser = df["A"] if not 
using_array_manager: assert len(df._mgr.blocks) == 2 - df.quantile(numeric_only=False) + df.quantile(numeric_only=False, interpolation=interpolation, method=method) ser.values[0] = 99 assert df.iloc[0, 0] == df["A"][0] + def test_invalid_method(self): + with pytest.raises(ValueError, match="Invalid method: foo"): + DataFrame(range(1)).quantile(0.5, method="foo") + + def test_table_invalid_interpolation(self): + with pytest.raises(ValueError, match="Invalid interpolation: foo"): + DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo") + class TestQuantileExtensionDtype: # TODO: tests for axis=1?
Rough attempt at implementing cuDF's `DataFrame.quantiles`; shares a lot of common logic with `sort_values`, as the indexer that sorts the dataframe by all columns is ultimately what is used to grab the desired quantiles. cc @quasiben @rjzamora - [ ] closes #43881 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44301
2021-11-03T15:08:09Z
2022-08-17T02:01:44Z
2022-08-17T02:01:44Z
2022-08-17T02:01:59Z
Detect CPORT header in SAS files
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index bdb7d86a9b37e..3f9bf6662e99f 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -279,6 +279,12 @@ def _read_header(self): # read file header line1 = self._get_row() if line1 != _correct_line1: + if "**COMPRESSED**" in line1: + # this was created with the PROC CPORT method and can't be read + # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm + raise ValueError( + "Header record indicates a CPORT file, which is not readable." + ) raise ValueError("Header record is not an XPORT file.") line2 = self._get_row() diff --git a/pandas/tests/io/sas/data/DEMO_PUF.cpt b/pandas/tests/io/sas/data/DEMO_PUF.cpt new file mode 100644 index 0000000000000..d74b6a70d2812 Binary files /dev/null and b/pandas/tests/io/sas/data/DEMO_PUF.cpt differ diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index 5d3e3b8e23cdb..9232ea8a25e4d 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -30,6 +30,7 @@ def setup_method(self, datapath): self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt") self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt") + self.file05 = os.path.join(self.dirpath, "DEMO_PUF.cpt") with td.file_leak_context(): yield @@ -157,3 +158,11 @@ def test_truncated_float_support(self): data = read_sas(self.file04, format="xport") tm.assert_frame_equal(data.astype("int64"), data_csv) + + def test_cport_header_found_raises(self): + # Test with DEMO_PUF.cpt, the beginning of puf2019_1_fall.xpt + # from https://www.cms.gov/files/zip/puf2019.zip + # (despite the extension, it's a cpt file) + msg = "Header record indicates a CPORT file, which is not readable." + with pytest.raises(ValueError, match=msg): + read_sas(self.file05, format="xport")
Refers https://github.com/pandas-dev/pandas/issues/15825 . Doesn't fix the issue (might be unfixable), but gives a more helpful warning.
https://api.github.com/repos/pandas-dev/pandas/pulls/44300
2021-11-03T13:56:42Z
2021-11-06T19:41:49Z
2021-11-06T19:41:49Z
2021-11-07T09:50:03Z
BUG: partially-inferring pydatetime objects
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 2a718fdcf16e7..46d0ef1bb8ad5 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -475,6 +475,7 @@ Datetimelike - Bug in :meth:`date_range` and :meth:`bdate_range` do not return right bound when ``start`` = ``end`` and set is closed on one side (:issue:`43394`) - Bug in inplace addition and subtraction of :class:`DatetimeIndex` or :class:`TimedeltaIndex` with :class:`DatetimeArray` or :class:`TimedeltaArray` (:issue:`43904`) - Bug in in calling ``np.isnan``, ``np.isfinite``, or ``np.isinf`` on a timezone-aware :class:`DatetimeIndex` incorrectly raising ``TypeError`` (:issue:`43917`) +- Bug in constructing a :class:`Series` from datetime-like strings with mixed timezones incorrectly partially-inferring datetime values (:issue:`40111`) - Timedelta diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c0ac9098ec7fc..7693f4cd13e9b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1526,7 +1526,7 @@ def try_datetime(v: np.ndarray) -> ArrayLike: try: # GH#19671 we pass require_iso8601 to be relatively strict # when parsing strings. - dta = sequence_to_datetimes(v, require_iso8601=True, allow_object=True) + dta = sequence_to_datetimes(v, require_iso8601=True, allow_object=False) except (ValueError, TypeError): # e.g. 
<class 'numpy.timedelta64'> is not convertible to datetime return v.reshape(shape) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index dbf6d5627c00b..2c33284df18c5 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1045,6 +1045,18 @@ def test_constructor_with_datetime_tz2(self): expected = Series(DatetimeIndex(["NaT", "NaT"], tz="US/Eastern")) tm.assert_series_equal(s, expected) + def test_constructor_no_partial_datetime_casting(self): + # GH#40111 + vals = [ + "nan", + Timestamp("1990-01-01"), + "2015-03-14T16:15:14.123-08:00", + "2019-03-04T21:56:32.620-07:00", + None, + ] + ser = Series(vals) + assert all(ser[i] is vals[i] for i in range(len(vals))) + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) @pytest.mark.parametrize("dtype", ["M8", "m8"]) @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 3fa6441e47242..1f75bc11005bc 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1123,17 +1123,32 @@ def test_iso8601_strings_mixed_offsets_with_naive(self): def test_mixed_offsets_with_native_datetime_raises(self): # GH 25978 - ser = Series( + + vals = [ + "nan", + Timestamp("1990-01-01"), + "2015-03-14T16:15:14.123-08:00", + "2019-03-04T21:56:32.620-07:00", + None, + ] + ser = Series(vals) + assert all(ser[i] is vals[i] for i in range(len(vals))) # GH#40111 + + mixed = to_datetime(ser) + expected = Series( [ - "nan", + "NaT", Timestamp("1990-01-01"), - "2015-03-14T16:15:14.123-08:00", - "2019-03-04T21:56:32.620-07:00", + Timestamp("2015-03-14T16:15:14.123-08:00").to_pydatetime(), + Timestamp("2019-03-04T21:56:32.620-07:00").to_pydatetime(), None, - ] + ], + dtype=object, ) + tm.assert_series_equal(mixed, expected) + with pytest.raises(ValueError, match="Tz-aware datetime.datetime"): - 
to_datetime(ser) + to_datetime(mixed) def test_non_iso_strings_with_tz_offset(self): result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
- [x] xref #40111 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44296
2021-11-02T23:29:00Z
2021-11-04T00:38:40Z
2021-11-04T00:38:40Z
2021-11-04T01:11:32Z
DEPR: unused 'errors' keyword in where, mask
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 2a718fdcf16e7..b58a917f5d8b4 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -397,7 +397,7 @@ Other Deprecations - Deprecated silent dropping of columns that raised a ``TypeError``, ``DataError``, and some cases of ``ValueError`` in :meth:`Series.aggregate`, :meth:`DataFrame.aggregate`, :meth:`Series.groupby.aggregate`, and :meth:`DataFrame.groupby.aggregate` when used with a list (:issue:`43740`) - Deprecated casting behavior when setting timezone-aware value(s) into a timezone-aware :class:`Series` or :class:`DataFrame` column when the timezones do not match. Previously this cast to object dtype. In a future version, the values being inserted will be converted to the series or column's existing timezone (:issue:`37605`) - Deprecated casting behavior when passing an item with mismatched-timezone to :meth:`DatetimeIndex.insert`, :meth:`DatetimeIndex.putmask`, :meth:`DatetimeIndex.where` :meth:`DatetimeIndex.fillna`, :meth:`Series.mask`, :meth:`Series.where`, :meth:`Series.fillna`, :meth:`Series.shift`, :meth:`Series.replace`, :meth:`Series.reindex` (and :class:`DataFrame` column analogues). In the past this has cast to object dtype. In a future version, these will cast the passed item to the index or series's timezone (:issue:`37605`) -- +- Deprecated the 'errors' keyword argument in :meth:`Series.where`, :meth:`DataFrame.where`, :meth:`Series.mask`, and meth:`DataFrame.mask`; in a future version the argument will be removed (:issue:`44294`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5c24c57925393..9186365fc390e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10907,7 +10907,7 @@ def where( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): return super().where(cond, other, inplace, axis, level, errors, try_cast) @@ -10922,7 +10922,7 @@ def mask( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): return super().mask(cond, other, inplace, axis, level, errors, try_cast) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f3d7d6cee5446..b53679e2b584a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8900,7 +8900,7 @@ def _where( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, ): """ Equivalent to public method `where`, except that `other` is not @@ -8908,6 +8908,14 @@ def _where( """ inplace = validate_bool_kwarg(inplace, "inplace") + if errors is not lib.no_default: + warnings.warn( + f"The 'errors' keyword in {type(self).__name__}.where and mask is " + "deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if axis is not None: axis = self._get_axis_number(axis) @@ -9025,7 +9033,6 @@ def _where( other=other, cond=cond, align=align, - errors=errors, ) result = self._constructor(new_data) return result.__finalize__(self) @@ -9044,7 +9051,7 @@ def where( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): """ @@ -9077,6 +9084,9 @@ def where( - 'raise' : allow exceptions to be raised. - 'ignore' : suppress exceptions. On error return original object. + .. deprecated:: 1.4.0 + Previously was silently ignored. + try_cast : bool, default None Try to cast the result back to the input type (if possible). 
@@ -9197,7 +9207,7 @@ def mask( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 7f728ac9ddae5..29edb80f473fa 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -327,7 +327,7 @@ def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T return type(self)(result_arrays, self._axes) - def where(self: T, other, cond, align: bool, errors: str) -> T: + def where(self: T, other, cond, align: bool) -> T: if align: align_keys = ["other", "cond"] else: @@ -339,7 +339,6 @@ def where(self: T, other, cond, align: bool, errors: str) -> T: align_keys=align_keys, other=other, cond=cond, - errors=errors, ) # TODO what is this used for? diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 751cf41a09f14..33c78f396b80b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1144,7 +1144,7 @@ def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> list[Blo return [self.make_block(new_values)] - def where(self, other, cond, errors="raise") -> list[Block]: + def where(self, other, cond) -> list[Block]: """ evaluate the block; return result block(s) from the result @@ -1152,9 +1152,6 @@ def where(self, other, cond, errors="raise") -> list[Block]: ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray - errors : str, {'raise', 'ignore'}, default 'raise' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. 
On error return original object Returns ------- @@ -1163,7 +1160,6 @@ def where(self, other, cond, errors="raise") -> list[Block]: assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) - assert errors in ["raise", "ignore"] transpose = self.ndim == 2 values = self.values @@ -1185,9 +1181,8 @@ def where(self, other, cond, errors="raise") -> list[Block]: # or if we are a single block (ndim == 1) if not self._can_hold_element(other): # we cannot coerce, return a compat dtype - # we are explicitly ignoring errors block = self.coerce_to_target_dtype(other) - blocks = block.where(orig_other, cond, errors=errors) + blocks = block.where(orig_other, cond) return self._maybe_downcast(blocks, "infer") # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type @@ -1586,7 +1581,7 @@ def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> list[Blo new_values = self.values.shift(periods=periods, fill_value=fill_value) return [self.make_block_same_class(new_values)] - def where(self, other, cond, errors="raise") -> list[Block]: + def where(self, other, cond) -> list[Block]: cond = extract_bool_array(cond) assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) @@ -1619,7 +1614,7 @@ def where(self, other, cond, errors="raise") -> list[Block]: # For now at least only support casting e.g. 
# Interval[int64]->Interval[float64] raise - return blk.where(other, cond, errors) + return blk.where(other, cond) raise return [self.make_block_same_class(result)] @@ -1704,7 +1699,7 @@ def putmask(self, mask, new) -> list[Block]: arr.T.putmask(mask, new) return [self] - def where(self, other, cond, errors="raise") -> list[Block]: + def where(self, other, cond) -> list[Block]: arr = self.values cond = extract_bool_array(cond) @@ -1712,7 +1707,7 @@ def where(self, other, cond, errors="raise") -> list[Block]: try: res_values = arr.T._where(cond, other).T except (ValueError, TypeError): - return Block.where(self, other, cond, errors=errors) + return Block.where(self, other, cond) nb = self.make_block_same_class(res_values) return [nb] diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 9286238e81fc3..7db19eda0f2fb 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -315,7 +315,7 @@ def apply( out = type(self).from_blocks(result_blocks, self.axes) return out - def where(self: T, other, cond, align: bool, errors: str) -> T: + def where(self: T, other, cond, align: bool) -> T: if align: align_keys = ["other", "cond"] else: @@ -327,7 +327,6 @@ def where(self: T, other, cond, align: bool, errors: str) -> T: align_keys=align_keys, other=other, cond=cond, - errors=errors, ) def setitem(self: T, indexer, value) -> T: diff --git a/pandas/core/series.py b/pandas/core/series.py index b67f16008bb13..391169af598c2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5461,7 +5461,7 @@ def where( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): return super().where(cond, other, inplace, axis, level, errors, try_cast) @@ -5476,7 +5476,7 @@ def mask( inplace=False, axis=None, level=None, - errors="raise", + errors=lib.no_default, try_cast=lib.no_default, ): return super().mask(cond, other, inplace, axis, level, errors, try_cast) diff 
--git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index 2feaf4e951ab8..b132041f8afd0 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -147,15 +147,18 @@ def test_fillna_consistency(self): ) tm.assert_series_equal(result, expected) - # where (we ignore the errors=) - result = ser.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) + msg = "The 'errors' keyword in " + with tm.assert_produces_warning(FutureWarning, match=msg): + # where (we ignore the errors=) + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) tm.assert_series_equal(result, expected) - result = ser.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) tm.assert_series_equal(result, expected) # with a non-datetime
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44294
2021-11-02T22:05:54Z
2021-11-05T00:31:48Z
2021-11-05T00:31:48Z
2021-11-05T01:01:10Z
TYP: _ensure_data and infer_dtype_from_array
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c1b587ce3a6b2..011f9c2a680cb 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -112,16 +112,19 @@ # --------------- # def _ensure_data(values: ArrayLike) -> np.ndarray: """ - routine to ensure that our data is of the correct - input dtype for lower-level routines + Ensure values is of the correct input dtype for lower-level routines. This will coerce: - ints -> int64 - uint -> uint64 - - bool -> uint64 (TODO this should be uint8) + - bool -> uint8 - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes + - categorical[bool] without nulls -> uint8 + - categorical[bool] with nulls -> ValueError: cannot convert float NaN to integer + - boolean without nulls -> uint8 + - boolean with nulls -> object Parameters ---------- @@ -165,10 +168,8 @@ def _ensure_data(values: ArrayLike) -> np.ndarray: return np.asarray(values) elif is_complex_dtype(values.dtype): - # Incompatible return value type (got "Tuple[Union[Any, ExtensionArray, - # ndarray[Any, Any]], Union[Any, ExtensionDtype]]", expected - # "Tuple[ndarray[Any, Any], Union[dtype[Any], ExtensionDtype]]") - return values # type: ignore[return-value] + assert isinstance(values, np.ndarray) # for mypy + return values # datetimelike elif needs_i8_conversion(values.dtype): @@ -1723,11 +1724,7 @@ def safe_sort( if not isinstance(values, (np.ndarray, ABCExtensionArray)): # don't convert to string types dtype, _ = infer_dtype_from_array(values) - # error: Argument "dtype" to "asarray" has incompatible type "Union[dtype[Any], - # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, - # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], - # _DTypeDict, Tuple[Any, Any]]]" - values = np.asarray(values, dtype=dtype) # type: ignore[arg-type] + values = np.asarray(values, dtype=dtype) sorter = None diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py 
index c0ac9098ec7fc..6b4fe68d65a8b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -14,6 +14,7 @@ from typing import ( TYPE_CHECKING, Any, + Literal, Sized, TypeVar, cast, @@ -795,6 +796,25 @@ def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: return {maybe_box_datetimelike(key): value for key, value in d.items()} +@overload +def infer_dtype_from_array( + arr, +) -> tuple[np.dtype, ArrayLike]: + ... + + +@overload +def infer_dtype_from_array( + arr, pandas_dtype: Literal[False] = ... +) -> tuple[np.dtype, ArrayLike]: + ... + + +@overload +def infer_dtype_from_array(arr, pandas_dtype: bool = ...) -> tuple[DtypeObj, ArrayLike]: + ... + + def infer_dtype_from_array( arr, pandas_dtype: bool = False ) -> tuple[DtypeObj, ArrayLike]:
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44292
2021-11-02T21:06:55Z
2022-04-24T03:13:35Z
null
2022-04-24T03:13:35Z
TYP: remove a `type: ignore`
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index ecddd83322bbf..0f7800480837c 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -131,9 +131,17 @@ class Timestamp(datetime): def utcoffset(self) -> timedelta | None: ... def tzname(self) -> str | None: ... def dst(self) -> timedelta | None: ... + # error: Argument 1 of "__le__" is incompatible with supertype "date"; + # supertype defines the argument type as "date" def __le__(self, other: datetime) -> bool: ... # type: ignore + # error: Argument 1 of "__lt__" is incompatible with supertype "date"; + # supertype defines the argument type as "date" def __lt__(self, other: datetime) -> bool: ... # type: ignore + # error: Argument 1 of "__ge__" is incompatible with supertype "date"; + # supertype defines the argument type as "date" def __ge__(self, other: datetime) -> bool: ... # type: ignore + # error: Argument 1 of "__gt__" is incompatible with supertype "date"; + # supertype defines the argument type as "date" def __gt__(self, other: datetime) -> bool: ... 
# type: ignore # error: Signature of "__add__" incompatible with supertype "date"/"datetime" @overload # type: ignore[override] diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index fa07b5fea5ea3..737de8a636b7d 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -480,7 +480,7 @@ def find( dtype_type = type(dtype) else: dtype_type = dtype - if issubclass(dtype_type, ExtensionDtype): + if dtype_type in self.dtypes: # cast needed here as mypy doesn't know we have figured # out it is an ExtensionDtype or type_t[ExtensionDtype] return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 6776064342db0..802109c41567f 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1426,7 +1426,7 @@ def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool: ) -def is_extension_array_dtype(arr_or_dtype) -> bool: +def is_extension_array_dtype(arr_or_dtype: object) -> bool: """ Check if an object is a pandas extension array type. @@ -1476,6 +1476,10 @@ def is_extension_array_dtype(arr_or_dtype) -> bool: return True elif isinstance(dtype, np.dtype): return False + elif isinstance(dtype, type) and issubclass(dtype, ExtensionDtype): + return True + elif not isinstance(dtype, str): + return False else: return registry.find(dtype) is not None diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 329d28c263ff2..cb1e42258336b 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -386,6 +386,7 @@ def registry_without_decimal(): def test_array_not_registered(registry_without_decimal): # check we aren't on it assert registry.find("decimal") is None + assert registry.find(DecimalDtype) is None data = [decimal.Decimal("1"), decimal.Decimal("2")] result = pd.array(data, dtype=DecimalDtype)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44288
2021-11-02T13:21:16Z
2022-03-06T01:21:00Z
null
2022-03-06T01:21:01Z
Revert "CLN: DataFrame.__repr__"
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2b0f7a36b6fa2..5c24c57925393 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -988,13 +988,15 @@ def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ + buf = StringIO("") if self._info_repr(): - buf = StringIO("") self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() - return self.to_string(**repr_params) + self.to_string(buf=buf, **repr_params) + + return buf.getvalue() def _repr_html_(self) -> str | None: """
Reverts pandas-dev/pandas#44271
https://api.github.com/repos/pandas-dev/pandas/pulls/44286
2021-11-02T08:55:05Z
2021-11-02T10:09:33Z
2021-11-02T10:09:33Z
2021-11-02T10:09:37Z
BUG: Period.__eq__ numpy scalar (#44182 (comment))
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 337876d610c5e..f594e0a8bdafd 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1657,8 +1657,12 @@ cdef class _Period(PeriodMixin): elif other is NaT: return _nat_scalar_rules[op] elif util.is_array(other): - # in particular ndarray[object]; see test_pi_cmp_period - return np.array([PyObject_RichCompare(self, x, op) for x in other]) + # GH#44285 + if cnp.PyArray_IsZeroDim(other): + return PyObject_RichCompare(self, other.item(), op) + else: + # in particular ndarray[object]; see test_pi_cmp_period + return np.array([PyObject_RichCompare(self, x, op) for x in other]) return NotImplemented def __hash__(self): diff --git a/pandas/tests/arithmetic/test_period.py b/pandas/tests/arithmetic/test_period.py index d7cb314743e86..41c2cb2cc4f1e 100644 --- a/pandas/tests/arithmetic/test_period.py +++ b/pandas/tests/arithmetic/test_period.py @@ -189,6 +189,10 @@ def test_pi_cmp_period(self): result = idx.values.reshape(10, 2) < idx[10] tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) + # Tests Period.__richcmp__ against ndarray[object, ndim=0] + result = idx < np.array(idx[10]) + tm.assert_numpy_array_equal(result, exp) + # TODO: moved from test_datetime64; de-duplicate with version below def test_parr_cmp_period_scalar2(self, box_with_array): xbox = get_expected_box(box_with_array) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index f1b8c1cfdd39b..cd1bf21753249 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1148,6 +1148,16 @@ def test_period_cmp_nat(self): assert not left <= right assert not left >= right + @pytest.mark.parametrize( + "zerodim_arr, expected", + ((np.array(0), False), (np.array(Period("2000-01", "M")), True)), + ) + def test_comparison_numpy_zerodim_arr(self, zerodim_arr, expected): + p = Period("2000-01", "M") + + assert (p == 
zerodim_arr) is expected + assert (zerodim_arr == p) is expected + class TestArithmetic: def test_sub_delta(self):
- [x] fixes https://github.com/pandas-dev/pandas/pull/44182#issuecomment-956851322 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/44285
2021-11-02T07:47:55Z
2021-11-04T00:43:43Z
2021-11-04T00:43:43Z
2021-11-04T07:33:15Z
BENCH: Add more numba rolling benchmarks
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 3d2273b6d7324..406b27dd37ea5 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pandas as pd @@ -44,29 +46,56 @@ def time_rolling(self, constructor, window, dtype, function, raw): self.roll.apply(function, raw=raw) -class Engine: +class NumbaEngine: params = ( ["DataFrame", "Series"], ["int", "float"], [np.sum, lambda x: np.sum(x) + 5], - ["cython", "numba"], ["sum", "max", "min", "median", "mean"], + [True, False], + [None, 100], ) - param_names = ["constructor", "dtype", "function", "engine", "method"] + param_names = ["constructor", "dtype", "function", "method", "parallel", "cols"] - def setup(self, constructor, dtype, function, engine, method): + def setup(self, constructor, dtype, function, method, parallel, cols): N = 10 ** 3 - arr = (100 * np.random.random(N)).astype(dtype) - self.data = getattr(pd, constructor)(arr) - - def time_rolling_apply(self, constructor, dtype, function, engine, method): - self.data.rolling(10).apply(function, raw=True, engine=engine) - - def time_expanding_apply(self, constructor, dtype, function, engine, method): - self.data.expanding().apply(function, raw=True, engine=engine) - - def time_rolling_methods(self, constructor, dtype, function, engine, method): - getattr(self.data.rolling(10), method)(engine=engine) + shape = (N, cols) if cols is not None and constructor != "Series" else N + arr = (100 * np.random.random(shape)).astype(dtype) + data = getattr(pd, constructor)(arr) + + # Warm the cache + with warnings.catch_warnings(record=True): + # Catch parallel=True not being applicable e.g. 
1D data + self.roll = data.rolling(10) + self.roll.apply( + function, raw=True, engine="numba", engine_kwargs={"parallel": parallel} + ) + getattr(self.roll, method)( + engine="numba", engine_kwargs={"parallel": parallel} + ) + + self.expand = data.expanding() + self.expand.apply( + function, raw=True, engine="numba", engine_kwargs={"parallel": parallel} + ) + + def time_rolling_apply(self, constructor, dtype, function, method, parallel, col): + with warnings.catch_warnings(record=True): + self.roll.apply( + function, raw=True, engine="numba", engine_kwargs={"parallel": parallel} + ) + + def time_expanding_apply(self, constructor, dtype, function, method, parallel, col): + with warnings.catch_warnings(record=True): + self.expand.apply( + function, raw=True, engine="numba", engine_kwargs={"parallel": parallel} + ) + + def time_rolling_methods(self, constructor, dtype, function, method, parallel, col): + with warnings.catch_warnings(record=True): + getattr(self.roll, method)( + engine="numba", engine_kwargs={"parallel": parallel} + ) class ExpandingMethods:
- [x] xref #https://github.com/pandas-dev/pandas/pull/44176#issuecomment-953441787 - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44283
2021-11-02T05:41:05Z
2021-11-05T01:00:19Z
2021-11-05T01:00:19Z
2021-11-05T02:51:10Z
BUG: Add fix for hashing timestamps with folds
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 3924191bebcfd..b93b210078a75 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -712,6 +712,7 @@ Datetimelike - Bug in :class:`DateOffset`` addition with :class:`Timestamp` where ``offset.nanoseconds`` would not be included in the result (:issue:`43968`, :issue:`36589`) - Bug in :meth:`Timestamp.fromtimestamp` not supporting the ``tz`` argument (:issue:`45083`) - Bug in :class:`DataFrame` construction from dict of :class:`Series` with mismatched index dtypes sometimes raising depending on the ordering of the passed dict (:issue:`44091`) +- Bug in :class:`Timestamp` hashing during some DST transitions caused a segmentation fault (:issue:`33931` and :issue:`40817`) - Timedelta diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 304ac9405c5e1..03ee62e59aa3d 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -180,6 +180,8 @@ cdef class _Timestamp(ABCTimestamp): def __hash__(_Timestamp self): if self.nanosecond: return hash(self.value) + if self.fold: + return datetime.__hash__(self.replace(fold=0)) return datetime.__hash__(self) def __richcmp__(_Timestamp self, object other, int op): diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index bee8025275b42..b70ceea845ee8 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -8,6 +8,8 @@ import numpy as np import pytest +from pandas._libs.tslibs.timezones import dateutil_gettz as gettz + import pandas as pd from pandas import ( Categorical, @@ -78,6 +80,41 @@ def test_setitem_reset_index_dtypes(self): result = df2.reset_index() tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize( + "timezone, year, month, day, hour", + [["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]], + ) + def 
test_reindex_timestamp_with_fold(self, timezone, year, month, day, hour): + # see gh-40817 + test_timezone = gettz(timezone) + transition_1 = pd.Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=0, + tzinfo=test_timezone, + ) + transition_2 = pd.Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=1, + tzinfo=test_timezone, + ) + df = ( + DataFrame({"index": [transition_1, transition_2], "vals": ["a", "b"]}) + .set_index("index") + .reindex(["1", "2"]) + ) + tm.assert_frame_equal( + df, + DataFrame({"index": ["1", "2"], "vals": [None, None]}).set_index("index"), + ) + class TestDataFrameSelectReindex: # These are specific reindex-based tests; other indexing tests should go in diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 008731b13172e..4639a24d019fe 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -452,6 +452,33 @@ def test_hash_equivalent(self): stamp = Timestamp(datetime(2011, 1, 1)) assert d[stamp] == 5 + @pytest.mark.parametrize( + "timezone, year, month, day, hour", + [["America/Chicago", 2013, 11, 3, 1], ["America/Santiago", 2021, 4, 3, 23]], + ) + def test_hash_timestamp_with_fold(self, timezone, year, month, day, hour): + # see gh-33931 + test_timezone = gettz(timezone) + transition_1 = Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=0, + tzinfo=test_timezone, + ) + transition_2 = Timestamp( + year=year, + month=month, + day=day, + hour=hour, + minute=0, + fold=1, + tzinfo=test_timezone, + ) + assert hash(transition_1) == hash(transition_2) + def test_tz_conversion_freq(self, tz_naive_fixture): # GH25241 with tm.assert_produces_warning(FutureWarning, match="freq"):
- [x] closes #33931, #40817 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry Edit: https://github.com/pandas-dev/pandas/issues/42305#issuecomment-895218386 This comment identifies the problem as being upstream in datetime's construction of timestamps which fails during some DST changes. The datetime hash function then being used tries to unset the fold representing the DST change, (https://github.com/python/cpython/blob/main/Lib/datetime.py#L2117). But it does this using the datetime replace function - this change fixes that problem by removing the fold prior to calling the datetime hash function using timestamp's replace function instead.
https://api.github.com/repos/pandas-dev/pandas/pulls/44282
2021-11-02T05:21:01Z
2022-01-04T00:25:24Z
2022-01-04T00:25:24Z
2022-01-04T00:30:29Z
DOC: Fix rolling apply raw default arg
diff --git a/pandas/core/window/doc.py b/pandas/core/window/doc.py index 9a645f55ffa39..2cc7962c6bd7b 100644 --- a/pandas/core/window/doc.py +++ b/pandas/core/window/doc.py @@ -59,7 +59,7 @@ def create_section_header(header: str) -> str: .. versionchanged:: 1.0.0 - raw : bool, default None + raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray
- [x] closes #44277 - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them
https://api.github.com/repos/pandas-dev/pandas/pulls/44281
2021-11-02T04:15:52Z
2021-11-05T00:59:40Z
2021-11-05T00:59:40Z
2021-11-05T02:47:16Z
CI: Revert splitting 3.10 tests
diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index d6647e8059306..824e7de3bde41 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -17,6 +17,7 @@ env: PANDAS_CI: 1 PATTERN: "not slow and not network and not clipboard" COVERAGE: true + PYTEST_TARGET: pandas jobs: build: @@ -24,12 +25,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macOS-latest] - pytest_target: ["pandas/tests/[a-h]*", "pandas/tests/[i-z]*"] - include: - # No need to split tests on windows - - os: windows-latest - pytest_target: pandas + os: [ubuntu-latest, macOS-latest, windows-latest] name: actions-310-dev timeout-minutes: 80 @@ -67,8 +63,6 @@ jobs: python -c "import pandas; pandas.show_versions();" - name: Test with pytest - env: - PYTEST_TARGET: ${{ matrix.pytest_target }} shell: bash run: | ci/run_tests.sh
Maths says this should work now. - [ ] closes #44173 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [ ] whatsnew entry Don't backport.
https://api.github.com/repos/pandas-dev/pandas/pulls/44280
2021-11-01T23:15:23Z
2021-11-06T21:30:23Z
2021-11-06T21:30:23Z
2021-11-06T22:01:14Z
CLN: missing f for f-string
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index d8c58d1eaf4c7..c0ac9098ec7fc 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1590,7 +1590,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: warnings.warn( f"Inferring {value.dtype} from data containing strings is deprecated " "and will be removed in a future version. To retain the old behavior " - "explicitly pass Series(data, dtype={value.dtype})", + f"explicitly pass Series(data, dtype={value.dtype})", FutureWarning, stacklevel=find_stack_level(), )
null
https://api.github.com/repos/pandas-dev/pandas/pulls/44278
2021-11-01T22:39:33Z
2021-11-01T23:27:33Z
2021-11-01T23:27:33Z
2022-04-01T01:36:45Z
BUG: broadcasting listlike values in Series.__setitem__ GH#44265
diff --git a/doc/source/whatsnew/v1.4.0.rst b/doc/source/whatsnew/v1.4.0.rst index 1b3be65ee66f2..05e7026b0faa3 100644 --- a/doc/source/whatsnew/v1.4.0.rst +++ b/doc/source/whatsnew/v1.4.0.rst @@ -540,6 +540,7 @@ Indexing - Bug in setting a scalar :class:`Interval` value into a :class:`Series` with ``IntervalDtype`` when the scalar's sides are floats and the values' sides are integers (:issue:`44201`) - Bug when setting string-backed :class:`Categorical` values that can be parsed to datetimes into a :class:`DatetimeArray` or :class:`Series` or :class:`DataFrame` column backed by :class:`DatetimeArray` failing to parse these strings (:issue:`44236`) - Bug in :meth:`Series.__setitem__` with an integer dtype other than ``int64`` setting with a ``range`` object unnecessarily upcasting to ``int64`` (:issue:`44261`) +- Bug in :meth:`Series.__setitem__` with a boolean mask indexer setting a listlike value of length 1 incorrectly broadcasting that value (:issue:`44265`) - Missing diff --git a/pandas/core/series.py b/pandas/core/series.py index 391169af598c2..02f4810bb1e6b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1096,9 +1096,26 @@ def __setitem__(self, key, value) -> None: if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) + + if ( + is_list_like(value) + and len(value) != len(self) + and not isinstance(value, Series) + and not is_object_dtype(self.dtype) + ): + # Series will be reindexed to have matching length inside + # _where call below + # GH#44265 + indexer = key.nonzero()[0] + self._set_values(indexer, value) + return + + # otherwise with listlike other we interpret series[mask] = other + # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: + # test_where_dups self.iloc[key] = value return diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 5f0710dfbb85a..4706025b70db6 100644 --- 
a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1064,3 +1064,43 @@ def test_setitem_with_bool_indexer(): df.loc[[True, False, False], "a"] = 10 expected = DataFrame({"a": [10, 2, 3]}) tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("size", range(2, 6)) +@pytest.mark.parametrize( + "mask", [[True, False, False, False, False], [True, False], [False]] +) +@pytest.mark.parametrize( + "item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min] +) +# Test numpy arrays, lists and tuples as the input to be +# broadcast +@pytest.mark.parametrize( + "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)] +) +def test_setitem_bool_indexer_dont_broadcast_length1_values(size, mask, item, box): + # GH#44265 + # see also tests.series.indexing.test_where.test_broadcast + + selection = np.resize(mask, size) + + data = np.arange(size, dtype=float) + + ser = Series(data) + + if selection.sum() != 1: + msg = ( + "cannot set using a list-like indexer with a different " + "length than the value" + ) + with pytest.raises(ValueError, match=msg): + # GH#44265 + ser[selection] = box(item) + else: + # In this corner case setting is equivalent to setting with the unboxed + # item + ser[selection] = box(item) + + expected = Series(np.arange(size, dtype=float)) + expected[selection] = item + tm.assert_series_equal(ser, expected) diff --git a/pandas/tests/series/indexing/test_where.py b/pandas/tests/series/indexing/test_where.py index fc9d3a1e1e6ab..88b75164d2f3e 100644 --- a/pandas/tests/series/indexing/test_where.py +++ b/pandas/tests/series/indexing/test_where.py @@ -88,7 +88,7 @@ def test_where_unsafe(): s = Series(np.arange(10)) mask = s > 5 - msg = "cannot assign mismatch length to masked array" + msg = "cannot set using a list-like indexer with a different length than the value" with pytest.raises(ValueError, match=msg): s[mask] = [5, 4, 3, 2, 1] @@ -161,13 +161,10 @@ def test_where_error(): 
tm.assert_series_equal(s, expected) # failures - msg = "cannot assign mismatch length to masked array" + msg = "cannot set using a list-like indexer with a different length than the value" with pytest.raises(ValueError, match=msg): s[[True, False]] = [0, 2, 3] - msg = ( - "NumPy boolean array indexing assignment cannot assign 0 input " - "values to the 1 output values where the mask is true" - ) + with pytest.raises(ValueError, match=msg): s[[True, False]] = [] @@ -298,6 +295,7 @@ def test_where_setitem_invalid(): "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)] ) def test_broadcast(size, mask, item, box): + # GH#8801, GH#4195 selection = np.resize(mask, size) data = np.arange(size, dtype=float) @@ -309,7 +307,8 @@ def test_broadcast(size, mask, item, box): ) s = Series(data) - s[selection] = box(item) + + s[selection] = item tm.assert_series_equal(s, expected) s = Series(data)
- [x] closes #44265 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit) for how to run them - [x] whatsnew entry This should in turn allow us to avoid some special-casing that we do in internals/putmask_smart. Sits on top of #44261
https://api.github.com/repos/pandas-dev/pandas/pulls/44275
2021-11-01T19:36:00Z
2021-11-05T16:18:15Z
2021-11-05T16:18:14Z
2021-11-05T18:22:24Z
DOC: added examples to DataFrame.var #44162
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f3d7d6cee5446..cbfbd62975692 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10661,12 +10661,12 @@ def sem( @doc( _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " - "N-1 by default. This can be changed using the ddof argument", + "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", - examples="", + examples=_var_examples, ) def var( self, @@ -11221,6 +11221,32 @@ def _doc_params(cls): age 16.269219 height 0.205609""" +_var_examples = """ + +Examples +-------- +>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3], +... 'age': [21, 25, 62, 43], +... 'height': [1.61, 1.87, 1.49, 2.01]} +... ).set_index('person_id') +>>> df + age height +person_id +0 21 1.61 +1 25 1.87 +2 62 1.49 +3 43 2.01 + +>>> df.var() +age 352.916667 +height 0.056367 + +Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1: + +>>> df.var(ddof=0) +age 264.687500 +height 0.042275""" + _bool_doc = """ {desc}
- [x] closes #44162 by adding two examples to docstring of DataFrame.var - [x] tests added / passed - [x] All linting tests passed (pre-commit) Screenshot of locally built documentation: ![var](https://user-images.githubusercontent.com/58558211/139727701-f6c92ad1-948d-42bd-8989-2d874e3cfa58.PNG)
https://api.github.com/repos/pandas-dev/pandas/pulls/44274
2021-11-01T19:12:28Z
2021-11-06T20:51:37Z
2021-11-06T20:51:36Z
2021-11-06T20:51:44Z
TYP: use TYPE_CHECKING for import_optional_dependency("numba")
diff --git a/pandas/core/_numba/executor.py b/pandas/core/_numba/executor.py index c2b6191c05152..c9d3a9b1660ea 100644 --- a/pandas/core/_numba/executor.py +++ b/pandas/core/_numba/executor.py @@ -1,6 +1,9 @@ from __future__ import annotations -from typing import Callable +from typing import ( + TYPE_CHECKING, + Callable, +) import numpy as np @@ -42,10 +45,12 @@ def generate_shared_aggregator( if cache_key in NUMBA_FUNC_CACHE: return NUMBA_FUNC_CACHE[cache_key] - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "column_looper" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def column_looper( values: np.ndarray, start: np.ndarray, diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index ea295af8d7910..24d66725caa70 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -3,6 +3,7 @@ import inspect from typing import ( + TYPE_CHECKING, Any, Callable, ) @@ -90,10 +91,12 @@ def generate_numba_agg_func( return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "group_agg" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_agg( values: np.ndarray, index: np.ndarray, @@ -152,10 +155,12 @@ def generate_numba_transform_func( return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # 
error: Untyped decorator makes function "group_transform" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_transform( values: np.ndarray, index: np.ndarray, diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index c738213d4c487..06630989444bb 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,9 +1,11 @@ """Common utilities for Numba operations""" -# pyright: reportUntypedFunctionDecorator = false from __future__ import annotations import types -from typing import Callable +from typing import ( + TYPE_CHECKING, + Callable, +) import numpy as np @@ -84,7 +86,10 @@ def jit_user_function( function Numba JITed function """ - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") if numba.extending.is_jitted(func): # Don't jit a user passed jitted function diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index 74dc104b6db90..0e8eea3ec671e 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -1,8 +1,8 @@ -# pyright: reportUntypedFunctionDecorator = false from __future__ import annotations import functools from typing import ( + TYPE_CHECKING, Any, Callable, ) @@ -56,10 +56,12 @@ def generate_numba_apply_func( return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "roll_apply" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_apply( values: np.ndarray, begin: np.ndarray, @@ -115,10 +117,12 @@ def generate_numba_ewm_func( if cache_key in 
NUMBA_FUNC_CACHE: return NUMBA_FUNC_CACHE[cache_key] - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "ewma" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm( values: np.ndarray, begin: np.ndarray, @@ -217,10 +221,12 @@ def generate_numba_table_func( return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "roll_table" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_table( values: np.ndarray, begin: np.ndarray, @@ -250,7 +256,10 @@ def roll_table( # https://github.com/numba/numba/issues/1269 @functools.lru_cache(maxsize=None) def generate_manual_numpy_nan_agg_with_axis(nan_func): - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") @numba.jit(nopython=True, nogil=True, parallel=True) def nan_agg_with_axis(table): @@ -296,10 +305,12 @@ def generate_numba_ewm_table_func( if cache_key in NUMBA_FUNC_CACHE: return NUMBA_FUNC_CACHE[cache_key] - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "ewm_table" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm_table( values: np.ndarray, begin: np.ndarray, diff --git a/pandas/core/window/online.py b/pandas/core/window/online.py index 
e8804936da78f..8ef4aee154db4 100644 --- a/pandas/core/window/online.py +++ b/pandas/core/window/online.py @@ -1,4 +1,5 @@ from typing import ( + TYPE_CHECKING, Dict, Optional, ) @@ -31,10 +32,12 @@ def generate_online_numba_ewma_func(engine_kwargs: Optional[Dict[str, bool]]): if cache_key in NUMBA_FUNC_CACHE: return NUMBA_FUNC_CACHE[cache_key] - numba = import_optional_dependency("numba") + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") - # error: Untyped decorator makes function "online_ewma" untyped - @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) # type: ignore[misc] + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def online_ewma( values: np.ndarray, deltas: np.ndarray, diff --git a/typings/numba.pyi b/typings/numba.pyi index 526119951a000..f877cbf339a8b 100644 --- a/typings/numba.pyi +++ b/typings/numba.pyi @@ -40,3 +40,4 @@ def jit( ) -> Callable[[F], F]: ... njit = jit +generated_jit = jit
xref https://github.com/pandas-dev/pandas/pull/44233#issuecomment-955594933 or could go with https://github.com/pandas-dev/pandas/pull/44254#issuecomment-956450632 instead. cc @twoertwein
https://api.github.com/repos/pandas-dev/pandas/pulls/44273
2021-11-01T18:33:08Z
2021-11-29T01:08:35Z
2021-11-29T01:08:35Z
2021-11-29T01:08:40Z