title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
CLN: Separate transform tests
diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 5a1e448beb40f..bc09501583e2c 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1,7 +1,6 @@ from collections import OrderedDict from datetime import datetime from itertools import chain -import operator import warnings import numpy as np @@ -14,6 +13,7 @@ import pandas._testing as tm from pandas.core.apply import frame_apply from pandas.core.base import SpecificationError +from pandas.tests.frame.common import zip_frames @pytest.fixture @@ -1058,25 +1058,6 @@ def test_consistency_for_boxed(self, box, int_frame_const_col): tm.assert_frame_equal(result, expected) -def zip_frames(frames, axis=1): - """ - take a list of frames, zip them together under the - assumption that these all have the first frames' index/columns. - - Returns - ------- - new_frame : DataFrame - """ - if axis == 1: - columns = frames[0].columns - zipped = [f.loc[:, c] for c in columns for f in frames] - return pd.concat(zipped, axis=1) - else: - index = frames[0].index - zipped = [f.loc[i, :] for i in index for f in frames] - return pd.DataFrame(zipped) - - class TestDataFrameAggregate: def test_agg_transform(self, axis, float_frame): other_axis = 1 if axis in {0, "index"} else 0 @@ -1087,16 +1068,10 @@ def test_agg_transform(self, axis, float_frame): f_sqrt = np.sqrt(float_frame) # ufunc - result = float_frame.transform(np.sqrt, axis=axis) expected = f_sqrt.copy() - tm.assert_frame_equal(result, expected) - result = float_frame.apply(np.sqrt, axis=axis) tm.assert_frame_equal(result, expected) - result = float_frame.transform(np.sqrt, axis=axis) - tm.assert_frame_equal(result, expected) - # list-like result = float_frame.apply([np.sqrt], axis=axis) expected = f_sqrt.copy() @@ -1110,9 +1085,6 @@ def test_agg_transform(self, axis, float_frame): ) tm.assert_frame_equal(result, expected) - result = float_frame.transform([np.sqrt], 
axis=axis) - tm.assert_frame_equal(result, expected) - # multiple items in list # these are in the order as if we are applying both # functions per series and then concatting @@ -1128,38 +1100,19 @@ def test_agg_transform(self, axis, float_frame): ) tm.assert_frame_equal(result, expected) - result = float_frame.transform([np.abs, "sqrt"], axis=axis) - tm.assert_frame_equal(result, expected) - def test_transform_and_agg_err(self, axis, float_frame): # cannot both transform and agg - msg = "transforms cannot produce aggregated results" - with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "min"], axis=axis) - msg = "cannot combine transform and aggregation operations" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): float_frame.agg(["max", "sqrt"], axis=axis) - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - float_frame.transform(["max", "sqrt"], axis=axis) - df = pd.DataFrame({"A": range(5), "B": 5}) def f(): with np.errstate(all="ignore"): df.agg({"A": ["abs", "sum"], "B": ["mean", "max"]}, axis=axis) - @pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) - def test_transform_method_name(self, method): - # GH 19760 - df = pd.DataFrame({"A": [-1, 2]}) - result = df.transform(method) - expected = operator.methodcaller(method)(df) - tm.assert_frame_equal(result, expected) - def test_demo(self): # demonstration tests df = pd.DataFrame({"A": range(5), "B": 5}) diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py new file mode 100644 index 0000000000000..3a345215482ed --- /dev/null +++ b/pandas/tests/frame/apply/test_frame_transform.py @@ -0,0 +1,72 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.frame.common import zip_frames + + +def test_agg_transform(axis, float_frame): + other_axis = 1 if axis in {0, "index"} else 0 + 
+ with np.errstate(all="ignore"): + + f_abs = np.abs(float_frame) + f_sqrt = np.sqrt(float_frame) + + # ufunc + result = float_frame.transform(np.sqrt, axis=axis) + expected = f_sqrt.copy() + tm.assert_frame_equal(result, expected) + + result = float_frame.transform(np.sqrt, axis=axis) + tm.assert_frame_equal(result, expected) + + # list-like + expected = f_sqrt.copy() + if axis in {0, "index"}: + expected.columns = pd.MultiIndex.from_product( + [float_frame.columns, ["sqrt"]] + ) + else: + expected.index = pd.MultiIndex.from_product([float_frame.index, ["sqrt"]]) + result = float_frame.transform([np.sqrt], axis=axis) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both + # functions per series and then concatting + expected = zip_frames([f_abs, f_sqrt], axis=other_axis) + if axis in {0, "index"}: + expected.columns = pd.MultiIndex.from_product( + [float_frame.columns, ["absolute", "sqrt"]] + ) + else: + expected.index = pd.MultiIndex.from_product( + [float_frame.index, ["absolute", "sqrt"]] + ) + result = float_frame.transform([np.abs, "sqrt"], axis=axis) + tm.assert_frame_equal(result, expected) + + +def test_transform_and_agg_err(axis, float_frame): + # cannot both transform and agg + msg = "transforms cannot produce aggregated results" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "min"], axis=axis) + + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + float_frame.transform(["max", "sqrt"], axis=axis) + + +@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) +def test_transform_method_name(method): + # GH 19760 + df = pd.DataFrame({"A": [-1, 2]}) + result = df.transform(method) + expected = operator.methodcaller(method)(df) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py 
index 463a140972ab5..73e60ff389038 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -1,3 +1,8 @@ +from typing import List + +from pandas import DataFrame, concat + + def _check_mixed_float(df, dtype=None): # float16 are most likely to be upcasted to float32 dtypes = dict(A="float32", B="float32", C="float16", D="float64") @@ -29,3 +34,22 @@ def _check_mixed_int(df, dtype=None): assert df.dtypes["C"] == dtypes["C"] if dtypes.get("D"): assert df.dtypes["D"] == dtypes["D"] + + +def zip_frames(frames: List[DataFrame], axis: int = 1) -> DataFrame: + """ + take a list of frames, zip them together under the + assumption that these all have the first frames' index/columns. + + Returns + ------- + new_frame : DataFrame + """ + if axis == 1: + columns = frames[0].columns + zipped = [f.loc[:, c] for c in columns for f in frames] + return concat(zipped, axis=1) + else: + index = frames[0].index + zipped = [f.loc[i, :] for i in index for f in frames] + return DataFrame(zipped) diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py index 308398642895c..b948317f32062 100644 --- a/pandas/tests/series/apply/test_series_apply.py +++ b/pandas/tests/series/apply/test_series_apply.py @@ -209,25 +209,16 @@ def test_transform(self, string_series): f_abs = np.abs(string_series) # ufunc - result = string_series.transform(np.sqrt) expected = f_sqrt.copy() - tm.assert_series_equal(result, expected) - result = string_series.apply(np.sqrt) tm.assert_series_equal(result, expected) # list-like - result = string_series.transform([np.sqrt]) + result = string_series.apply([np.sqrt]) expected = f_sqrt.to_frame().copy() expected.columns = ["sqrt"] tm.assert_frame_equal(result, expected) - result = string_series.transform([np.sqrt]) - tm.assert_frame_equal(result, expected) - - result = string_series.transform(["sqrt"]) - tm.assert_frame_equal(result, expected) - # multiple items in list # these are in the order as if 
we are applying both functions per # series and then concatting @@ -236,10 +227,6 @@ def test_transform(self, string_series): result = string_series.apply([np.sqrt, np.abs]) tm.assert_frame_equal(result, expected) - result = string_series.transform(["sqrt", "abs"]) - expected.columns = ["sqrt", "abs"] - tm.assert_frame_equal(result, expected) - # dict, provide renaming expected = pd.concat([f_sqrt, f_abs], axis=1) expected.columns = ["foo", "bar"] @@ -250,19 +237,11 @@ def test_transform(self, string_series): def test_transform_and_agg_error(self, string_series): # we are trying to transform with an aggregator - msg = "transforms cannot produce aggregated results" - with pytest.raises(ValueError, match=msg): - string_series.transform(["min", "max"]) - msg = "cannot combine transform and aggregation" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): string_series.agg(["sqrt", "max"]) - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.transform(["sqrt", "max"]) - msg = "cannot perform both aggregation and transformation" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): @@ -463,14 +442,6 @@ def test_agg_cython_table_raises(self, series, func, expected): # e.g. 
Series('a b'.split()).cumprod() will raise series.agg(func) - def test_transform_none_to_type(self): - # GH34377 - df = pd.DataFrame({"a": [None]}) - - msg = "DataFrame constructor called with incompatible data and dtype" - with pytest.raises(TypeError, match=msg): - df.transform({"a": int}) - class TestSeriesMap: def test_map(self, datetime_series): diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py new file mode 100644 index 0000000000000..8bc3d2dc4d0db --- /dev/null +++ b/pandas/tests/series/apply/test_series_transform.py @@ -0,0 +1,59 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_transform(string_series): + # transforming functions + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(string_series) + f_abs = np.abs(string_series) + + # ufunc + result = string_series.transform(np.sqrt) + expected = f_sqrt.copy() + tm.assert_series_equal(result, expected) + + # list-like + result = string_series.transform([np.sqrt]) + expected = f_sqrt.to_frame().copy() + expected.columns = ["sqrt"] + tm.assert_frame_equal(result, expected) + + result = string_series.transform([np.sqrt]) + tm.assert_frame_equal(result, expected) + + result = string_series.transform(["sqrt"]) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = pd.concat([f_sqrt, f_abs], axis=1) + result = string_series.transform(["sqrt", "abs"]) + expected.columns = ["sqrt", "abs"] + tm.assert_frame_equal(result, expected) + + +def test_transform_and_agg_error(string_series): + # we are trying to transform with an aggregator + msg = "transforms cannot produce aggregated results" + with pytest.raises(ValueError, match=msg): + string_series.transform(["min", "max"]) + + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, 
match=msg): + with np.errstate(all="ignore"): + string_series.transform(["sqrt", "max"]) + + +def test_transform_none_to_type(): + # GH34377 + df = pd.DataFrame({"a": [None]}) + + msg = "DataFrame constructor called with incompatible data and dtype" + with pytest.raises(TypeError, match=msg): + df.transform({"a": int})
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Setup for #35964. There were two tests that are accidental duplicates; I've just copied them over to the transform modules. Will cleanup in #35964.
https://api.github.com/repos/pandas-dev/pandas/pulls/36146
2020-09-05T19:29:24Z
2020-09-06T17:11:05Z
2020-09-06T17:11:05Z
2020-09-10T00:07:40Z
DOC: add userwarning doc about mpl #35684
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b1229a5d5823d..d7d2e3cf876ca 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -301,7 +301,7 @@ Plotting ^^^^^^^^ - Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`) -- +- meth:`DataFrame.plot` and meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^
- [x] closes #35684 - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36145
2020-09-05T15:39:27Z
2020-09-05T19:50:44Z
2020-09-05T19:50:43Z
2020-09-06T05:16:07Z
STY+CI: check for private function access across modules
diff --git a/Makefile b/Makefile index f26689ab65ba5..4a9a48992f92f 100644 --- a/Makefile +++ b/Makefile @@ -25,3 +25,10 @@ doc: cd doc; \ python make.py clean; \ python make.py html + +check: + python3 scripts/validate_unwanted_patterns.py \ + --validation-type="private_function_across_module" \ + --included-file-extensions="py" \ + --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored \ + pandas/ diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 6006d09bc3e78..9efc53ced1b8f 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -116,6 +116,14 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then fi RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of private module attribute access' ; echo $MSG + if [[ "$GITHUB_ACTIONS" == "true" ]]; then + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + else + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + fi + RET=$(($RET + $?)) ; echo $MSG "DONE" + echo "isort --version-number" isort --version-number diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 0a70afda893cf..c4723a5f064c7 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -412,7 +412,7 @@ ctypedef fused algos_t: uint8_t -def _validate_limit(nobs: int, limit=None) -> int: +def validate_limit(nobs: int, limit=None) -> int: """ Check that the `limit` argument is a positive integer. 
@@ -452,7 +452,7 @@ def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): indexer = np.empty(nright, dtype=np.int64) indexer[:] = -1 - lim = _validate_limit(nright, limit) + lim = validate_limit(nright, limit) if nleft == 0 or nright == 0 or new[nright - 1] < old[0]: return indexer @@ -509,7 +509,7 @@ def pad_inplace(algos_t[:] values, const uint8_t[:] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) val = values[0] for i in range(N): @@ -537,7 +537,7 @@ def pad_2d_inplace(algos_t[:, :] values, const uint8_t[:, :] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) for j in range(K): fill_count = 0 @@ -593,7 +593,7 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: indexer = np.empty(nright, dtype=np.int64) indexer[:] = -1 - lim = _validate_limit(nright, limit) + lim = validate_limit(nright, limit) if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]: return indexer @@ -651,7 +651,7 @@ def backfill_inplace(algos_t[:] values, const uint8_t[:] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) val = values[N - 1] for i in range(N - 1, -1, -1): @@ -681,7 +681,7 @@ def backfill_2d_inplace(algos_t[:, :] values, if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) for j in range(K): fill_count = 0 diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f297c7165208f..50ec3714f454b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -60,7 +60,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import Categorical, DataFrame, Series + from pandas import Categorical, DataFrame, Series # noqa:F401 _shared_docs: Dict[str, str] = {} @@ -767,7 +767,7 @@ def value_counts( counts = result._values else: - keys, counts = _value_counts_arraylike(values, dropna) + keys, counts 
= value_counts_arraylike(values, dropna) result = Series(counts, index=keys, name=name) @@ -780,8 +780,8 @@ def value_counts( return result -# Called once from SparseArray -def _value_counts_arraylike(values, dropna: bool): +# Called once from SparseArray, otherwise could be private +def value_counts_arraylike(values, dropna: bool): """ Parameters ---------- diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 1531f7b292365..47c960dc969d6 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -735,7 +735,7 @@ def value_counts(self, dropna=True): """ from pandas import Index, Series - keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) + keys, counts = algos.value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0: if self._null_fill_value and dropna: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3bcd4debbf41a..9f4e535dc787d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -390,7 +390,7 @@ def fillna( mask = isna(self.values) if limit is not None: - limit = libalgos._validate_limit(None, limit=limit) + limit = libalgos.validate_limit(None, limit=limit) mask[mask.cumsum(self.ndim - 1) > limit] = False if not self._can_hold_na: diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 7802c5cbdbfb3..be66b19d10064 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -228,7 +228,7 @@ def interpolate_1d( ) # default limit is unlimited GH #16282 - limit = algos._validate_limit(nobs=None, limit=limit) + limit = algos.validate_limit(nobs=None, limit=limit) # These are sets of index pointers to invalid values... i.e. {0, 1, etc... 
all_nans = set(np.flatnonzero(invalid)) diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py index 7f107f18eca25..964596d9b6319 100644 --- a/pandas/plotting/_matplotlib/compat.py +++ b/pandas/plotting/_matplotlib/compat.py @@ -17,8 +17,8 @@ def inner(): return inner -_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge) -_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge) -_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) -_mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) -_mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) +mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge) +mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge) +mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) +mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) +mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index f0b35e1cd2a74..17f6696ba3480 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -30,7 +30,7 @@ import pandas.core.common as com from pandas.io.formats.printing import pprint_thing -from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 +from pandas.plotting._matplotlib.compat import mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.timeseries import ( @@ -939,7 +939,7 @@ def _plot_colorbar(self, ax: "Axes", **kwds): img = ax.collections[-1] cbar = self.fig.colorbar(img, ax=ax, **kwds) - if _mpl_ge_3_0_0(): + if mpl_ge_3_0_0(): # The workaround below is no longer necessary. 
return diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 98aaab6838fba..c5b44f37150bb 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -307,7 +307,7 @@ def handle_shared_axes( sharey: bool, ): if nplots > 1: - if compat._mpl_ge_3_2_0(): + if compat.mpl_ge_3_2_0(): row_num = lambda x: x.get_subplotspec().rowspan.start col_num = lambda x: x.get_subplotspec().colspan.start else: diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index b753c96af6290..9301a29933d45 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -28,10 +28,10 @@ def setup_method(self, method): mpl.rcdefaults() - self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3() - self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0() - self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0() - self.mpl_ge_3_2_0 = compat._mpl_ge_3_2_0() + self.mpl_ge_2_2_3 = compat.mpl_ge_2_2_3() + self.mpl_ge_3_0_0 = compat.mpl_ge_3_0_0() + self.mpl_ge_3_1_0 = compat.mpl_ge_3_1_0() + self.mpl_ge_3_2_0 = compat.mpl_ge_3_2_0() self.bp_n_objects = 7 self.polycollection_factor = 2 diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 128a7bdb6730a..9a323f2024721 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -51,7 +51,7 @@ def _assert_xtickslabels_visibility(self, axes, expected): @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True) @pytest.mark.slow def test_plot(self): - from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0 + from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0 df = self.tdf _check_plot_works(df.plot, grid=False) @@ -69,7 +69,7 @@ def test_plot(self): self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) df = DataFrame({"x": [1, 2], "y": [3, 4]}) - if _mpl_ge_3_1_0(): + if mpl_ge_3_1_0(): msg = "'Line2D' object has no property 'blarg'" else: msg = "Unknown property blarg" diff --git 
a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 130acaa8bcd58..0208ab3e0225b 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -96,7 +96,7 @@ def test_bootstrap_plot(self): class TestDataFramePlots(TestPlotBase): @td.skip_if_no_scipy def test_scatter_matrix_axis(self): - from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 + from pandas.plotting._matplotlib.compat import mpl_ge_3_0_0 scatter_matrix = plotting.scatter_matrix @@ -105,7 +105,7 @@ def test_scatter_matrix_axis(self): # we are plotting multiples on a sub-plot with tm.assert_produces_warning( - UserWarning, raise_on_extra_warnings=_mpl_ge_3_0_0() + UserWarning, raise_on_extra_warnings=mpl_ge_3_0_0() ): axes = _check_plot_works( scatter_matrix, filterwarnings="always", frame=df, range_padding=0.1 diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 193fef026a96b..1a6d8cc8b9914 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -16,9 +16,7 @@ import sys import token import tokenize -from typing import IO, Callable, FrozenSet, Iterable, List, Tuple - -PATHS_TO_IGNORE: Tuple[str, ...] = ("asv_bench/env",) +from typing import IO, Callable, FrozenSet, Iterable, List, Set, Tuple def _get_literal_string_prefix_len(token_string: str) -> int: @@ -114,6 +112,58 @@ def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: ) +PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"} # no known alternative + + +def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: + """ + Checking that a private function is not used across modules. + Parameters + ---------- + file_obj : IO + File-like object containing the Python code to validate. + Yields + ------ + line_number : int + Line number of the private function that is used across modules. + msg : str + Explenation of the error. 
+ """ + contents = file_obj.read() + tree = ast.parse(contents) + + imported_modules: Set[str] = set() + + for node in ast.walk(tree): + if isinstance(node, (ast.Import, ast.ImportFrom)): + for module in node.names: + module_fqdn = module.name if module.asname is None else module.asname + imported_modules.add(module_fqdn) + + if not isinstance(node, ast.Call): + continue + + try: + module_name = node.func.value.id + function_name = node.func.attr + except AttributeError: + continue + + # Exception section # + + # (Debatable) Class case + if module_name[0].isupper(): + continue + # (Debatable) Dunder methods case + elif function_name.startswith("__") and function_name.endswith("__"): + continue + elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED: + continue + + if module_name in imported_modules and function_name.startswith("_"): + yield (node.lineno, f"Private function '{module_name}.{function_name}'") + + def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: """ This test case is necessary after 'Black' (https://github.com/psf/black), @@ -293,6 +343,7 @@ def main( source_path: str, output_format: str, file_extensions_to_check: str, + excluded_file_paths: str, ) -> bool: """ Main entry point of the script. @@ -305,6 +356,10 @@ def main( Source path representing path to a file/directory. output_format : str Output format of the error message. + file_extensions_to_check : str + Coma seperated values of what file extensions to check. + excluded_file_paths : str + Coma seperated values of what file paths to exclude during the check. 
Returns ------- @@ -325,6 +380,7 @@ def main( FILE_EXTENSIONS_TO_CHECK: FrozenSet[str] = frozenset( file_extensions_to_check.split(",") ) + PATHS_TO_IGNORE = frozenset(excluded_file_paths.split(",")) if os.path.isfile(source_path): file_path = source_path @@ -362,6 +418,7 @@ def main( if __name__ == "__main__": available_validation_types: List[str] = [ "bare_pytest_raises", + "private_function_across_module", "strings_to_concatenate", "strings_with_wrong_placed_whitespace", ] @@ -389,6 +446,11 @@ def main( default="py,pyx,pxd,pxi", help="Coma seperated file extensions to check.", ) + parser.add_argument( + "--excluded-file-paths", + default="asv_bench/env", + help="Comma separated file extensions to check.", + ) args = parser.parse_args() @@ -398,5 +460,6 @@ def main( source_path=args.path, output_format=args.format, file_extensions_to_check=args.included_file_extensions, + excluded_file_paths=args.excluded_file_paths, ) )
The added checks here are broken off from #36055.
https://api.github.com/repos/pandas-dev/pandas/pulls/36144
2020-09-05T15:03:38Z
2020-09-05T17:53:22Z
2020-09-05T17:53:22Z
2021-04-12T15:30:07Z
CLN: unused case in compare_or_regex_search
diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 6ac3cc1f9f2fe..09f9aefd64096 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -3,7 +3,7 @@ """ import operator import re -from typing import Optional, Pattern, Union +from typing import Pattern, Union import numpy as np @@ -14,14 +14,10 @@ is_numeric_v_string_like, is_scalar, ) -from pandas.core.dtypes.missing import isna def compare_or_regex_search( - a: ArrayLike, - b: Union[Scalar, Pattern], - regex: bool = False, - mask: Optional[ArrayLike] = None, + a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike, ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -33,8 +29,8 @@ def compare_or_regex_search( ---------- a : array_like b : scalar or regex pattern - regex : bool, default False - mask : array_like or None (default) + regex : bool + mask : array_like Returns ------- @@ -68,8 +64,6 @@ def _check_comparison_types( ) # GH#32621 use mask to avoid comparing to NAs - if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): - mask = np.reshape(~(isna(a)), a.shape) if isinstance(a, np.ndarray): a = a[mask]
https://api.github.com/repos/pandas-dev/pandas/pulls/36143
2020-09-05T15:00:00Z
2020-09-05T17:54:30Z
2020-09-05T17:54:30Z
2020-09-05T18:49:02Z
Arrow string array dtype
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 8193d65b3b30c..9b1b2c0d74e3f 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -356,6 +356,8 @@ def __ne__(self, other: Any) -> ArrayLike: """ Return for `self != other` (element-wise in-equality). """ + if isinstance(other, (ABCSeries, ABCDataFrame, ABCIndexClass)): + return NotImplemented return ~(self == other) def to_numpy( @@ -459,6 +461,7 @@ def astype(self, dtype, copy=True): from pandas.core.arrays.string_ import StringDtype dtype = pandas_dtype(dtype) + # FIXME: Really hard-code here? if isinstance(dtype, StringDtype): # allow conversion to StringArrays return dtype.construct_array_type()._from_sequence(self, copy=False) @@ -924,9 +927,9 @@ def take( from the right (the default). This is similar to :func:`numpy.take`. - * True: negative values in `indices` indicate - missing values. These values are set to `fill_value`. Any other - other negative values raise a ``ValueError``. + * True: ``-1`` in `indices` indicate missing values. + These values are set to `fill_value`. Any other other negative + value raise a ``ValueError``. fill_value : any, optional Fill value to use for NA-indices when `allow_fill` is True. 
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 381968f9724b6..0e7c5a8036bcf 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -1,8 +1,10 @@ import operator -from typing import TYPE_CHECKING, Type, Union +from typing import TYPE_CHECKING, Any, Type, Union import numpy as np +from pandas._config import get_option + from pandas._libs import lib, missing as libmissing from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype @@ -50,17 +52,83 @@ class StringDtype(ExtensionDtype): StringDtype """ - name = "string" - #: StringDtype.na_value uses pandas.NA na_value = libmissing.NA + _metadata = ("storage",) + + def __init__(self, storage=None): + if storage is None: + storage = get_option("mode.string_storage") + if storage not in {"python", "pyarrow"}: + raise ValueError( + f"Storage must be 'python' or 'pyarrow'. Got {storage} instead." + ) + self.storage = storage + + @property + def name(self): + return f"StringDtype[{self.storage}]" @property def type(self) -> Type[str]: return str @classmethod - def construct_array_type(cls) -> Type["StringArray"]: + def construct_from_string(cls, string): + """ + Construct a StringDtype from a string. + + Parameters + ---------- + string : str + The type of the name. The storage type will be taking from `string`. + Valid options and their storage types are + + ========================== ============== + string result storage + ========================== ============== + ``'string'`` global default + ``'string[python]'`` python + ``'StringDtype[python]'`` python + ``'string[pyarrow]'`` pyarrow + ``'StringDtype[pyarrow]'`` pyarrow + ========================== ============= + + Returns + ------- + StringDtype + + Raise + ----- + TypeError + If the string is not a valid option. 
+ + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + if string == "string": + # TODO: use global default + return cls() + elif string in {"string[python]", "StringDtype[python]"}: + return cls(storage="python") + elif string in {"string[pyarrow]", "StringDtype[pyarrow]"}: + return cls(storage="pyarrow") + else: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + + def __eq__(self, other: Any) -> bool: + if isinstance(other, str) and other == "string": + return True + return super().__eq__(other) + + def __hash__(self) -> int: + # custom __eq__ so have to override __hash__ + return super().__hash__() + + # XXX: this is a classmethod, but we need to know the storage type. + def construct_array_type(self) -> Type["StringArray"]: """ Return the array type associated with this dtype. @@ -68,10 +136,15 @@ def construct_array_type(cls) -> Type["StringArray"]: ------- type """ - return StringArray + from .string_arrow import ArrowStringArray + + if self.storage == "python": + return StringArray + else: + return ArrowStringArray - def __repr__(self) -> str: - return "StringDtype" + def __repr__(self): + return self.name def __from_arrow__( self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"] @@ -80,6 +153,7 @@ def __from_arrow__( Construct StringArray from pyarrow Array/ChunkedArray. 
""" import pyarrow # noqa: F811 + from .string_arrow import ArrowStringArray if isinstance(array, pyarrow.Array): chunks = [array] @@ -93,7 +167,7 @@ def __from_arrow__( str_arr = StringArray._from_sequence(np.array(arr)) results.append(str_arr) - return StringArray._concat_same_type(results) + return ArrowStringArray._concat_same_type(results) class StringArray(PandasArray): diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py new file mode 100644 index 0000000000000..c0831a65b3644 --- /dev/null +++ b/pandas/core/arrays/string_arrow.py @@ -0,0 +1,436 @@ +from collections.abc import Iterable +from typing import Any, Optional, Sequence, Tuple, Union + +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc + +from pandas._libs import missing as libmissing +from pandas._typing import ArrayLike + +from pandas.core.dtypes.missing import isna + +from pandas.api.types import ( + is_array_like, + is_bool_dtype, + is_int64_dtype, + is_integer, + is_integer_dtype, + is_scalar, +) +from pandas.core.algorithms import factorize +from pandas.core.arrays.base import ExtensionArray +from pandas.core.arrays.string_ import StringDtype +from pandas.core.indexers import check_array_indexer + + +def _as_pandas_scalar(arrow_scalar: pa.Scalar) -> Optional[str]: + scalar = arrow_scalar.as_py() + if scalar is None: + return libmissing.NA + else: + return scalar + + +class ArrowStringArray(ExtensionArray): + """ + Extension array for string data in a ``pyarrow.ChunkedArray``. + + .. versionadded:: 1.1.0 + + .. warning:: + + ArrowStringArray is considered experimental. The implementation and + parts of the API may change without warning. + + Parameters + ---------- + values : pyarrow.Array or pyarrow.ChunkedArray + The array of data. + + Attributes + ---------- + None + + Methods + ------- + None + + See Also + -------- + array + The recommended function for creating a ArrowStringArray. 
+ Series.str + The string methods are available on Series backed by + a ArrowStringArray. + + Notes + ----- + ArrowStringArray returns a BooleanArray for comparison methods. + + Examples + -------- + >>> pd.array(['This is', 'some text', None, 'data.'], dtype="arrow_string") + <ArrowStringArray> + ['This is', 'some text', <NA>, 'data.'] + Length: 4, dtype: arrow_string + """ + + def __init__(self, values): + if isinstance(values, pa.Array): + self.data = pa.chunked_array([values]) + elif isinstance(values, pa.ChunkedArray): + self.data = values + else: + raise ValueError(f"Unsupported type '{type(values)}' for ArrowStringArray") + self._dtype = StringDtype(storage="pyarrow") + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + # TODO(ARROW-9407): Accept pd.NA in Arrow + scalars_corrected = [None if isna(x) else x for x in scalars] + return cls(pa.array(scalars_corrected, type=pa.string())) + + @property + def dtype(self) -> StringDtype: + """ + An instance of 'StringDtype'. + """ + return self._dtype + + def __array__(self, *args, **kwargs) -> "np.ndarray": + """Correctly construct numpy arrays when passed to `np.asarray()`.""" + return self.data.__array__(*args, **kwargs) + + def __arrow_array__(self, type=None): + """Convert myself to a pyarrow Array or ChunkedArray.""" + return self.data + + @property + def size(self) -> int: + """ + Return the number of elements in this array. + + Returns + ------- + size : int + """ + return len(self.data) + + @property + def shape(self) -> Tuple[int]: + """Return the shape of the data.""" + # This may be patched by pandas to support pseudo-2D operations. + return (len(self.data),) + + @property + def ndim(self) -> int: + """Return the number of dimensions of the underlying data.""" + return 1 + + def __len__(self) -> int: + """ + Length of this array. 
+ + Returns + ------- + length : int + """ + return len(self.data) + + @classmethod + def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): + return cls._from_sequence(strings, dtype=dtype, copy=copy) + + def __getitem__(self, item): + # type (Any) -> Any + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. + """ + item = check_array_indexer(self, item) + + if isinstance(item, Iterable): + if not is_array_like(item): + item = np.array(item) + if len(item) == 0: + return type(self)(pa.chunked_array([], type=pa.string())) + elif is_integer_dtype(item): + return self.take(item) + elif is_bool_dtype(item): + return type(self)(self.data.filter(item)) + else: + raise IndexError( + "Only integers, slices and integer or " + "boolean arrays are valid indices." + ) + elif is_integer(item): + if item < 0: + item += len(self) + if item >= len(self): + raise IndexError("index out of bounds") + + value = self.data[item] + if isinstance(value, pa.ChunkedArray): + return type(self)(value) + else: + return _as_pandas_scalar(value) + + @property + def nbytes(self) -> int: + """ + The number of bytes needed to store this object in memory. + """ + return self.data.nbytes + + def isna(self) -> np.ndarray: + """ + Boolean NumPy array indicating if each value is missing. + + This should return a 1-D array the same length as 'self'. 
+ """ + # TODO: Implement .to_numpy for ChunkedArray + return self.data.is_null().to_pandas().values + + def copy(self) -> ExtensionArray: + """ + Return a copy of the array. + + Parameters + ---------- + deep : bool, default False + Also copy the underlying data backing this array. + + Returns + ------- + ExtensionArray + """ + return type(self)(self.data) + + def __eq__(self, other: Any) -> ArrayLike: + """ + Return for `self == other` (element-wise equality). + """ + from pandas import array, Series, DataFrame, Index + + if isinstance(other, (Series, DataFrame, Index)): + return NotImplemented + if isinstance(other, ArrowStringArray): + result = pc.equal(self.data, other.data) + elif is_scalar(other): + result = pc.equal(self.data, pa.scalar(other)) + else: + raise NotImplementedError("Neither scalar nor ArrowStringArray") + + # TODO(ARROW-9429): Add a .to_numpy() to ChunkedArray + return array(result.to_pandas().values, dtype="boolean") + + def __setitem__(self, key, value): + # type: (Union[int, np.ndarray], Any) -> None + """Set one or more values inplace. + + Parameters + ---------- + key : int, ndarray, or slice + When called from, e.g. ``Series.__setitem__``, ``key`` will be + one of + + * scalar int + * ndarray of integers. + * boolean ndarray + * slice object + + value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object + value or values to be set of ``key``. + + Returns + ------- + None + """ + key = check_array_indexer(self, key) + + if is_integer(key): + if not is_scalar(value): + raise ValueError("Must pass scalars with scalar indexer") + elif isna(value): + value = None + elif not isinstance(value, str): + raise ValueError("Scalar must be NA or str") + + # Slice data and insert inbetween + new_data = [ + *self.data[0:key].chunks, + pa.array([value], type=pa.string()), + *self.data[(key + 1) :].chunks, + ] + self.data = pa.chunked_array(new_data) + else: + # Convert to integer indices and iteratively assign. 
+ # TODO: Make a faster variant of this in Arrow upstream. + # This is probably extremely slow. + + # Convert all possible input key types to an array of integers + if is_bool_dtype(key): + # TODO(ARROW-9430): Directly support setitem(booleans) + key_array = np.argwhere(key).flatten() + elif isinstance(key, slice): + key_array = np.array(range(len(self))[key]) + else: + # TODO(ARROW-9431): Directly support setitem(integers) + key_array = np.asanyarray(key) + + if is_scalar(value): + value = np.broadcast_to(value, len(key_array)) + else: + value = np.asarray(value) + + if len(key_array) != len(value): + raise ValueError("Length of indexer and values mismatch") + + for k, v in zip(key_array, value): + self[k] = v + + def take( + self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None + ) -> "ExtensionArray": + """ + Take elements from an array. + + Parameters + ---------- + indices : sequence of int + Indices to be taken. + allow_fill : bool, default False + How to handle negative values in `indices`. + + * False: negative values in `indices` indicate positional indices + from the right (the default). This is similar to + :func:`numpy.take`. + + * True: negative values in `indices` indicate + missing values. These values are set to `fill_value`. Any other + other negative values raise a ``ValueError``. + + fill_value : any, optional + Fill value to use for NA-indices when `allow_fill` is True. + This may be ``None``, in which case the default NA value for + the type, ``self.dtype.na_value``, is used. + + For many ExtensionArrays, there will be two representations of + `fill_value`: a user-facing "boxed" scalar, and a low-level + physical NA value. `fill_value` should be the user-facing version, + and the implementation should handle translating that to the + physical version for processing the take if necessary. + + Returns + ------- + ExtensionArray + + Raises + ------ + IndexError + When the indices are out of bounds for the array. 
+ ValueError + When `indices` contains negative values other than ``-1`` + and `allow_fill` is True. + + See Also + -------- + numpy.take + api.extensions.take + + Notes + ----- + ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``, + ``iloc``, when `indices` is a sequence of values. Additionally, + it's called by :meth:`Series.reindex`, or any other method + that causes realignment, with a `fill_value`. + """ + # TODO: Remove once we got rid of the (indices < 0) check + if not is_array_like(indices): + indices_array = np.asanyarray(indices) + else: + indices_array = indices + + if len(self.data) == 0 and (indices_array >= 0).any(): + raise IndexError("cannot do a non-empty take") + if len(indices_array) > 0 and indices_array.max() >= len(self.data): + raise IndexError("out of bounds value in 'indices'.") + + if allow_fill: + if (indices_array < 0).any(): + if indices_array.min() < -1: + raise ValueError( + "'indicies' contains negative values other " + "-1 with 'allow_fill=True." + ) + # TODO(ARROW-9433): Treat negative indices as NULL + indices_array = pa.array(indices_array, mask=indices_array < 0) + result = self.data.take(indices_array) + if isna(fill_value): + return type(self)(result) + return type(self)(pc.fill_null(result, pa.scalar(fill_value))) + else: + # Nothing to fill + return type(self)(self.data.take(indices)) + else: # allow_fill=False + # TODO(ARROW-9432): Treat negative indices as indices from the right. 
+ if (indices_array < 0).any(): + # Don't modify in-place + indices_array = np.copy(indices_array) + indices_array[indices_array < 0] += len(self.data) + return type(self)(self.data.take(indices_array)) + + def value_counts(self, dropna=True): + from pandas import Series + + if dropna: + na = self.isna() + self = self[~na] + counts = self.data.value_counts() + return Series(counts.field(1), counts.field(0)) + + def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"]: + # see https://github.com/xhochy/fletcher/blob/master/fletcher/base.py + # doesn't handle dictionary types. + if self.data.num_chunks == 1: + encoded = self.data.chunk(0).dictionary_encode() + indices = encoded.indices.to_pandas() + if indices.dtype.kind == "f": + indices[np.isnan(indices)] = na_sentinel + indices = indices.astype(int) + if not is_int64_dtype(indices): + indices = indices.astype(np.int64) + return indices.values, type(self)(encoded.dictionary) + else: + np_array = self.data.to_pandas().values + return factorize(np_array, na_sentinel=na_sentinel) + + @classmethod + def _concat_same_type( + cls, to_concat: Sequence["ArrowStringArray"] + ) -> "ArrowStringArray": + return cls( + pa.chunked_array( + [array for ea in to_concat for array in ea.data.iterchunks()] + ) + ) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 0c23f1b4bcdf2..a58e6eccf7644 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -504,6 +504,19 @@ def use_inf_as_na_cb(key): ) +string_storage_doc = """ +: string + The default storage for StringDtype. +""" + +with cf.config_prefix("mode"): + cf.register_option( + "string_storage", + "python", + string_storage_doc, + validator=is_one_of_factory(["python", "pyarrow"]), + ) + # Set up the io.excel specific reader configuration. 
reader_engine_doc = """ : string diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6702bf519c52e..59aa8fc5cfa0e 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -901,8 +901,10 @@ def _result_dtype(arr): # workaround #27953 # ideally we just pass `dtype=arr.dtype` unconditionally, but this fails # when the list of values is empty. - if arr.dtype.name == "string": - return "string" + from pandas.core.arrays.string_ import StringDtype + + if isinstance(arr.dtype.name, StringDtype): + return arr.dtype.name else: return object @@ -2097,9 +2099,11 @@ class StringMethods(NoNewAttributesMixin): """ def __init__(self, data): + from pandas.core.arrays.string_ import StringDtype + self._inferred_dtype = self._validate(data) self._is_categorical = is_categorical_dtype(data.dtype) - self._is_string = data.dtype.name == "string" + self._is_string = isinstance(data.dtype, StringDtype) # ._values.categories works for both Series/Index self._parent = data._values.categories if self._is_categorical else data diff --git a/pandas/tests/arrays/string_/test_string_arrow.py b/pandas/tests/arrays/string_/test_string_arrow.py new file mode 100644 index 0000000000000..40e3f21670ea0 --- /dev/null +++ b/pandas/tests/arrays/string_/test_string_arrow.py @@ -0,0 +1,26 @@ +import pytest + +import pandas as pd +import pandas.testing as tm + + +def test_eq_all_na(): + a = pd.array([pd.NA, pd.NA], dtype=pd.StringDtype("pyarrow")) + result = a == a + expected = pd.array([pd.NA, pd.NA], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_config(): + # python by default + assert pd.StringDtype().storage == "python" + arr = pd.array(["a", "b"]) + assert arr.dtype.storage == "python" + + with pd.option_context("mode.string_storage", "pyarrow"): + assert pd.StringDtype().storage == "pyarrow" + arr = pd.array(["a", "b"]) + assert arr.dtype.storage == "pyarrow" + + with pytest.raises(ValueError): + pd.options.mode.string_storage = "foo" diff 
--git a/pandas/tests/extension/arrow/test_string.py b/pandas/tests/extension/arrow/test_string.py index abd5c1f386dc5..f32f1e415ddc7 100644 --- a/pandas/tests/extension/arrow/test_string.py +++ b/pandas/tests/extension/arrow/test_string.py @@ -4,10 +4,9 @@ pytest.importorskip("pyarrow", minversion="0.13.0") -from .arrays import ArrowStringDtype # isort:skip - def test_constructor_from_list(): # GH 27673 - result = pd.Series(["E"], dtype=ArrowStringDtype()) - assert isinstance(result.dtype, ArrowStringDtype) + result = pd.Series(["E"], dtype=pd.StringDtype(storage="pyarrow")) + assert isinstance(result.dtype, pd.StringDtype) + assert result.dtype.storage == "pyarrow" diff --git a/pandas/tests/extension/test_string_arrow.py b/pandas/tests/extension/test_string_arrow.py new file mode 100644 index 0000000000000..848e8a435b530 --- /dev/null +++ b/pandas/tests/extension/test_string_arrow.py @@ -0,0 +1,150 @@ +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.string_arrow import ArrowStringArray +from pandas.tests.extension import base + + +@pytest.fixture +def dtype(): + return pd.StringDtype(storage="pyarrow") + + +@pytest.fixture +def data(): + strings = np.random.choice(list(string.ascii_letters), size=100) + while strings[0] == strings[1]: + strings = np.random.choice(list(string.ascii_letters), size=100) + + return ArrowStringArray._from_sequence(strings) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return ArrowStringArray._from_sequence([pd.NA, "A"]) + + +@pytest.fixture +def data_for_sorting(): + return ArrowStringArray._from_sequence(["B", "C", "A"]) + + +@pytest.fixture +def data_missing_for_sorting(): + return ArrowStringArray._from_sequence(["B", pd.NA, "A"]) + + +@pytest.fixture +def na_value(): + return pd.NA + + +@pytest.fixture +def data_for_grouping(): + return ArrowStringArray._from_sequence(["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"]) + + +class 
TestDtype(base.BaseDtypeTests): + pass + + +class TestInterface(base.BaseInterfaceTests): + @pytest.mark.xfail(reason="Fails until implement, remove before merge") + def test_view(self, data): + base.BaseInterfaceTests.test_view(self, data) + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + +class TestGetitem(base.BaseGetitemTests): + @pytest.mark.xfail( + reason="pyarrow.lib.ArrowNotImplementedError: Function " + "fill_null has no kernel matching input types " + "(array[string], scalar[string])" + ) + def test_take_non_na_fill_value(self, data_missing): + super().test_take_non_na_fill_value(data_missing) + + @pytest.mark.xfail( + reason="pyarrow.lib.ArrowNotImplementedError: Function fill_null has no " + "kernel matching input types (array[string], scalar[string])" + ) + def test_reindex_non_na_fill_value(self, data_missing): + super().test_reindex_non_na_fill_value(self, data_missing) + + +class TestSetitem(base.BaseSetitemTests): + @pytest.mark.xfail(reason="TODO") + def test_setitem_preserves_views(self, data): + # Unclear where the issue is (pyarrow getitem, our getitem, our slice) + # and what to do here. 
+ super().test_setitem_preserves_views(data) + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestNoReduce(base.BaseNoReduceTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): + op_name = all_numeric_reductions + + if op_name in ["min", "max"]: + return None + + s = pd.Series(data) + with pytest.raises(TypeError): + getattr(s, op_name)(skipna=skipna) + + +class TestMethods(base.BaseMethodsTests): + @pytest.mark.skip(reason="returns nullable") + def test_value_counts(self, all_data, dropna): + return super().test_value_counts(all_data, dropna) + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestComparisonOps(base.BaseComparisonOpsTests): + def _compare_other(self, s, data, op_name, other): + if op_name not in {"__eq__", "__ne__"}: + pytest.skip(f"{op_name} is not implemented.") + result = getattr(s, op_name)(other) + expected = getattr(s.astype(object), op_name)(other).astype("boolean") + self.assert_series_equal(result, expected) + + def test_compare_scalar(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + self._compare_other(s, data, op_name, "abc") + + def test_compare_array(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + other = pd.Series([data[0]] * len(data), dtype=data.dtype) + self._compare_other(s, data, op_name, other) + + +class TestParsing(base.BaseParsingTests): + pass + + +class TestPrinting(base.BasePrintingTests): + pass + + +class TestGroupBy(base.BaseGroupbyTests): + pass
This is a continuation of https://github.com/pandas-dev/pandas/pull/35259, updated to use a single `StringDtype` parameterized by `storage` (python or pyarrow). We do use different classes for the array though. I wouldn't recommend reviewing this yet. In particular, I need to refactor the string ops implementation. That will likely be done in a separate PR to keep things smaller here. ```python In [1]: import pandas as pd In [2]: a = pd.Series(pd.array([pd.NA, pd.NA], dtype=pd.StringDtype("python"))) In [3]: b = pd.Series(pd.array([pd.NA, pd.NA], dtype=pd.StringDtype("pyarrow"))) In [4]: a.array Out[4]: <StringArray> [<NA>, <NA>] Length: 2, dtype: StringDtype[python] In [5]: b.array Out[5]: <ArrowStringArray> [<NA>, <NA>] Length: 2, dtype: StringDtype[pyarrow] ``` Closes https://github.com/pandas-dev/pandas/issues/35169
https://api.github.com/repos/pandas-dev/pandas/pulls/36142
2020-09-05T14:59:38Z
2021-02-19T13:48:10Z
null
2021-02-19T14:05:49Z
BUG: copying series into empty dataframe does not preserve dataframe index name
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index da261907565a1..c40430256f64b 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -42,6 +42,7 @@ Bug fixes - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) - Bug in :meth:`import_optional_dependency` returning incorrect package names in cases where package name is different from import name (:issue:`35948`) +- Bug when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`31368`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e1a889bf79d95..59cf4c0e2f81d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3206,9 +3206,11 @@ def _ensure_valid_index(self, value): "and a value that cannot be converted to a Series" ) from err - self._mgr = self._mgr.reindex_axis( - value.index.copy(), axis=1, fill_value=np.nan - ) + # GH31368 preserve name of index + index_copy = value.index.copy() + index_copy.name = self.index.name + + self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values, loc: int) -> Series: """ diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 350f86b4e9fd0..7afbbc2b9ab2b 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -660,3 +660,15 @@ def test_indexing_timeseries_regression(self): expected = Series(rng, index=rng) tm.assert_series_equal(result, expected) + + def test_index_name_empty(self): + # GH 31368 + df = pd.DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + series = pd.Series(1.23, index=pd.RangeIndex(4, 
name="series_index")) + + df["series"] = series + expected = pd.DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") + ) + + tm.assert_frame_equal(df, expected)
- [x] closes #31368 - [x] tests added / passed `indexing/test_partial.py:TestPartialSetting:test_index_name_empty` - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36141
2020-09-05T14:58:57Z
2020-09-08T13:45:09Z
2020-09-08T13:45:09Z
2020-09-18T11:34:07Z
TYP: remove string literals for type annotations in pandas\core\frame.py
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c48bec9b670ad..8ee782d7c8584 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -420,7 +420,7 @@ class DataFrame(NDFrame): _typ = "dataframe" @property - def _constructor(self) -> Type["DataFrame"]: + def _constructor(self) -> Type[DataFrame]: return DataFrame _constructor_sliced: Type[Series] = Series @@ -1233,7 +1233,7 @@ def __rmatmul__(self, other): # IO methods (to / from other formats) @classmethod - def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> "DataFrame": + def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. @@ -1671,7 +1671,7 @@ def from_records( columns=None, coerce_float=False, nrows=None, - ) -> "DataFrame": + ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. @@ -2012,7 +2012,7 @@ def _from_arrays( index, dtype: Optional[Dtype] = None, verify_integrity: bool = True, - ) -> "DataFrame": + ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. @@ -2720,7 +2720,7 @@ def memory_usage(self, index=True, deep=False) -> Series: ).append(result) return result - def transpose(self, *args, copy: bool = False) -> "DataFrame": + def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. 
@@ -2843,7 +2843,7 @@ def transpose(self, *args, copy: bool = False) -> "DataFrame": return result.__finalize__(self, method="transpose") @property - def T(self) -> "DataFrame": + def T(self) -> DataFrame: return self.transpose() # ---------------------------------------------------------------------- @@ -3503,7 +3503,7 @@ def eval(self, expr, inplace=False, **kwargs): return _eval(expr, inplace=inplace, **kwargs) - def select_dtypes(self, include=None, exclude=None) -> "DataFrame": + def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. @@ -3667,7 +3667,7 @@ def insert(self, loc, column, value, allow_duplicates=False) -> None: value = self._sanitize_column(column, value, broadcast=False) self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates) - def assign(self, **kwargs) -> "DataFrame": + def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. @@ -3965,7 +3965,7 @@ def _reindex_columns( allow_dups=False, ) - def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame": + def _reindex_multi(self, axes, copy, fill_value) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ @@ -3998,7 +3998,7 @@ def align( limit=None, fill_axis=0, broadcast_axis=None, - ) -> "DataFrame": + ) -> DataFrame: return super().align( other, join=join, @@ -4067,7 +4067,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): ("tolerance", None), ], ) - def reindex(self, *args, **kwargs) -> "DataFrame": + def reindex(self, *args, **kwargs) -> DataFrame: axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex") kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names @@ -4229,7 +4229,7 @@ def rename( inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Alter axes labels. 
@@ -4357,7 +4357,7 @@ def fillna( inplace=False, limit=None, downcast=None, - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: return super().fillna( value=value, method=method, @@ -4465,7 +4465,7 @@ def _replace_columnwise( return res.__finalize__(self) @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) - def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame": + def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> DataFrame: return super().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value ) @@ -4666,7 +4666,7 @@ def reset_index( inplace: bool = False, col_level: Hashable = 0, col_fill: Label = "", - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Reset the index, or a level of it. @@ -4910,20 +4910,20 @@ def _maybe_casted_values(index, labels=None): # Reindex-based selection methods @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) - def isna(self) -> "DataFrame": + def isna(self) -> DataFrame: result = self._constructor(self._data.isna(func=isna)) return result.__finalize__(self, method="isna") @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) - def isnull(self) -> "DataFrame": + def isnull(self) -> DataFrame: return self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) - def notna(self) -> "DataFrame": + def notna(self) -> DataFrame: return ~self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) - def notnull(self) -> "DataFrame": + def notnull(self) -> DataFrame: return ~self.isna() def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): @@ -5074,7 +5074,7 @@ def drop_duplicates( keep: Union[str, bool] = "first", inplace: bool = False, ignore_index: bool = False, - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Return DataFrame with duplicate rows removed. 
@@ -5168,7 +5168,7 @@ def duplicated( self, subset: Optional[Union[Hashable, Sequence[Hashable]]] = None, keep: Union[str, bool] = "first", - ) -> "Series": + ) -> Series: """ Return boolean Series denoting duplicate rows. @@ -5619,7 +5619,7 @@ def value_counts( return counts - def nlargest(self, n, columns, keep="first") -> "DataFrame": + def nlargest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in descending order. @@ -5728,7 +5728,7 @@ def nlargest(self, n, columns, keep="first") -> "DataFrame": """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() - def nsmallest(self, n, columns, keep="first") -> "DataFrame": + def nsmallest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in ascending order. @@ -5830,7 +5830,7 @@ def nsmallest(self, n, columns, keep="first") -> "DataFrame": self, n=n, keep=keep, columns=columns ).nsmallest() - def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": + def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame: """ Swap levels i and j in a MultiIndex on a particular axis. @@ -5861,7 +5861,7 @@ def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": result.columns = result.columns.swaplevel(i, j) return result - def reorder_levels(self, order, axis=0) -> "DataFrame": + def reorder_levels(self, order, axis=0) -> DataFrame: """ Rearrange index levels using input order. May not drop or duplicate levels. 
@@ -5894,7 +5894,7 @@ def reorder_levels(self, order, axis=0) -> "DataFrame": # ---------------------------------------------------------------------- # Arithmetic / combination related - def _combine_frame(self, other: "DataFrame", func, fill_value=None): + def _combine_frame(self, other: DataFrame, func, fill_value=None): # at this point we have `self._indexed_same(other)` if fill_value is None: @@ -5914,7 +5914,7 @@ def _arith_op(left, right): new_data = ops.dispatch_to_series(self, other, _arith_op) return new_data - def _construct_result(self, result) -> "DataFrame": + def _construct_result(self, result) -> DataFrame: """ Wrap the result of an arithmetic, comparison, or logical operation. @@ -6031,11 +6031,11 @@ def _construct_result(self, result) -> "DataFrame": @Appender(_shared_docs["compare"] % _shared_doc_kwargs) def compare( self, - other: "DataFrame", + other: DataFrame, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, - ) -> "DataFrame": + ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, @@ -6044,8 +6044,8 @@ def compare( ) def combine( - self, other: "DataFrame", func, fill_value=None, overwrite=True - ) -> "DataFrame": + self, other: DataFrame, func, fill_value=None, overwrite=True + ) -> DataFrame: """ Perform column-wise combine with another DataFrame. @@ -6212,7 +6212,7 @@ def combine( # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) - def combine_first(self, other: "DataFrame") -> "DataFrame": + def combine_first(self, other: DataFrame) -> DataFrame: """ Update null elements with value in the same location in `other`. 
@@ -6718,7 +6718,7 @@ def groupby( @Substitution("") @Appender(_shared_docs["pivot"]) - def pivot(self, index=None, columns=None, values=None) -> "DataFrame": + def pivot(self, index=None, columns=None, values=None) -> DataFrame: from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) @@ -6870,7 +6870,7 @@ def pivot_table( dropna=True, margins_name="All", observed=False, - ) -> "DataFrame": + ) -> DataFrame: from pandas.core.reshape.pivot import pivot_table return pivot_table( @@ -7056,7 +7056,7 @@ def stack(self, level=-1, dropna=True): def explode( self, column: Union[str, Tuple], ignore_index: bool = False - ) -> "DataFrame": + ) -> DataFrame: """ Transform each element of a list-like to a row, replicating index values. @@ -7211,7 +7211,7 @@ def melt( value_name="value", col_level=None, ignore_index=True, - ) -> "DataFrame": + ) -> DataFrame: return melt( self, @@ -7299,7 +7299,7 @@ def melt( 1 255.0""" ), ) - def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame": + def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: bm_axis = self._get_block_manager_axis(axis) self._consolidate_inplace() @@ -7462,7 +7462,7 @@ def _aggregate(self, arg, axis=0, *args, **kwargs): klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) - def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame": + def transform(self, func, axis=0, *args, **kwargs) -> DataFrame: axis = self._get_axis_number(axis) if axis == 1: return self.T.transform(func, *args, **kwargs).T @@ -7616,7 +7616,7 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): ) return op.get_result() - def applymap(self, func) -> "DataFrame": + def applymap(self, func) -> DataFrame: """ Apply a function to a Dataframe elementwise. 
@@ -7678,7 +7678,7 @@ def infer(x): def append( self, other, ignore_index=False, verify_integrity=False, sort=False - ) -> "DataFrame": + ) -> DataFrame: """ Append rows of `other` to the end of caller, returning a new object. @@ -7818,7 +7818,7 @@ def append( def join( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False - ) -> "DataFrame": + ) -> DataFrame: """ Join columns of another DataFrame. @@ -8009,7 +8009,7 @@ def merge( copy=True, indicator=False, validate=None, - ) -> "DataFrame": + ) -> DataFrame: from pandas.core.reshape.merge import merge return merge( @@ -8028,7 +8028,7 @@ def merge( validate=validate, ) - def round(self, decimals=0, *args, **kwargs) -> "DataFrame": + def round(self, decimals=0, *args, **kwargs) -> DataFrame: """ Round a DataFrame to a variable number of decimal places. @@ -8142,7 +8142,7 @@ def _series_round(s, decimals): # ---------------------------------------------------------------------- # Statistical methods, etc. - def corr(self, method="pearson", min_periods=1) -> "DataFrame": + def corr(self, method="pearson", min_periods=1) -> DataFrame: """ Compute pairwise correlation of columns, excluding NA/null values. @@ -8233,7 +8233,7 @@ def corr(self, method="pearson", min_periods=1) -> "DataFrame": def cov( self, min_periods: Optional[int] = None, ddof: Optional[int] = 1 - ) -> "DataFrame": + ) -> DataFrame: """ Compute pairwise covariance of columns, excluding NA/null values. 
@@ -8636,7 +8636,7 @@ def func(values): else: return op(values, axis=axis, skipna=skipna, **kwds) - def _get_data(axis_matters: bool) -> "DataFrame": + def _get_data(axis_matters: bool) -> DataFrame: if filter_type is None: data = self._get_numeric_data() elif filter_type == "bool": @@ -8937,7 +8937,7 @@ def _get_agg_axis(self, axis_num: int) -> Index: else: raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") - def mode(self, axis=0, numeric_only=False, dropna=True) -> "DataFrame": + def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame: """ Get the mode(s) of each element along the selected axis. @@ -9122,7 +9122,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"): def to_timestamp( self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True - ) -> "DataFrame": + ) -> DataFrame: """ Cast to DatetimeIndex of timestamps, at *beginning* of period. @@ -9151,7 +9151,7 @@ def to_timestamp( setattr(new_obj, axis_name, new_ax) return new_obj - def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame": + def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame: """ Convert DataFrame from DatetimeIndex to PeriodIndex. @@ -9180,7 +9180,7 @@ def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame" setattr(new_obj, axis_name, new_ax) return new_obj - def isin(self, values) -> "DataFrame": + def isin(self, values) -> DataFrame: """ Whether each element in the DataFrame is contained in values. @@ -9287,10 +9287,10 @@ def isin(self, values) -> "DataFrame": _info_axis_number = 1 _info_axis_name = "columns" - index: "Index" = properties.AxisProperty( + index: Index = properties.AxisProperty( axis=1, doc="The index (row labels) of the DataFrame." ) - columns: "Index" = properties.AxisProperty( + columns: Index = properties.AxisProperty( axis=0, doc="The column labels of the DataFrame." )
https://api.github.com/repos/pandas-dev/pandas/pulls/36140
2020-09-05T14:17:16Z
2020-09-05T15:03:54Z
2020-09-05T15:03:54Z
2020-09-05T15:09:24Z
Updated series documentation to close #35406
diff --git a/pandas/core/series.py b/pandas/core/series.py index 9d84ce4b9ab2e..d8fdaa2a60252 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -164,9 +164,9 @@ class Series(base.IndexOpsMixin, generic.NDFrame): index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to - RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index - sequence are used, the index will override the keys found in the - dict. + RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like + and index is None, then the values in the index are used to + reindex the Series after it is created using the keys in the data. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`.
- [x ] closes #35406 - [x ] tests added / passed - [ x] passes `black pandas` - [ x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Not sure how to do whatsnew entry for this - first contribution! Do let me know how to improve. Output of scripts/validate_docstring.py: ``` ################################################################################ ########################## Docstring (pandas.Series) ########################## ################################################################################ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, *, **) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. .. versionchanged:: 0.23.0 If data is a dict, argument order is maintained for Python 3.6 and later. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the values in the index are used to reindex the Series after it is created using the keys in the data. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : str, optional The name to give to the Series. copy : bool, default False Copy input data. 
################################################################################ ################################## Validation ################################## ################################################################################ 3 Errors found: Parameters {'fastpath'} not documented See Also section not found No examples section found ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36139
2020-09-05T13:52:12Z
2020-09-05T14:44:27Z
2020-09-05T14:44:27Z
2020-09-13T15:28:37Z
TYP: misc property return types
diff --git a/pandas/_typing.py b/pandas/_typing.py index b237013ac7805..8ab64486ddbfa 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -15,6 +15,7 @@ List, Mapping, Optional, + Tuple, Type, TypeVar, Union, @@ -86,6 +87,9 @@ Ordered = Optional[bool] JSONSerializable = Optional[Union[PythonScalar, List, Dict]] Axes = Collection +Shape1D = Tuple[int] +Shape2D = Tuple[int, int] +Shape = Union[Shape1D, Shape2D] # For functions like rename that convert one label to another Renamer = Union[Mapping[Label, Any], Callable[[Label], Label]] diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 2976747d66dfa..277215f09f710 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -1,7 +1,8 @@ -from typing import Any, Sequence, Tuple, TypeVar +from typing import Any, Sequence, TypeVar import numpy as np +from pandas._typing import Shape from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly @@ -68,7 +69,7 @@ def _validate_fill_value(self, fill_value): # TODO: make this a cache_readonly; for that to work we need to remove # the _index_data kludge in libreduction @property - def shape(self) -> Tuple[int, ...]: + def shape(self) -> Shape: return self._ndarray.shape def __len__(self) -> int: diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 8193d65b3b30c..e0a88a95078c6 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -12,7 +12,7 @@ import numpy as np from pandas._libs import lib -from pandas._typing import ArrayLike +from pandas._typing import ArrayLike, Shape from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -405,7 +405,7 @@ def dtype(self) -> ExtensionDtype: raise AbstractMethodError(self) @property - def shape(self) -> Tuple[int, ...]: + def shape(self) -> Shape: """ Return a tuple of the array 
dimensions. """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c3c9009dda659..5425b19901d97 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1299,7 +1299,7 @@ def __setstate__(self, state): setattr(self, k, v) @property - def nbytes(self): + def nbytes(self) -> int: return self._codes.nbytes + self.dtype.categories.values.nbytes def memory_usage(self, deep=False): diff --git a/pandas/core/base.py b/pandas/core/base.py index 1926803d8f04b..b2a112377ae41 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -9,6 +9,7 @@ import numpy as np import pandas._libs.lib as lib +from pandas._typing import Shape from pandas.compat import PYPY from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -624,7 +625,7 @@ def transpose(self, *args, **kwargs): ) @property - def shape(self): + def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 29d6fb9aa7d56..a9439b4b09496 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -56,6 +56,7 @@ Label, Level, Renamer, + Shape2D, StorageOptions, ValueKeyFunc, ) @@ -586,7 +587,7 @@ def axes(self) -> List[Index]: return [self.index, self.columns] @property - def shape(self) -> Tuple[int, int]: + def shape(self) -> Shape2D: """ Return a tuple representing the dimensionality of the DataFrame. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 93c945638a174..59a5cef410cff 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -44,6 +44,7 @@ Label, Level, Renamer, + Shape, StorageOptions, TimedeltaConvertibleTypes, TimestampConvertibleTypes, @@ -557,11 +558,13 @@ def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) @property - def shape(self) -> Tuple[int, ...]: + def shape(self) -> Shape: """ Return a tuple of axis dimensions """ - return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) + result = tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) + # len(_AXIS_ORDERS) is 1 for Series, 2 for DataFrame + return cast(Shape, result) @property def axes(self) -> List[Index]: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 65b5dfb6df911..ae3ee51587a44 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -24,7 +24,7 @@ from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare -from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label +from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label, Shape from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import DuplicateLabelError, InvalidIndexError @@ -5520,7 +5520,7 @@ def _add_logical_methods_disabled(cls): cls.any = make_invalid_op("any") @property - def shape(self): + def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. 
""" diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f66b009e6d505..e2275fceaf3b1 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -19,7 +19,7 @@ from pandas._libs import algos as libalgos, index as libindex, lib from pandas._libs.hashtable import duplicated_int64 -from pandas._typing import AnyArrayLike, Label, Scalar +from pandas._typing import AnyArrayLike, Label, Scalar, Shape1D from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, doc @@ -678,7 +678,7 @@ def array(self): ) @property - def shape(self): + def shape(self) -> Shape1D: """ Return a tuple of the shape of the underlying data. """ diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3bcd4debbf41a..4289519aeda03 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -11,7 +11,7 @@ from pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import tz_compare -from pandas._typing import ArrayLike, Scalar +from pandas._typing import ArrayLike, Scalar, Shape from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -309,7 +309,7 @@ def getitem_block(self, slicer, new_mgr_locs=None): return type(self)._simple_new(new_values, new_mgr_locs, self.ndim) @property - def shape(self): + def shape(self) -> Shape: return self.values.shape @property @@ -1625,10 +1625,10 @@ def __init__(self, values, placement, ndim=None): raise AssertionError("block.size != values.size") @property - def shape(self): + def shape(self) -> Shape: # TODO(EA2D): override unnecessary with 2D EAs if self.ndim == 1: - return ((len(self.values)),) + return (len(self.values),) return (len(self.mgr_locs), len(self.values)) def iget(self, col): diff --git 
a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 57a4a8c2ace8a..8e8295b497622 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -7,16 +7,16 @@ List, Optional, Sequence, - Tuple, TypeVar, Union, + cast, ) import warnings import numpy as np from pandas._libs import internals as libinternals, lib -from pandas._typing import ArrayLike, DtypeObj, Label +from pandas._typing import ArrayLike, DtypeObj, Label, Shape from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -128,8 +128,9 @@ def __init__( axes: Sequence[Index], do_integrity_check: bool = True, ): + assert len(axes) == 2 self.axes = [ensure_index(ax) for ax in axes] - self.blocks: Tuple[Block, ...] = tuple(blocks) + self.blocks = tuple(blocks) for block in blocks: if self.ndim != block.ndim: @@ -203,8 +204,10 @@ def __nonzero__(self) -> bool: __bool__ = __nonzero__ @property - def shape(self) -> Tuple[int, ...]: - return tuple(len(ax) for ax in self.axes) + def shape(self) -> Shape: + result = tuple(len(ax) for ax in self.axes) + # len(self.axis)==2 for BlockManager, 1 for SingleBlockManager + return cast(Shape, result) @property def ndim(self) -> int: diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index 8a18f505058bc..5e930b7b22f30 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -68,6 +68,8 @@ def construct_array_type(cls) -> Type["ArrowStringArray"]: class ArrowExtensionArray(ExtensionArray): + _data: pa.ChunkedArray + @classmethod def from_scalars(cls, values): arr = pa.chunked_array([pa.array(np.asarray(values))]) @@ -129,7 +131,7 @@ def __or__(self, other): return self._boolean_op(other, operator.or_) @property - def nbytes(self): + def nbytes(self) -> int: return sum( x.size for chunk in self._data.chunks
https://api.github.com/repos/pandas-dev/pandas/pulls/36138
2020-09-05T13:33:15Z
2020-09-13T19:12:11Z
null
2020-09-13T19:12:11Z
TYP: Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 6006d09bc3e78..8ee579cd25203 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -230,6 +230,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include=*.{py,pyx} '!r}' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + # ------------------------------------------------------------------------- + # Type annotations + MSG='Check for use of comment-based annotation syntax' ; echo $MSG invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" @@ -238,6 +241,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -P '# type:\s?ignore(?!\[)' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias' ; echo $MSG + invgrep -R --include="*.py" --exclude=_typing.py -E 'Union\[.*(Series.*DataFrame|DataFrame.*Series).*\]' pandas + RET=$(($RET + $?)) ; echo $MSG "DONE" + + # ------------------------------------------------------------------------- MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG invgrep -R --include=*.{py,pyx} '\.__class__' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 99a9e1377563c..bbf832f33065b 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1,12 +1,12 @@ import abc import inspect -from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type, Union +from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type import numpy as np from pandas._config import option_context -from pandas._typing import Axis +from pandas._typing import Axis, FrameOrSeriesUnion from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import is_dict_like, is_list_like, is_sequence @@ -73,7 +73,7 @@ def series_generator(self) -> Iterator["Series"]: @abc.abstractmethod def wrap_results_for_axis( self, results: 
ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + ) -> FrameOrSeriesUnion: pass # --------------------------------------------------------------- @@ -289,9 +289,7 @@ def apply_series_generator(self) -> Tuple[ResType, "Index"]: return results, res_index - def wrap_results( - self, results: ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + def wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUnion: from pandas import Series # see if we can infer the results @@ -335,7 +333,7 @@ def result_columns(self) -> "Index": def wrap_results_for_axis( self, results: ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + ) -> FrameOrSeriesUnion: """ return the results for the rows """ if self.result_type == "reduce": @@ -408,9 +406,9 @@ def result_columns(self) -> "Index": def wrap_results_for_axis( self, results: ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + ) -> FrameOrSeriesUnion: """ return the results for the columns """ - result: Union["Series", "DataFrame"] + result: FrameOrSeriesUnion # we have requested to expand if self.result_type == "expand": diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b855ce65f41b2..260e21b1f2593 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -308,7 +308,7 @@ def _aggregate_multiple_funcs(self, arg): arg = zip(columns, arg) - results: Dict[base.OutputKey, Union[Series, DataFrame]] = {} + results: Dict[base.OutputKey, FrameOrSeriesUnion] = {} for idx, (name, func) in enumerate(arg): obj = self @@ -332,7 +332,7 @@ def _wrap_series_output( self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Optional[Index], - ) -> Union[Series, DataFrame]: + ) -> FrameOrSeriesUnion: """ Wraps the output of a SeriesGroupBy operation into the expected result. 
@@ -355,7 +355,7 @@ def _wrap_series_output( indexed_output = {key.position: val for key, val in output.items()} columns = Index(key.label for key in output) - result: Union[Series, DataFrame] + result: FrameOrSeriesUnion if len(output) > 1: result = self.obj._constructor_expanddim(indexed_output, index=index) result.columns = columns @@ -373,7 +373,7 @@ def _wrap_aggregated_output( self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Optional[Index], - ) -> Union[Series, DataFrame]: + ) -> FrameOrSeriesUnion: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. @@ -1085,7 +1085,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike: raise # We get here with a) EADtypes and b) object dtype - obj: Union[Series, DataFrame] + obj: FrameOrSeriesUnion # call our grouper again with only this block if isinstance(bvalues, ExtensionArray): # TODO(EA2D): special case not needed with 2D EAs diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 6678edc3821c8..59ea7781025c4 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -393,7 +393,7 @@ class Grouping: ---------- index : Index grouper : - obj Union[DataFrame, Series]: + obj : DataFrame or Series name : Label level : observed : bool, default False diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 602ff226f8878..f1c5486222ea1 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -6,14 +6,14 @@ import datetime from functools import partial import string -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Tuple import warnings import numpy as np from pandas._libs import Timedelta, hashtable as libhashtable, lib import pandas._libs.join as libjoin -from pandas._typing import ArrayLike, FrameOrSeries +from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion from pandas.errors import MergeError from 
pandas.util._decorators import Appender, Substitution @@ -51,7 +51,7 @@ from pandas.core.sorting import is_int64_overflow_possible if TYPE_CHECKING: - from pandas import DataFrame, Series # noqa:F401 + from pandas import DataFrame # noqa:F401 @Substitution("\nleft : DataFrame") @@ -575,8 +575,8 @@ class _MergeOperation: def __init__( self, - left: Union["Series", "DataFrame"], - right: Union["Series", "DataFrame"], + left: FrameOrSeriesUnion, + right: FrameOrSeriesUnion, how: str = "inner", on=None, left_on=None, diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 969ac56e41860..842a42f80e1b7 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -12,7 +12,7 @@ import numpy as np -from pandas._typing import Label +from pandas._typing import FrameOrSeriesUnion, Label from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.cast import maybe_downcast_to_dtype @@ -200,7 +200,7 @@ def pivot_table( def _add_margins( - table: Union["Series", "DataFrame"], + table: FrameOrSeriesUnion, data, values, rows, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0913627324c48..e850a101a0a63 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -16,7 +16,7 @@ from pandas._libs import lib, writers as libwriters from pandas._libs.tslibs import timezones -from pandas._typing import ArrayLike, FrameOrSeries, Label +from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import PerformanceWarning @@ -2566,7 +2566,7 @@ class Fixed: pandas_kind: str format_type: str = "fixed" # GH#30962 needed by dask - obj_type: Type[Union[DataFrame, Series]] + obj_type: Type[FrameOrSeriesUnion] ndim: int encoding: str parent: HDFStore @@ -4442,7 +4442,7 @@ class AppendableFrameTable(AppendableTable): pandas_kind = "frame_table" table_type = 
"appendable_frame" ndim = 2 - obj_type: Type[Union[DataFrame, Series]] = DataFrame + obj_type: Type[FrameOrSeriesUnion] = DataFrame @property def is_transposed(self) -> bool:
https://api.github.com/repos/pandas-dev/pandas/pulls/36137
2020-09-05T12:11:57Z
2020-09-05T14:55:12Z
2020-09-05T14:55:12Z
2020-09-05T15:10:22Z
API: Make ExtensionDtype.construct_array_type a method
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 1617bf66c4f04..a98134e547630 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -57,6 +57,13 @@ Other enhancements - - +.. _whatsnew_120.api_breaking.experimental: + +Changes to experimental APIs +---------------------------- + +- :meth:`pandas.api.extensions.ExtensionDtype.construct_array_type` has changed from a classmethod to a regular method to support one dtype being used for multiple arrays. To migrate, change your definition to a regular method and ensure that your method is called on instances rather than the class (:issue:`36126`). + .. _whatsnew_120.api_breaking.python: Increased minimum version for Python diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index bd4bdc5ecb46f..807bf0f6ddf88 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -70,8 +70,7 @@ def kind(self) -> str: def numpy_dtype(self) -> np.dtype: return np.dtype("bool") - @classmethod - def construct_array_type(cls) -> Type["BooleanArray"]: + def construct_array_type(self) -> Type["BooleanArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d83ff91a1315f..bf6e0163d0b3b 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -80,8 +80,7 @@ def itemsize(self) -> int: """ Return the number of bytes in this dtype """ return self.numpy_dtype.itemsize - @classmethod - def construct_array_type(cls) -> Type["IntegerArray"]: + def construct_array_type(self) -> Type["IntegerArray"]: """ Return the array type associated with this dtype. 
diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 1237dea5c1a64..6e7d3dbcceeaf 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -40,8 +40,7 @@ class BaseMaskedDtype(ExtensionDtype): def numpy_dtype(self) -> np.dtype: raise AbstractMethodError - @classmethod - def construct_array_type(cls) -> Type["BaseMaskedArray"]: + def construct_array_type(self) -> Type["BaseMaskedArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 23a4a70734c81..4b3db24e343b3 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -94,8 +94,7 @@ def construct_from_string(cls, string: str) -> "PandasDtype": raise TypeError(msg) from err return cls(dtype) - @classmethod - def construct_array_type(cls) -> Type["PandasArray"]: + def construct_array_type(self) -> Type["PandasArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index ccf2825162f51..fef995884d781 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -171,8 +171,7 @@ def name(self): def __repr__(self) -> str: return self.name - @classmethod - def construct_array_type(cls) -> Type["SparseArray"]: + def construct_array_type(self) -> Type["SparseArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/core/construction.py b/pandas/core/construction.py index f145e76046bee..c15440014c33c 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -280,6 +280,9 @@ def array( # this returns None for not-found dtypes. 
if isinstance(dtype, str): dtype = registry.find(dtype) or dtype + if isinstance(dtype, type) and issubclass(dtype, ExtensionDtype): + # Needed for test_array_not_registered GH#36136 + dtype = dtype() if is_extension_array_dtype(dtype): cls = cast(ExtensionDtype, dtype).construct_array_type() diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 07c73876954d0..a9cc8c9f25d2e 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -187,8 +187,7 @@ def names(self) -> Optional[List[str]]: """ return None - @classmethod - def construct_array_type(cls) -> Type["ExtensionArray"]: + def construct_array_type(self) -> Type["ExtensionArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8dc500dddeafa..a8d8501eb86f8 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -430,8 +430,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: hashed = _combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(hashed) - @classmethod - def construct_array_type(cls) -> Type["Categorical"]: + def construct_array_type(self) -> Type["Categorical"]: """ Return the array type associated with this dtype. @@ -679,8 +678,7 @@ def tz(self): """ return self._tz - @classmethod - def construct_array_type(cls) -> Type["DatetimeArray"]: + def construct_array_type(self) -> Type["DatetimeArray"]: """ Return the array type associated with this dtype. @@ -922,8 +920,7 @@ def is_dtype(cls, dtype: object) -> bool: return False return super().is_dtype(dtype) - @classmethod - def construct_array_type(cls) -> Type["PeriodArray"]: + def construct_array_type(self) -> Type["PeriodArray"]: """ Return the array type associated with this dtype. 
@@ -1047,8 +1044,7 @@ def subtype(self): """ return self._subtype - @classmethod - def construct_array_type(cls) -> Type["IntervalArray"]: + def construct_array_type(self) -> Type["IntervalArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index a0525aa511ee2..da771edd42608 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -277,8 +277,7 @@ def test_scalar_raises(): class DecimalDtype2(DecimalDtype): name = "decimal2" - @classmethod - def construct_array_type(cls): + def construct_array_type(self): """ Return the array type associated with this dtype. diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index 8a18f505058bc..6a837f94ae746 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -31,8 +31,7 @@ class ArrowBoolDtype(ExtensionDtype): name = "arrow_bool" na_value = pa.NULL - @classmethod - def construct_array_type(cls) -> Type["ArrowBoolArray"]: + def construct_array_type(self) -> Type["ArrowBoolArray"]: """ Return the array type associated with this dtype. @@ -55,8 +54,7 @@ class ArrowStringDtype(ExtensionDtype): name = "arrow_string" na_value = pa.NULL - @classmethod - def construct_array_type(cls) -> Type["ArrowStringArray"]: + def construct_array_type(self) -> Type["ArrowStringArray"]: """ Return the array type associated with this dtype. 
diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 2fbeec8dd8378..4626b4a30a60b 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -28,8 +28,7 @@ def __init__(self, context=None): def __repr__(self) -> str: return f"DecimalDtype(context={self.context})" - @classmethod - def construct_array_type(cls) -> Type["DecimalArray"]: + def construct_array_type(self) -> Type["DecimalArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 447a6108fc3c7..4094a2d9bd28c 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -32,8 +32,7 @@ class JSONDtype(ExtensionDtype): name = "json" na_value: Mapping[str, Any] = UserDict() - @classmethod - def construct_array_type(cls) -> Type["JSONArray"]: + def construct_array_type(self) -> Type["JSONArray"]: """ Return the array type associated with this dtype. diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py index d86f90e58d897..8cca8b00614c1 100644 --- a/pandas/tests/extension/list/array.py +++ b/pandas/tests/extension/list/array.py @@ -21,8 +21,7 @@ class ListDtype(ExtensionDtype): name = "list" na_value = np.nan - @classmethod - def construct_array_type(cls) -> Type["ListArray"]: + def construct_array_type(self) -> Type["ListArray"]: """ Return the array type associated with this dtype.
This allows a single dtype to support multiple array classes. For arrow-backed strings, we'll likely want a separate array class for ease of implementation, clarity. But we'll have a parametrized dtype. ```python class StringDtype: def __init__(self, storage="python"): self.storage = storage def construct_array_type(self): # regular method if self.storage == "python": return StringArray else: return ArrowStringArray ``` Closes #36126
https://api.github.com/repos/pandas-dev/pandas/pulls/36136
2020-09-05T11:10:53Z
2020-10-19T14:01:43Z
null
2020-10-19T19:04:26Z
Backport PR #36114 on branch 1.1.x (REGR: fix consolidation/cache issue with take operation)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 39850905f60fa..d1a66256454ca 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) +- Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 67e5759b39808..935bad2624637 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3342,6 +3342,8 @@ class max_speed nv.validate_take(tuple(), kwargs) + self._consolidate_inplace() + new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True ) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index b8183eb9f4185..217409d56c3ab 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -640,3 +640,26 @@ def test_update_inplace_sets_valid_block_values(): # smoketest for OP bug from GH#35731 assert df.isnull().sum().sum() == 0 + + +def test_nonconsolidated_item_cache_take(): + # https://github.com/pandas-dev/pandas/issues/35521 + + # 
create non-consolidated dataframe with object dtype columns + df = pd.DataFrame() + df["col1"] = pd.Series(["a"], dtype=object) + df["col2"] = pd.Series([0], dtype=object) + + # access column (item cache) + df["col1"] == "A" + # take operation + # (regression was that this consolidated but didn't reset item cache, + # resulting in an invalid cache and the .at operation not working properly) + df[df["col2"] == 0] + + # now setting value should update actual dataframe + df.at[0, "col1"] = "A" + + expected = pd.DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + tm.assert_frame_equal(df, expected) + assert df.at[0, "col1"] == "A"
Backport PR #36114: REGR: fix consolidation/cache issue with take operation
https://api.github.com/repos/pandas-dev/pandas/pulls/36135
2020-09-05T06:05:41Z
2020-09-05T07:56:14Z
2020-09-05T07:56:14Z
2020-09-05T07:56:14Z
BUG: shows correct package name when import_optional_dependency is ca…
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index b8f6d0e52d058..e3aa7194f04ab 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -39,6 +39,7 @@ Bug fixes - Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) +- Bug in :meth:`import_optional_dependency` returning incorrect package names in cases where package name is different from import name (:issue:`35948`) .. --------------------------------------------------------------------------- diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 689c7c889ef66..40688a3978cfc 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -33,6 +33,19 @@ "numba": "0.46.0", } +# A mapping from import name to package name (on PyPI) for packages where +# these two names are different. + +INSTALL_MAPPING = { + "bs4": "beautifulsoup4", + "bottleneck": "Bottleneck", + "lxml.etree": "lxml", + "odf": "odfpy", + "pandas_gbq": "pandas-gbq", + "sqlalchemy": "SQLAlchemy", + "jinja2": "Jinja2", +} + def _get_version(module: types.ModuleType) -> str: version = getattr(module, "__version__", None) @@ -82,9 +95,13 @@ def import_optional_dependency( is False, or when the package's version is too old and `on_version` is ``'warn'``. """ + + package_name = INSTALL_MAPPING.get(name) + install_name = package_name if package_name is not None else name + msg = ( - f"Missing optional dependency '{name}'. {extra} " - f"Use pip or conda to install {name}." + f"Missing optional dependency '{install_name}'. {extra} " + f"Use pip or conda to install {install_name}." 
) try: module = importlib.import_module(name)
…lled. fixes #35948 - [x] closes #35948 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36134
2020-09-05T04:19:41Z
2020-09-07T11:16:12Z
2020-09-07T11:16:12Z
2020-09-07T19:30:44Z
WIP/DEPR: DataFrame reductions match Series behavior
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c48bec9b670ad..c0f0c11f1bb43 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8681,6 +8681,17 @@ def blk_func(values): assert numeric_only is None + def reduce_columnwise(self, func): + from pandas.core.apply import frame_apply + + opa = frame_apply( + self, func=func, result_type="expand", ignore_failures=True + ) + result = opa.get_result() + if result.ndim == self.ndim and len(result): + result = result.iloc[0].rename(None) + return result + if not self._is_homogeneous_type or self._mgr.any_extension_types: # try to avoid self.values call @@ -8698,15 +8709,7 @@ def blk_func(values): # numeric_only and yet we have tried a # column-by-column reduction, where we have mixed type. # So let's just do what we can - from pandas.core.apply import frame_apply - - opa = frame_apply( - self, func=func, result_type="expand", ignore_failures=True - ) - result = opa.get_result() - if result.ndim == self.ndim: - result = result.iloc[0].rename(None) - return result + return reduce_columnwise(self, func) data = self values = data.values @@ -8738,6 +8741,30 @@ def blk_func(values): if constructor is not None: result = self._constructor_sliced(result, index=labels) + + check_func = ( + lambda x: is_extension_array_dtype(x) + or is_object_dtype(x) + or needs_i8_conversion(x) + ) + msg = ( + "In a future version, DataFrame reduction operations with " + "ExtensionDtype or ObjectDtype will be performed column-wise. " + "To keep the old behavior, explicitly cast columns to bool/numeric " + "dtypes." 
+ ) + if axis == 0: + if self.dtypes.apply(check_func).any(): + v2 = reduce_columnwise(self, func) + if type(v2) != type(result) or not v2.equals(result): + warnings.warn(msg, FutureWarning) + else: + if self.dtypes.apply(check_func).any(): + v1 = reduce_columnwise(self.T, func) + v2 = reduce_columnwise(_get_data(True).T, func) + if not (v1.equals(v2) and result.equals(v2)): + warnings.warn(msg, FutureWarning) + return result def nunique(self, axis=0, dropna=True) -> Series:
#34479, #36076 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry cc @jorisvandenbossche this feels klunky, and the axis==1 part im pretty unsure of. how would you approach this?
https://api.github.com/repos/pandas-dev/pandas/pulls/36133
2020-09-05T03:34:39Z
2020-09-05T16:47:32Z
null
2021-11-20T23:22:46Z
API: reimplement FixedWindowIndexer.get_window_bounds to fix groupby bug
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2a8b6fe3ade6a..1f7cb2d64bcdb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -347,6 +347,7 @@ Groupby/resample/rolling - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) +- Bug in :meth:`DataFrame.groupby.rolling` output incorrect when using a partial window (:issue:`36040`) - Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) - Bug in :meth:`Rolling.sum()` returned wrong values when dtypes where mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`) diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 6452eb8c6b3a9..3f7323abfb552 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -71,6 +71,7 @@ def _apply( is_weighted: bool = False, name: Optional[str] = None, use_numba_cache: bool = False, + skip_offset: bool = False, **kwargs, ): """ diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index a21521f4ce8bb..d8aeed0578836 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -78,17 +78,17 @@ def get_window_bounds( closed: Optional[str] = None, ) -> Tuple[np.ndarray, np.ndarray]: - start_s = np.zeros(self.window_size, dtype="int64") - start_e = ( - np.arange(self.window_size, num_values, dtype="int64") - - self.window_size - + 1 - 
) - start = np.concatenate([start_s, start_e])[:num_values] + if center: + offset = (self.window_size - 1) // 2 + else: + offset = 0 + + end = np.arange(1 + offset, num_values + 1 + offset).astype("int64") + start = end - self.window_size + + end = np.clip(end, 0, num_values) + start = np.clip(start, 0, num_values) - end_s = np.arange(self.window_size, dtype="int64") + 1 - end_e = start_e + self.window_size - end = np.concatenate([end_s, end_e])[:num_values] return start, end diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 335fc3db5cd86..94de560dbdbcb 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -72,24 +72,6 @@ from pandas.core.internals import Block # noqa:F401 -def calculate_center_offset(window) -> int: - """ - Calculate an offset necessary to have the window label to be centered. - - Parameters - ---------- - window: ndarray or int - window weights or window - - Returns - ------- - int - """ - if not is_integer(window): - window = len(window) - return int((window - 1) / 2.0) - - def calculate_min_periods( window: int, min_periods: Optional[int], @@ -417,18 +399,44 @@ def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"): # insert at the end result[name] = extra_col - def _center_window(self, result: np.ndarray, window) -> np.ndarray: + def calculate_center_offset(self, window, center: bool) -> int: + """ + Calculate an offset necessary to have the window label to be centered. + + Parameters + ---------- + window : ndarray or int + window weights or window + center : bool + Set the labels at the center of the window. + + Returns + ------- + int + """ + if not center: + return 0 + + if self.is_freq_type or isinstance(self.window, BaseIndexer): + return 0 + + if not is_integer(window): + window = len(window) + return int((window - 1) / 2.0) + + def _center_window(self, result: np.ndarray, window, center) -> np.ndarray: """ Center the result in the window. 
""" if self.axis > result.ndim - 1: raise ValueError("Requested axis is larger then no. of argument dimensions") - offset = calculate_center_offset(window) + offset = self.calculate_center_offset(window, center) if offset > 0: lead_indexer = [slice(None)] * result.ndim lead_indexer[self.axis] = slice(offset, None) result = np.copy(result[tuple(lead_indexer)]) + return result def _get_roll_func(self, func_name: str) -> Callable: @@ -524,6 +532,7 @@ def _apply( is_weighted: bool = False, name: Optional[str] = None, use_numba_cache: bool = False, + skip_offset: bool = False, **kwargs, ): """ @@ -543,6 +552,8 @@ def _apply( use_numba_cache : bool whether to cache a numba compiled function. Only available for numba enabled methods (so far only apply) + skip_offset : bool + whether to skip offsetting x **kwargs additional arguments for rolling function and window function @@ -560,7 +571,11 @@ def homogeneous_func(values: np.ndarray): if values.size == 0: return values.copy() - offset = calculate_center_offset(window) if center else 0 + if skip_offset: + offset = 0 + else: + offset = self.calculate_center_offset(window, center) + additional_nans = np.array([np.nan] * offset) if not is_weighted: @@ -603,8 +618,8 @@ def calc(x): if use_numba_cache: NUMBA_FUNC_CACHE[(kwargs["original_func"], "rolling_apply")] = func - if center: - result = self._center_window(result, window) + if not skip_offset: + result = self._center_window(result, window, center) return result @@ -1189,7 +1204,7 @@ def sum(self, *args, **kwargs): window_func = self._get_roll_func("roll_weighted_sum") window_func = get_weighted_roll_func(window_func) return self._apply( - window_func, center=self.center, is_weighted=True, name="sum", **kwargs + window_func, center=self.center, is_weighted=True, name="sum", **kwargs, ) @Substitution(name="window") @@ -1210,7 +1225,7 @@ def var(self, ddof=1, *args, **kwargs): window_func = get_weighted_roll_func(window_func) kwargs.pop("name", None) return self._apply( - 
window_func, center=self.center, is_weighted=True, name="var", **kwargs + window_func, center=self.center, is_weighted=True, name="var", **kwargs, ) @Substitution(name="window", versionadded="\n.. versionadded:: 1.0.0\n") @@ -1388,7 +1403,8 @@ def apply( # Cython apply functions handle center, so don't need to use # _apply's center handling window = self._get_window() - offset = calculate_center_offset(window) if self.center else 0 + + offset = self.calculate_center_offset(window, self.center) apply_func = self._generate_cython_apply_func( args, kwargs, raw, offset, func ) @@ -1406,19 +1422,17 @@ def apply( raw=raw, original_func=func, args=args, + skip_offset=True, kwargs=kwargs, ) def _generate_cython_apply_func(self, args, kwargs, raw, offset, func): from pandas import Series + cython_func = self._get_cython_func_type("roll_generic") + window_func = partial( - self._get_cython_func_type("roll_generic"), - args=args, - kwargs=kwargs, - raw=raw, - offset=offset, - func=func, + cython_func, args=args, kwargs=kwargs, raw=raw, offset=offset, func=func, ) def apply_func(values, begin, end, min_periods, raw=raw): @@ -1433,7 +1447,7 @@ def sum(self, *args, **kwargs): window_func = self._get_cython_func_type("roll_sum") kwargs.pop("floor", None) return self._apply( - window_func, center=self.center, floor=0, name="sum", **kwargs + window_func, center=self.center, floor=0, name="sum", **kwargs, ) _shared_docs["max"] = dedent( @@ -1540,7 +1554,9 @@ def median(self, **kwargs): window_func = self._get_roll_func("roll_median_c") # GH 32865. 
Move max window size calculation to # the median function implementation - return self._apply(window_func, center=self.center, name="median", **kwargs) + return self._apply( + window_func, center=self.center, name="median", skip_offset=True, **kwargs + ) def std(self, ddof=1, *args, **kwargs): nv.validate_window_func("std", args, kwargs) @@ -1563,7 +1579,8 @@ def zsqrt_func(values, begin, end, min_periods): def var(self, ddof=1, *args, **kwargs): nv.validate_window_func("var", args, kwargs) kwargs.pop("require_min_periods", None) - window_func = partial(self._get_cython_func_type("roll_var"), ddof=ddof) + cython_func = self._get_cython_func_type("roll_var") + window_func = partial(cython_func, ddof=ddof) # ddof passed again for compat with groupby.rolling return self._apply( window_func, @@ -1696,8 +1713,10 @@ def kurt(self, **kwargs): def quantile(self, quantile, interpolation="linear", **kwargs): if quantile == 1.0: window_func = self._get_cython_func_type("roll_max") + skip_offset = False elif quantile == 0.0: window_func = self._get_cython_func_type("roll_min") + skip_offset = False else: window_func = partial( self._get_roll_func("roll_quantile"), @@ -1705,11 +1724,18 @@ def quantile(self, quantile, interpolation="linear", **kwargs): quantile=quantile, interpolation=interpolation, ) + skip_offset = True # Pass through for groupby.rolling kwargs["quantile"] = quantile kwargs["interpolation"] = interpolation - return self._apply(window_func, center=self.center, name="quantile", **kwargs) + return self._apply( + window_func, + center=self.center, + name="quantile", + skip_offset=skip_offset, + **kwargs, + ) _shared_docs[ "cov" @@ -2189,6 +2215,7 @@ def _apply( is_weighted: bool = False, name: Optional[str] = None, use_numba_cache: bool = False, + skip_offset: bool = True, **kwargs, ): result = Rolling._apply( @@ -2200,6 +2227,7 @@ def _apply( is_weighted, name, use_numba_cache, + skip_offset, **kwargs, ) # Cannot use _wrap_outputs because we calculate the result 
all at once @@ -2243,6 +2271,31 @@ def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries: obj = obj.take(groupby_order) return super()._create_data(obj) + def calculate_center_offset(self, window, center: bool) -> int: + """ + Calculate an offset necessary to have the window label to be centered. + + Parameters + ---------- + window : ndarray or int + window weights or window + center : bool + Set the labels at the center of the window. + + Returns + ------- + int + """ + if not center or not self.win_type: + return 0 + + if self.is_freq_type or isinstance(self.window, BaseIndexer): + return 0 + + if not is_integer(window): + window = len(window) + return int((window - 1) / 2.0) + def _get_cython_func_type(self, func: str) -> Callable: """ Return the cython function type. diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index dfcbdde466d44..42c3d02089c6f 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -136,6 +136,53 @@ def test_rolling_apply_consistency( tm.assert_equal(rolling_f_result, rolling_apply_f_result) +@pytest.mark.parametrize( + "window,min_periods,center", list(_rolling_consistency_cases()) +) +def test_rolling_groupby(base_functions, window, min_periods, center): + base_df = DataFrame({"group": "A", "data": randn(20)}) + + b_df = base_df.copy() + b_df["group"] = "B" + + grp_df = pd.concat([base_df, b_df]).groupby("group") + + for (f, require_min_periods, name) in base_functions: + if ( + require_min_periods + and (min_periods is not None) + and (min_periods < require_min_periods) + ): + continue + + base_rolling_f = getattr( + base_df[["data"]].rolling( + window=window, center=center, min_periods=min_periods + ), + name, + ) + + grp_rolling_f = getattr( + grp_df[["data"]].rolling( + window=window, center=center, min_periods=min_periods + ), + name, + ) + 
+ base_result = base_rolling_f().reset_index(drop=True) + grp_result = grp_rolling_f().reset_index() + + a_result = grp_result[grp_result["group"] == "A"][["data"]].reset_index( + drop=True + ) + b_result = grp_result[grp_result["group"] == "B"][["data"]].reset_index( + drop=True + ) + + tm.assert_frame_equal(base_result, a_result) + tm.assert_frame_equal(base_result, b_result) + + @pytest.mark.parametrize("window", range(7)) def test_rolling_corr_with_zero_variance(window): # GH 18430 diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 786cf68d28871..20797095a5692 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -274,6 +274,32 @@ def test_groupby_rolling_center_center(self): ) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("min_periods", [5, 4, 3]) + def test_groupby_rolling_center_min_periods(self, min_periods): + df = pd.DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)}) + + window_size = 5 + result = ( + df.groupby("group") + .rolling(window_size, center=True, min_periods=min_periods) + .mean() + ) + result = result.reset_index()[["group", "data"]] + + grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0] + grp_B_mean = [x + 10.0 for x in grp_A_mean] + + num_nans = max(0, min_periods - 3) # For window_size of 5 + nans = [np.nan] * num_nans + grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans + grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans + + expected = pd.DataFrame( + {"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected} + ) + + tm.assert_frame_equal(result, expected) + def test_groupby_subselect_rolling(self): # GH 35486 df = DataFrame(
- [x] closes #36040 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Creating PR to solve [Issue 36040](https://github.com/pandas-dev/pandas/issues/36040). The old `FixedWindowIndexer.get_window_bounds` function provided unintuitive bounds that went beyond the length of the original array. Additionally, to do a centered rolling operation, it required NaN values to be appended to the end of original array to enable some roundabout way of achieving the centering. I replaced it with one that seems much simpler and actually creates "fixed" size windows (at least prior to clipping the ends), which the previous function did not. That being said, I know this PR fails some tests, I'm would appreciate some advice on how best to proceed!
https://api.github.com/repos/pandas-dev/pandas/pulls/36132
2020-09-04T23:37:20Z
2020-10-10T15:55:40Z
null
2020-10-10T15:55:41Z
BUG: Timestamp == date match stdlib
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index d8bd2efcf17b1..d33686190e0b5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -156,6 +156,7 @@ Deprecations - Deprecated allowing subclass-specific keyword arguments in the :class:`Index` constructor, use the specific subclass directly instead (:issue:`14093`,:issue:`21311`,:issue:`22315`,:issue:`26974`) - Deprecated ``astype`` of datetimelike (``timedelta64[ns]``, ``datetime64[ns]``, ``Datetime64TZDtype``, ``PeriodDtype``) to integer dtypes, use ``values.view(...)`` instead (:issue:`38544`) - Deprecated keyword ``try_cast`` in :meth:`Series.where`, :meth:`Series.mask`, :meth:`DataFrame.where`, :meth:`DataFrame.mask`; cast results manually if desired (:issue:`38836`) +- Deprecated comparison of :class:`Timestamp` object with ``datetime.date`` objects. Instead of e.g. ``ts <= mydate`` use ``ts <= pd.Timestamp(mydate)`` or ``ts.date() <= mydate`` (:issue:`36131`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 242eb89d1e723..df4677a242758 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -16,6 +16,7 @@ from numpy cimport int8_t, int64_t, ndarray, uint8_t cnp.import_array() from cpython.datetime cimport ( # alias bc `tzinfo` is a kwarg below + PyDate_Check, PyDateTime_Check, PyDateTime_IMPORT, PyDelta_Check, @@ -281,6 +282,20 @@ cdef class _Timestamp(ABCTimestamp): return np.zeros(other.shape, dtype=np.bool_) return NotImplemented + elif PyDate_Check(other): + # returning NotImplemented defers to the `date` implementation + # which incorrectly drops tz and normalizes to midnight + # before comparing + # We follow the stdlib datetime behavior of never being equal + warnings.warn( + "Comparison of Timestamp with datetime.date is deprecated in " + "order to match the standard library 
behavior. " + "In a future version these will be considered non-comparable." + "Use 'ts == pd.Timestamp(date)' or 'ts.date() == date' instead.", + FutureWarning, + stacklevel=1, + ) + return NotImplemented else: return NotImplemented diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 49eb570c4ffe0..4cbdf61ff8dae 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1429,7 +1429,10 @@ def test_loc_setitem_datetime_coercion(self): assert Timestamp("2008-08-08") == df.loc[0, "c"] assert Timestamp("2008-08-08") == df.loc[1, "c"] df.loc[2, "c"] = date(2005, 5, 5) - assert Timestamp("2005-05-05") == df.loc[2, "c"] + with tm.assert_produces_warning(FutureWarning): + # Comparing Timestamp to date obj is deprecated + assert Timestamp("2005-05-05") == df.loc[2, "c"] + assert Timestamp("2005-05-05").date() == df.loc[2, "c"] def test_loc_setitem_datetimelike_with_inference(self): # GH 7592 diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 232ebc608e465..385390e9d7b98 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -613,8 +613,13 @@ def test_get_indexer_mixed_dtypes(self, target): ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]), ], ) + # FIXME: these warnings are flaky GH#36131 + @pytest.mark.filterwarnings( + "ignore:Comparison of Timestamp with datetime.date:FutureWarning" + ) def test_get_indexer_out_of_bounds_date(self, target, positions): values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")]) + result = values.get_indexer(target) expected = np.array(positions, dtype=np.intp) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py index 3d1f71def5836..285733dc2c7af 100644 --- 
a/pandas/tests/scalar/timestamp/test_comparisons.py +++ b/pandas/tests/scalar/timestamp/test_comparisons.py @@ -142,6 +142,42 @@ def test_compare_invalid(self): assert val != np.float64(1) assert val != np.int64(1) + @pytest.mark.parametrize("tz", [None, "US/Pacific"]) + def test_compare_date(self, tz): + # GH#36131 comparing Timestamp with date object is deprecated + ts = Timestamp.now(tz) + dt = ts.to_pydatetime().date() + # These are incorrectly considered as equal because they + # dispatch to the date comparisons which truncates ts + + for left, right in [(ts, dt), (dt, ts)]: + with tm.assert_produces_warning(FutureWarning): + assert left == right + with tm.assert_produces_warning(FutureWarning): + assert not left != right + with tm.assert_produces_warning(FutureWarning): + assert not left < right + with tm.assert_produces_warning(FutureWarning): + assert left <= right + with tm.assert_produces_warning(FutureWarning): + assert not left > right + with tm.assert_produces_warning(FutureWarning): + assert left >= right + + # Once the deprecation is enforced, the following assertions + # can be enabled: + # assert not left == right + # assert left != right + # + # with pytest.raises(TypeError): + # left < right + # with pytest.raises(TypeError): + # left <= right + # with pytest.raises(TypeError): + # left > right + # with pytest.raises(TypeError): + # left >= right + def test_cant_compare_tz_naive_w_aware(self, utc_fixture): # see GH#1404 a = Timestamp("3/12/2012")
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ATM we have one reasonable behavior `Timestamp("2020-09-04") == date(2020, 9, 4)` and two un-reasonable behaviors: ``Timestamp("2020-09-04").tz_localize("US/Pacific") == date(2020, 9, 4)`, `Timestamp.now() == Timestamp.now().date()`. Since the stdlib datetime doesnt consider `datetime(2020, 9, 4) == date(2020, 9, 4)`, this follows the stdlib and considers them never equal. <s>I'm still getting one test failure locally.</s> cc @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/36131
2020-09-04T22:08:55Z
2021-01-01T20:40:19Z
2021-01-01T20:40:19Z
2021-01-01T21:09:12Z
CLN: De-privatize
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8dc500dddeafa..e321fdd9b3a9b 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -395,7 +395,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: from pandas.core.dtypes.common import DT64NS_DTYPE, is_datetime64tz_dtype from pandas.core.util.hashing import ( - _combine_hash_arrays, + combine_hash_arrays, hash_array, hash_tuples, ) @@ -427,7 +427,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: ) else: cat_array = [cat_array] - hashed = _combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) + hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(hashed) @classmethod diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6dcb9250812d0..3fd93a8159041 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -354,9 +354,9 @@ def _mpl_repr(self): @property def _formatter_func(self): - from pandas.io.formats.format import _get_format_datetime64 + from pandas.io.formats.format import get_format_datetime64 - formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) + formatter = get_format_datetime64(is_dates_only=self._is_dates_only) return lambda x: f"'{formatter(x, tz=self.tz)}'" # -------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index cfb17b9498a36..fe2fec1c52063 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2291,7 +2291,7 @@ def need_slice(obj) -> bool: ) -def _non_reducing_slice(slice_): +def non_reducing_slice(slice_): """ Ensure that a slice doesn't reduce to a Series or Scalar. 
@@ -2330,7 +2330,7 @@ def pred(part) -> bool: return tuple(slice_) -def _maybe_numeric_slice(df, slice_, include_bool=False): +def maybe_numeric_slice(df, slice_, include_bool: bool = False): """ Want nice defaults for background_gradient that don't break with non-numeric data. But if slice_ is passed go with that. diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index d79b9f4092325..df082c7285ae8 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -24,7 +24,7 @@ _default_hash_key = "0123456789123456" -def _combine_hash_arrays(arrays, num_items: int): +def combine_hash_arrays(arrays, num_items: int): """ Parameters ---------- @@ -108,7 +108,7 @@ def hash_pandas_object( for _ in [None] ) arrays = itertools.chain([h], index_iter) - h = _combine_hash_arrays(arrays, 2) + h = combine_hash_arrays(arrays, 2) h = Series(h, index=obj.index, dtype="uint64", copy=False) @@ -131,7 +131,7 @@ def hash_pandas_object( # keep `hashes` specifically a generator to keep mypy happy _hashes = itertools.chain(hashes, index_hash_generator) hashes = (x for x in _hashes) - h = _combine_hash_arrays(hashes, num_items) + h = combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype="uint64", copy=False) else: @@ -175,7 +175,7 @@ def hash_tuples(vals, encoding="utf8", hash_key: str = _default_hash_key): hashes = ( _hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals ) - h = _combine_hash_arrays(hashes, len(vals)) + h = combine_hash_arrays(hashes, len(vals)) if is_tuple: h = h[0] diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3d441f6e737bc..3dc4290953360 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1624,7 +1624,7 @@ def _format_datetime64_dateonly( return x._date_repr -def _get_format_datetime64( +def get_format_datetime64( is_dates_only: bool, nat_rep: str = "NaT", date_format: None = None ) -> Callable: @@ -1656,7 +1656,7 @@ def 
_format_strings(self) -> List[str]: """ we by definition have a TZ """ values = self.values.astype(object) is_dates_only = _is_dates_only(values) - formatter = self.formatter or _get_format_datetime64( + formatter = self.formatter or get_format_datetime64( is_dates_only, date_format=self.date_format ) fmt_values = [formatter(x) for x in values] diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3bbb5271bce61..023557dd6494d 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -36,7 +36,7 @@ import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice +from pandas.core.indexing import maybe_numeric_slice, non_reducing_slice jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") @@ -475,7 +475,7 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Style row_locs = range(len(self.data)) col_locs = range(len(self.data.columns)) else: - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) if len(subset) == 1: subset = subset, self.data.columns @@ -633,7 +633,7 @@ def _apply( **kwargs, ) -> "Styler": subset = slice(None) if subset is None else subset - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) data = self.data.loc[subset] if axis is not None: result = data.apply(func, axis=axis, result_type="expand", **kwargs) @@ -725,7 +725,7 @@ def _applymap(self, func: Callable, subset=None, **kwargs) -> "Styler": func = partial(func, **kwargs) # applymap doesn't take kwargs? 
if subset is None: subset = pd.IndexSlice[:] - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) result = self.data.loc[subset].applymap(func) self._update_ctx(result) return self @@ -985,7 +985,7 @@ def hide_columns(self, subset) -> "Styler": ------- self : Styler """ - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) hidden_df = self.data.loc[subset] self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) return self @@ -1087,8 +1087,8 @@ def background_gradient( of the data is extended by ``low * (x.max() - x.min())`` and ``high * (x.max() - x.min())`` before normalizing. """ - subset = _maybe_numeric_slice(self.data, subset) - subset = _non_reducing_slice(subset) + subset = maybe_numeric_slice(self.data, subset) + subset = non_reducing_slice(subset) self.apply( self._background_gradient, cmap=cmap, @@ -1322,8 +1322,8 @@ def bar( "(eg: color=['#d65f5f', '#5fba7d'])" ) - subset = _maybe_numeric_slice(self.data, subset) - subset = _non_reducing_slice(subset) + subset = maybe_numeric_slice(self.data, subset) + subset = non_reducing_slice(subset) self.apply( self._bar, subset=subset, @@ -1390,7 +1390,7 @@ def _highlight_handler( axis: Optional[Axis] = None, max_: bool = True, ) -> "Styler": - subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset)) + subset = non_reducing_slice(maybe_numeric_slice(self.data, subset)) self.apply( self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_ ) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 147e4efd74bc3..c1ba7881165f1 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -33,6 +33,13 @@ from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.timeseries 
import ( + decorate_axes, + format_dateaxis, + maybe_convert_index, + maybe_resample, + use_dynamic_x, +) from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, @@ -1074,15 +1081,11 @@ def _is_ts_plot(self) -> bool: return not self.x_compat and self.use_index and self._use_dynamic_x() def _use_dynamic_x(self): - from pandas.plotting._matplotlib.timeseries import _use_dynamic_x - - return _use_dynamic_x(self._get_ax(0), self.data) + return use_dynamic_x(self._get_ax(0), self.data) def _make_plot(self): if self._is_ts_plot(): - from pandas.plotting._matplotlib.timeseries import _maybe_convert_index - - data = _maybe_convert_index(self._get_ax(0), self.data) + data = maybe_convert_index(self._get_ax(0), self.data) x = data.index # dummy, not used plotf = self._ts_plot @@ -1142,24 +1145,18 @@ def _plot( @classmethod def _ts_plot(cls, ax: "Axes", x, data, style=None, **kwds): - from pandas.plotting._matplotlib.timeseries import ( - _decorate_axes, - _maybe_resample, - format_dateaxis, - ) - # accept x to be consistent with normal plot func, # x is not passed to tsplot as it uses data.index as x coordinate # column_num must be in kwds for stacking purpose - freq, data = _maybe_resample(data, ax, kwds) + freq, data = maybe_resample(data, ax, kwds) # Set ax with freq info - _decorate_axes(ax, freq, kwds) + decorate_axes(ax, freq, kwds) # digging deeper if hasattr(ax, "left_ax"): - _decorate_axes(ax.left_ax, freq, kwds) + decorate_axes(ax.left_ax, freq, kwds) if hasattr(ax, "right_ax"): - _decorate_axes(ax.right_ax, freq, kwds) + decorate_axes(ax.right_ax, freq, kwds) ax._plot_data.append((data, cls._kind, kwds)) lines = cls._plot(ax, data.index, data.values, style=style, **kwds) diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index fd89a093d25a4..f8faac6a6a026 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -32,7 +32,7 @@ # Plotting functions 
and monkey patches -def _maybe_resample(series: "Series", ax: "Axes", kwargs): +def maybe_resample(series: "Series", ax: "Axes", kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) @@ -105,7 +105,7 @@ def _replot_ax(ax: "Axes", freq, kwargs): ax._plot_data = [] ax.clear() - _decorate_axes(ax, freq, kwargs) + decorate_axes(ax, freq, kwargs) lines = [] labels = [] @@ -128,7 +128,7 @@ def _replot_ax(ax: "Axes", freq, kwargs): return lines, labels -def _decorate_axes(ax: "Axes", freq, kwargs): +def decorate_axes(ax: "Axes", freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, "_plot_data"): ax._plot_data = [] @@ -193,7 +193,7 @@ def _get_freq(ax: "Axes", series: "Series"): return freq, ax_freq -def _use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool: +def use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool: freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) @@ -235,7 +235,7 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]: return freq -def _maybe_convert_index(ax: "Axes", data): +def maybe_convert_index(ax: "Axes", data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 532bb4f2e6dac..ec0391a2ccc26 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp import pandas._testing as tm -from pandas.core.indexing import _non_reducing_slice +from pandas.core.indexing import non_reducing_slice from pandas.tests.indexing.common import _mklbl @@ -739,7 +739,7 @@ def test_non_reducing_slice_on_multiindex(self): df = pd.DataFrame(dic, index=[0, 1]) idx = pd.IndexSlice slice_ = idx[:, idx["b", 
"d"]] - tslice_ = _non_reducing_slice(slice_) + tslice_ = non_reducing_slice(slice_) result = df.loc[tslice_] expected = pd.DataFrame({("b", "d"): [4, 1]}) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 5b7f013d5de31..a080c5d169215 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import DataFrame, Index, NaT, Series import pandas._testing as tm -from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice +from pandas.core.indexing import maybe_numeric_slice, non_reducing_slice from pandas.tests.indexing.common import _mklbl # ------------------------------------------------------------------------ @@ -822,7 +822,7 @@ def test_range_in_series_indexing(self, size): def test_non_reducing_slice(self, slc): df = DataFrame([[0, 1], [2, 3]]) - tslice_ = _non_reducing_slice(slc) + tslice_ = non_reducing_slice(slc) assert isinstance(df.loc[tslice_], DataFrame) def test_list_slice(self): @@ -831,18 +831,18 @@ def test_list_slice(self): df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"]) expected = pd.IndexSlice[:, ["A"]] for subset in slices: - result = _non_reducing_slice(subset) + result = non_reducing_slice(subset) tm.assert_frame_equal(df.loc[result], df.loc[expected]) def test_maybe_numeric_slice(self): df = DataFrame({"A": [1, 2], "B": ["c", "d"], "C": [True, False]}) - result = _maybe_numeric_slice(df, slice_=None) + result = maybe_numeric_slice(df, slice_=None) expected = pd.IndexSlice[:, ["A"]] assert result == expected - result = _maybe_numeric_slice(df, None, include_bool=True) + result = maybe_numeric_slice(df, None, include_bool=True) expected = pd.IndexSlice[:, ["A", "C"]] - result = _maybe_numeric_slice(df, [1]) + result = maybe_numeric_slice(df, [1]) expected = [1] assert result == expected
https://api.github.com/repos/pandas-dev/pandas/pulls/36130
2020-09-04T21:47:24Z
2020-09-05T02:51:04Z
2020-09-05T02:51:04Z
2020-09-05T14:10:44Z
CLN: remove unused args/kwargs
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 53edd056a6802..173ff99912f05 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1084,6 +1084,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike: assert how == "ohlc" raise + # We get here with a) EADtypes and b) object dtype obj: Union[Series, DataFrame] # call our grouper again with only this block if isinstance(bvalues, ExtensionArray): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 651af2d314251..6ef2e67030881 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1012,6 +1012,8 @@ def _agg_general( # raised in _get_cython_function, in some cases can # be trimmed by implementing cython funcs for more dtypes pass + else: + raise # apply a non-cython aggregation result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c076b6e2e181b..e9525f03368fa 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -601,7 +601,7 @@ def _transform( return result - def agg_series(self, obj: Series, func: F, *args, **kwargs): + def agg_series(self, obj: Series, func: F): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 @@ -649,7 +649,7 @@ def _aggregate_series_fast(self, obj: Series, func: F): result, counts = grouper.get_result() return result, counts - def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): + def _aggregate_series_pure_python(self, obj: Series, func: F): group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) @@ -658,7 +658,7 @@ def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): splitter = get_splitter(obj, group_index, ngroups, axis=0) for label, group in splitter: - res = func(group, *args, **kwargs) + res = func(group) if result is None: if isinstance(res, (Series, Index, 
np.ndarray)): @@ -835,7 +835,7 @@ def groupings(self) -> "List[grouper.Grouping]": for lvl, name in zip(self.levels, self.names) ] - def agg_series(self, obj: Series, func: F, *args, **kwargs): + def agg_series(self, obj: Series, func: F): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result
https://api.github.com/repos/pandas-dev/pandas/pulls/36129
2020-09-04T21:02:11Z
2020-09-05T03:11:40Z
2020-09-05T03:11:40Z
2020-09-05T14:08:13Z
CLN: remove xfails/skips for no-longer-supported numpys
diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 904a760a03e58..3e0954ef3d74d 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -11,7 +11,7 @@ def get_standard_colors( - num_colors=None, colormap=None, color_type: str = "default", color=None + num_colors: int, colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 04215bfe1bedb..ece9367cea7fe 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -194,8 +194,7 @@ def test_constructor_inferred_fill_value(self, data, fill_value): @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) @pytest.mark.parametrize( - "size", - [pytest.param(0, marks=td.skip_if_np_lt("1.16", reason="NumPy-11383")), 10], + "size", [0, 10], ) @td.skip_if_no_scipy def test_from_spmatrix(self, size, format): @@ -904,7 +903,6 @@ def test_all(self, data, pos, neg): ([1.0, 2.0, 1.0], 1.0, 0.0), ], ) - @td.skip_if_np_lt("1.15") # prior didn't dispatch def test_numpy_all(self, data, pos, neg): # GH 17570 out = np.all(SparseArray(data)) @@ -956,7 +954,6 @@ def test_any(self, data, pos, neg): ([0.0, 2.0, 0.0], 2.0, 0.0), ], ) - @td.skip_if_np_lt("1.15") # prior didn't dispatch def test_numpy_any(self, data, pos, neg): # GH 17570 out = np.any(SparseArray(data)) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b0ba0d991c9b0..f21b1d3dfe487 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1060,54 +1060,14 @@ def test_any_all_bool_only(self): (np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True), (np.all, {"A": pd.Series([0, 1], dtype=int)}, False), (np.any, {"A": pd.Series([0, 1], dtype=int)}, True), - pytest.param( - np.all, - {"A": pd.Series([0, 1], dtype="M8[ns]")}, - False, - 
marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([0, 1], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([1, 2], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([1, 2], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([0, 1], dtype="m8[ns]")}, - False, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([0, 1], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([1, 2], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([1, 2], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), + pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False,), + pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,), + pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False,), + pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,), + pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,), (np.all, {"A": pd.Series([0, 1], dtype="category")}, False), (np.any, {"A": pd.Series([0, 1], dtype="category")}, True), (np.all, {"A": pd.Series([1, 2], dtype="category")}, True), @@ -1120,8 +1080,6 @@ def test_any_all_bool_only(self): "B": pd.Series([10, 20], dtype="m8[ns]"), }, True, - # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here - marks=[td.skip_if_np_lt("1.15")], ), ], ) diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 753b8b6eda9c5..c40935b2cc5dd 100644 --- 
a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -11,10 +11,6 @@ class TestToCSV: - @pytest.mark.xfail( - (3, 6, 5) > sys.version_info, - reason=("Python csv library bug (see https://bugs.python.org/issue32255)"), - ) def test_to_csv_with_single_column(self): # see gh-18676, https://bugs.python.org/issue32255 # diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index ab8618eb0a7d4..e39083b709f38 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -3,8 +3,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm @@ -130,7 +128,6 @@ def test_is_monotonic(self): @pytest.mark.parametrize("func", [np.any, np.all]) @pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())]) - @td.skip_if_np_lt("1.15") def test_validate_any_all_out_keepdims_raises(self, kwargs, func): s = pd.Series([1, 2]) param = list(kwargs)[0] @@ -144,7 +141,6 @@ def test_validate_any_all_out_keepdims_raises(self, kwargs, func): with pytest.raises(ValueError, match=msg): func(s, **kwargs) - @td.skip_if_np_lt("1.15") def test_validate_sum_initial(self): s = pd.Series([1, 2]) msg = ( @@ -167,7 +163,6 @@ def test_validate_median_initial(self): # method instead of the ufunc. s.median(overwrite_input=True) - @td.skip_if_np_lt("1.15") def test_validate_stat_keepdims(self): s = pd.Series([1, 2]) msg = (
https://api.github.com/repos/pandas-dev/pandas/pulls/36128
2020-09-04T20:37:37Z
2020-09-04T21:57:25Z
2020-09-04T21:57:25Z
2020-09-04T22:03:52Z
Backport PR #36050 on branch 1.1.x (BUG: incorrect year returned in isocalendar for certain dates)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 232d0c4b4bbcd..39850905f60fa 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -33,6 +33,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 6cce2f5e1fd95..d8c83daa661a3 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -201,10 +201,10 @@ cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil: iso_week = 1 iso_year = year - if iso_week == 1 and doy > 7: + if iso_week == 1 and month == 12: iso_year += 1 - elif iso_week >= 52 and doy < 7: + elif iso_week >= 52 and month == 1: iso_year -= 1 return iso_year, iso_week, dow + 1 diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index d2ad9c8c398ea..723bd303b1974 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -682,6 +682,9 @@ def test_setitem_with_different_tz(self): [[pd.NaT], [[np.NaN, np.NaN, np.NaN]]], [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]], [["2010-01-01", pd.NaT], [[2009, 53, 5], 
[np.NaN, np.NaN, np.NaN]]], + # see GH#36032 + [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]], + [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], ], ) def test_isocalendar(self, input_series, expected_output): diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py index aab86d3a2df69..1ff700fdc23a3 100644 --- a/pandas/tests/tslibs/test_ccalendar.py +++ b/pandas/tests/tslibs/test_ccalendar.py @@ -1,10 +1,13 @@ from datetime import date, datetime +from hypothesis import given, strategies as st import numpy as np import pytest from pandas._libs.tslibs import ccalendar +import pandas as pd + @pytest.mark.parametrize( "date_tuple,expected", @@ -48,3 +51,15 @@ def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tu expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() assert result == expected_from_date_isocalendar assert result == expected_iso_tuple + + +@given( + st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), + ) +) +def test_isocalendar(dt): + expected = dt.isocalendar() + result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) + assert result == expected
Backport PR #36050: BUG: incorrect year returned in isocalendar for certain dates
https://api.github.com/repos/pandas-dev/pandas/pulls/36127
2020-09-04T20:29:38Z
2020-09-04T21:58:13Z
2020-09-04T21:58:13Z
2020-09-04T21:58:14Z
DOC: add missing data handling info to pd.Categorical docstring
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 27b1afdb438cb..ef363ca6b0187 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -280,6 +280,19 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject): ['a', 'b', 'c', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] + Missing values are not included as a category. + + >>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan]) + >>> c + [1, 2, 3, 1, 2, 3, NaN] + Categories (3, int64): [1, 2, 3] + + However, their presence is indicated in the `codes` attribute + by code `-1`. + + >>> c.codes + array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8) + Ordered `Categoricals` can be sorted according to the custom order of the categories and can have a min and max value.
- [x] closes #35162 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/36125
2020-09-04T19:21:33Z
2020-09-05T10:50:58Z
2020-09-05T10:50:58Z
2020-09-05T10:50:58Z
Bug 29764 groupby loses index name sometimes
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e65daa439a225..7ff946574c778 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -311,6 +311,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`) - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) +- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) - Reshaping diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 537feace59fcb..a58f28880945f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1694,6 +1694,7 @@ def _wrap_transformed_output( """ indexed_output = {key.position: val for key, val in output.items()} columns = Index(key.label for key in output) + columns.name = self.obj.columns.name result = self.obj._constructor(indexed_output) result.columns = columns diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index eec9e8064d584..e0196df7ceac0 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2111,3 +2111,26 @@ def test_subsetting_columns_keeps_attrs(klass, attr, value): expected = df.groupby("a", **{attr: value}) result = expected[["b"]] if klass is DataFrame else expected["b"] assert getattr(result, attr) == getattr(expected, attr) + + +@pytest.mark.parametrize("func", ["sum", "any", "shift"]) +def test_groupby_column_index_name_lost(func): + # GH: 
29764 groupby loses index sometimes + expected = pd.Index(["a"], name="idx") + df = pd.DataFrame([[1]], columns=expected) + df_grouped = df.groupby([1]) + result = getattr(df_grouped, func)().columns + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_column_index_name_lost_fill_funcs(func): + # GH: 29764 groupby loses index sometimes + df = pd.DataFrame( + [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], + columns=pd.Index(["type", "a", "b"], name="idx"), + ) + df_grouped = df.groupby(["type"])[["a", "b"]] + result = getattr(df_grouped, func)().columns + expected = pd.Index(["a", "b"], name="idx") + tm.assert_index_equal(result, expected)
- [x] closes #29764 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Issue was fixed for sum and any with c9144ca54dcc924995acae3d9dcb890a5802d7c0. So the only thing left was to add the index name in `_wrap_transformed_output`. I kept the tests from #33111 to show that all cases are now handled correctly. I hope it is okay to add a new PR for this?
https://api.github.com/repos/pandas-dev/pandas/pulls/36121
2020-09-04T16:42:54Z
2020-09-05T03:18:13Z
2020-09-05T03:18:12Z
2020-09-05T19:04:08Z
TYP: io
diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index 285aeaf7d4c6e..a4b5b61734ab7 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -1,3 +1,5 @@ +from typing import List + from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_integer, is_list_like @@ -56,7 +58,7 @@ def get_writer(engine_name): raise ValueError(f"No Excel writer '{engine_name}'") from err -def _excel2num(x): +def _excel2num(x: str) -> int: """ Convert Excel column name like 'AB' to 0-based column index. @@ -88,7 +90,7 @@ def _excel2num(x): return index - 1 -def _range2cols(areas): +def _range2cols(areas: str) -> List[int]: """ Convert comma separated list of column names and ranges to indices. @@ -109,12 +111,12 @@ def _range2cols(areas): >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ - cols = [] + cols: List[int] = [] for rng in areas.split(","): if ":" in rng: - rng = rng.split(":") - cols.extend(range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) + rngs = rng.split(":") + cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) else: cols.append(_excel2num(rng)) diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index 4d6f03489725f..2e9ee192a1182 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -3,6 +3,7 @@ """ import re +from typing import Optional import warnings @@ -93,6 +94,7 @@ def __call__(self, declarations_str, inherited=None): props[prop] = val # 2. 
resolve relative font size + font_size: Optional[float] if props.get("font-size"): if "font-size" in inherited: em_pt = inherited["font-size"] @@ -173,10 +175,11 @@ def _error(): warnings.warn(f"Unhandled size: {repr(in_val)}", CSSWarning) return self.size_to_pt("1!!default", conversions=conversions) - try: - val, unit = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val).groups() - except AttributeError: + match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val) + if match is None: return _error() + + val, unit = match.groups() if val == "": # hack for 'large' etc. val = 1 diff --git a/setup.cfg b/setup.cfg index 29c731848de8e..e7d7df7ff19a2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -279,9 +279,6 @@ check_untyped_defs=False [mypy-pandas.io.formats.console] check_untyped_defs=False -[mypy-pandas.io.formats.css] -check_untyped_defs=False - [mypy-pandas.io.formats.csvs] check_untyped_defs=False
@simonjayhawkins are there any typing-related areas that you'd suggest I prioritize?
https://api.github.com/repos/pandas-dev/pandas/pulls/36120
2020-09-04T16:17:42Z
2020-09-04T17:34:50Z
2020-09-04T17:34:50Z
2020-09-04T17:53:49Z
Backport PR #36118 on branch 1.1.x (REGR: ensure closed attribute of IntervalIndex is preserved in pickle roundtrip)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7195f3d7a3885..232d0c4b4bbcd 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,8 +18,9 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) +- Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) - +- .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 446e57d58a779..dcf89f24ebaf2 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -190,7 +190,7 @@ def func(intvidx_self, other, sort=False): class IntervalIndex(IntervalMixin, ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name"] + _attributes = ["name", "closed"] # we would like our indexing holder to defer to us _defer_to_indexing = True diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 2755b186f3eae..a20e542b1edd7 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -874,6 +874,13 @@ def test_get_value_non_scalar_errors(self, key): with tm.assert_produces_warning(FutureWarning): idx.get_value(s, key) + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # 
https://github.com/pandas-dev/pandas/issues/35658 + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + def test_dir(): # GH#27571 dir(interval_index) should not raise
Backport PR #36118: REGR: ensure closed attribute of IntervalIndex is preserved in pickle roundtrip
https://api.github.com/repos/pandas-dev/pandas/pulls/36119
2020-09-04T15:59:58Z
2020-09-04T17:49:50Z
2020-09-04T17:49:50Z
2020-09-04T17:49:50Z
REGR: ensure closed attribute of IntervalIndex is preserved in pickle roundtrip
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7195f3d7a3885..232d0c4b4bbcd 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,8 +18,9 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) +- Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) - +- .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 08f9bd51de77b..419ff81a2a478 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -193,7 +193,7 @@ def func(intvidx_self, other, sort=False): class IntervalIndex(IntervalMixin, ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name"] + _attributes = ["name", "closed"] # we would like our indexing holder to defer to us _defer_to_indexing = True diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 2755b186f3eae..a20e542b1edd7 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -874,6 +874,13 @@ def test_get_value_non_scalar_errors(self, key): with tm.assert_produces_warning(FutureWarning): idx.get_value(s, key) + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # 
https://github.com/pandas-dev/pandas/issues/35658 + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + def test_dir(): # GH#27571 dir(interval_index) should not raise
Closes #35658 cc @jbrockmendel (I suppose the long term way would rather to properly pickle the IntervalArray itself and restore from that, instead of restoring from the left/right arrays, but would rather leave such a change for 1.2)
https://api.github.com/repos/pandas-dev/pandas/pulls/36118
2020-09-04T14:30:02Z
2020-09-04T15:17:10Z
2020-09-04T15:17:10Z
2020-09-04T15:59:48Z
Backport PR #36061 on branch 1.1.x (BUG: groupby and agg on read-only array gives ValueError: buffer source array is read-only)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index ac9fe9d2fca26..7195f3d7a3885 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,7 +18,7 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) -- +- Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 38cb973d6dde9..a83634aad3ce2 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -229,7 +229,7 @@ def group_cumprod_float64(float64_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cumsum(numeric[:, :] out, - numeric[:, :] values, + ndarray[numeric, ndim=2] values, const int64_t[:] labels, int ngroups, is_datetimelike, @@ -472,7 +472,7 @@ ctypedef fused complexfloating_t: @cython.boundscheck(False) def _group_add(complexfloating_t[:, :] out, int64_t[:] counts, - complexfloating_t[:, :] values, + ndarray[complexfloating_t, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -483,8 +483,9 @@ def _group_add(complexfloating_t[:, :] out, complexfloating_t val, count complexfloating_t[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if len(values) != len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -530,7 +531,7 @@ group_add_complex128 = _group_add['double complex'] 
@cython.boundscheck(False) def _group_prod(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -541,8 +542,9 @@ def _group_prod(floating[:, :] out, floating val, count floating[:, :] prodx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -582,7 +584,7 @@ group_prod_float64 = _group_prod['double'] @cython.cdivision(True) def _group_var(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1, int64_t ddof=1): @@ -591,10 +593,11 @@ def _group_var(floating[:, :] out, floating val, ct, oldmean floating[:, :] mean int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -639,7 +642,7 @@ group_var_float64 = _group_var['double'] @cython.boundscheck(False) def _group_mean(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): cdef: @@ -647,10 +650,11 @@ def _group_mean(floating[:, :] out, floating val, count floating[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -689,7 +693,7 @@ group_mean_float64 = _group_mean['double'] @cython.boundscheck(False) def 
_group_ohlc(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): """ @@ -740,7 +744,7 @@ group_ohlc_float64 = _group_ohlc['double'] @cython.boundscheck(False) @cython.wraparound(False) def group_quantile(ndarray[float64_t] out, - numeric[:] values, + ndarray[numeric, ndim=1] values, ndarray[int64_t] labels, ndarray[uint8_t] mask, float64_t q, @@ -1072,7 +1076,7 @@ def group_nth(rank_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_rank(float64_t[:, :] out, - rank_t[:, :] values, + ndarray[rank_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike, object ties_method="average", @@ -1424,7 +1428,7 @@ def group_min(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummin(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): @@ -1484,7 +1488,7 @@ def group_cummin(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummax(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 5ddda264642de..87ebd8b5a27fb 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -236,3 +236,44 @@ def test_cython_with_timestamp_and_nat(op, data): result = df.groupby("a").aggregate(op) tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "agg", + [ + "min", + "max", + "count", + "sum", + "prod", + "var", + "mean", + "median", + "ohlc", + "cumprod", + "cumsum", + "shift", + "any", + "all", + "quantile", + "first", + "last", + "rank", + "cummin", + "cummax", + ], +) +def test_read_only_buffer_source_agg(agg): + # 
https://github.com/pandas-dev/pandas/issues/36014 + df = DataFrame( + { + "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0], + "species": ["setosa", "setosa", "setosa", "setosa", "setosa"], + } + ) + df._mgr.blocks[0].values.flags.writeable = False + + result = df.groupby(["species"]).agg({"sepal_length": agg}) + expected = df.copy().groupby(["species"]).agg({"sepal_length": agg}) + + tm.assert_equal(result, expected)
Backport PR #36061: BUG: groupby and agg on read-only array gives ValueError: buffer source array is read-only
https://api.github.com/repos/pandas-dev/pandas/pulls/36117
2020-09-04T14:28:47Z
2020-09-04T15:14:26Z
2020-09-04T15:14:26Z
2020-09-04T17:59:04Z
REGR: append tz-aware DataFrame with tz-naive values
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 232d0c4b4bbcd..981bba1a30ff8 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) +- Fix regression in :meth:`DataFrame.append` mixing tz-aware and tz-naive datetime columns (:issue:`35460`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9902016475b22..dd005752a4832 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -148,15 +148,17 @@ def is_nonempty(x) -> bool: any_ea = any(is_extension_array_dtype(x.dtype) for x in to_concat) if any_ea: + # we ignore axis here, as internally concatting with EAs is always + # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) to_concat = [_cast_to_common_type(arr, target_dtype) for arr in to_concat] - if isinstance(to_concat[0], ExtensionArray) and axis == 0: + if isinstance(to_concat[0], ExtensionArray): cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: - return np.concatenate(to_concat, axis=axis) + return np.concatenate(to_concat) elif _contains_datetime or "timedelta" in typs: return concat_datetime(to_concat, axis=axis, typs=typs) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 88839d2211f81..a13a0d73d434f 100644 --- 
a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -24,7 +24,7 @@ from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos -from pandas.core.arrays import ExtensionArray +from pandas.core.arrays import DatetimeArray, ExtensionArray from pandas.core.internals.blocks import make_block from pandas.core.internals.managers import BlockManager @@ -335,9 +335,13 @@ def _concatenate_join_units(join_units, concat_axis, copy): # the non-EA values are 2D arrays with shape (1, n) to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] concat_values = concat_compat(to_concat, axis=0) - if not isinstance(concat_values, ExtensionArray): + if not isinstance(concat_values, ExtensionArray) or ( + isinstance(concat_values, DatetimeArray) and concat_values.tz is None + ): # if the result of concat is not an EA but an ndarray, reshape to # 2D to put it a non-EA Block + # special case DatetimeArray, which *is* an EA, but is put in a + # consolidated 2D block concat_values = np.atleast_2d(concat_values) else: concat_values = concat_compat(to_concat, axis=concat_axis) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 38cf2cc2402a1..90705f827af25 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1110,6 +1110,23 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): result = df.append([s, s], ignore_index=True) tm.assert_frame_equal(result, expected) + def test_append_empty_tz_frame_with_datetime64ns(self): + # https://github.com/pandas-dev/pandas/issues/35460 + df = pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + + # pd.NaT gets inferred as tz-naive, so append result is tz-naive + result = df.append({"a": pd.NaT}, ignore_index=True) + expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + tm.assert_frame_equal(result, expected) + + # also test with typed value to append + df = 
pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + result = df.append( + pd.Series({"a": pd.NaT}, dtype="datetime64[ns]"), ignore_index=True + ) + expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + tm.assert_frame_equal(result, expected) + class TestConcatenate: def test_concat_copy(self):
Closes #35460
https://api.github.com/repos/pandas-dev/pandas/pulls/36115
2020-09-04T14:00:20Z
2020-09-06T16:58:33Z
2020-09-06T16:58:33Z
2021-01-28T22:36:17Z
REGR: fix consolidation/cache issue with take operation
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7195f3d7a3885..f88198528fdf9 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) +- Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6c8780a0fc186..2af323ccc1dd3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3534,6 +3534,8 @@ class max_speed nv.validate_take(tuple(), kwargs) + self._consolidate_inplace() + new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True ) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 00cfa6265934f..4a85da72bc8b1 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -658,3 +658,26 @@ def test_update_inplace_sets_valid_block_values(): # smoketest for OP bug from GH#35731 assert df.isnull().sum().sum() == 0 + + +def test_nonconsolidated_item_cache_take(): + # https://github.com/pandas-dev/pandas/issues/35521 + + # create non-consolidated dataframe with object dtype columns + df = pd.DataFrame() + df["col1"] = 
pd.Series(["a"], dtype=object) + df["col2"] = pd.Series([0], dtype=object) + + # access column (item cache) + df["col1"] == "A" + # take operation + # (regression was that this consolidated but didn't reset item cache, + # resulting in an invalid cache and the .at operation not working properly) + df[df["col2"] == 0] + + # now setting value should update actual dataframe + df.at[0, "col1"] = "A" + + expected = pd.DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + tm.assert_frame_equal(df, expected) + assert df.at[0, "col1"] == "A"
Closes #35521 @jbrockmendel the "short term" fix for 1.1.2
https://api.github.com/repos/pandas-dev/pandas/pulls/36114
2020-09-04T13:14:06Z
2020-09-04T20:48:44Z
2020-09-04T20:48:43Z
2020-09-05T06:04:33Z
DOC: sync doc/source/whatsnew/v1.1.2.rst on 1.1.x
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 8695ff8d11e6d..ac9fe9d2fca26 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -20,6 +20,7 @@ Fixed regressions - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - + .. --------------------------------------------------------------------------- .. _whatsnew_112.bug_fixes:
~~don't merge this yet. I'll make sure a test is failing first~~
https://api.github.com/repos/pandas-dev/pandas/pulls/36112
2020-09-04T09:28:13Z
2020-09-04T11:44:22Z
2020-09-04T11:44:22Z
2020-09-04T11:44:29Z
CLN: use IS64 instead of is_platform_32bit #36108
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 771e8053ac9be..abf38265ddc6d 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -18,7 +18,7 @@ from pandas._libs.tslibs.nattype cimport ( from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op -from pandas.compat import is_platform_32bit +from pandas.compat import IS64 cdef: float64_t INF = <float64_t>np.inf @@ -26,7 +26,7 @@ cdef: int64_t NPY_NAT = util.get_nat() - bint is_32bit = is_platform_32bit() + bint is_32bit = not IS64 cpdef bint checknull(object val): diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index ab2835932c95d..f2018a5c01711 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -8,7 +8,6 @@ * platform checker """ import platform -import struct import sys import warnings @@ -20,14 +19,6 @@ IS64 = sys.maxsize > 2 ** 32 -# ---------------------------------------------------------------------------- -# functions largely based / taken from the six module - -# Much of the code in this module comes from Benjamin Peterson's six library. -# The license for this library can be found in LICENSES/SIX and the code can be -# found at https://bitbucket.org/gutworth/six - - def set_function_name(f: F, name: str, cls) -> F: """ Bind the name/qualname attributes of the function. @@ -38,7 +29,6 @@ def set_function_name(f: F, name: str, cls) -> F: return f -# https://github.com/pandas-dev/pandas/pull/9123 def is_platform_little_endian() -> bool: """ Checking if the running platform is little endian. @@ -72,7 +62,7 @@ def is_platform_linux() -> bool: bool True if the running platform is linux. 
""" - return sys.platform == "linux2" + return sys.platform == "linux" def is_platform_mac() -> bool: @@ -87,18 +77,6 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" -def is_platform_32bit() -> bool: - """ - Checking if the running platform is 32-bit. - - Returns - ------- - bool - True if the running platform is 32-bit. - """ - return struct.calcsize("P") * 8 < 64 - - def _import_lzma(): """ Importing the `lzma` module. diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index b1c31a6f90133..8b5d0c7ade56c 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -6,11 +6,12 @@ import numpy as np import pytest +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td from pandas.util._test_decorators import async_mark, skip_if_no import pandas as pd -from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range +from pandas import Categorical, DataFrame, Series, date_range, timedelta_range import pandas._testing as tm @@ -254,7 +255,7 @@ def test_itertuples(self, float_frame): assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)] # repr with int on 32-bit/windows - if not (compat.is_platform_windows() or compat.is_platform_32bit()): + if not (is_platform_windows() or not IS64): assert ( repr(list(df.itertuples(name=None))) == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]" diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 476ec1dd10b4b..ab6eac482211d 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -4,8 +4,8 @@ import pytest from pandas._libs.interval import IntervalTree +from pandas.compat import IS64 -from pandas import compat import pandas._testing as tm @@ -14,9 +14,7 @@ def skipif_32bit(param): Skip parameters in a parametrize on 32bit systems. 
Specifically used here to skip leaf_size parameters related to GH 23440. """ - marks = pytest.mark.skipif( - compat.is_platform_32bit(), reason="GH 23440: int type mismatch on 32bit" - ) + marks = pytest.mark.skipif(not IS64, reason="GH 23440: int type mismatch on 32bit") return pytest.param(param, marks=marks) @@ -181,7 +179,7 @@ def test_is_overlapping_trivial(self, closed, left, right): tree = IntervalTree(left, right, closed=closed) assert tree.is_overlapping is False - @pytest.mark.skipif(compat.is_platform_32bit(), reason="GH 23440") + @pytest.mark.skipif(not IS64, reason="GH 23440") def test_construction_overflow(self): # GH 25485 left, right = np.arange(101, dtype="int64"), [np.iinfo(np.int64).max] * 101 diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 1512c88a68778..1c5f00ff754a4 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -5,7 +5,7 @@ import numpy as np import pytest -import pandas.compat as compat +from pandas.compat import IS64, is_platform_windows import pandas as pd import pandas._testing as tm @@ -1041,7 +1041,7 @@ def test_replace_series(self, how, to_key, from_key): from_key == "complex128" and to_key in ("int64", "float64") ): - if compat.is_platform_32bit() or compat.is_platform_windows(): + if not IS64 or is_platform_windows(): pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}") # Expected: do not downcast by replacement diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 1bbfe4d7d74af..22942ed75d0f3 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -18,7 +18,7 @@ import pytest import pytz -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -41,7 +41,7 @@ import pandas.io.formats.format as fmt import 
pandas.io.formats.printing as printing -use_32bit_repr = is_platform_windows() or is_platform_32bit() +use_32bit_repr = is_platform_windows() or not IS64 @pytest.fixture(params=["string", "pathlike", "buffer"]) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 2022abbaee323..59d64e1a6e909 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -154,7 +154,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype, int_frame) expected = int_frame if ( numpy - and (is_platform_32bit() or is_platform_windows()) + and (not IS64 or is_platform_windows()) and not dtype and orient != "split" ): @@ -361,9 +361,7 @@ def test_frame_infinity(self, orient, inf, dtype): result = read_json(df.to_json(), dtype=dtype) assert np.isnan(result.iloc[0, 2]) - @pytest.mark.skipif( - is_platform_32bit(), reason="not compliant on 32-bit, xref #15865" - ) + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") @pytest.mark.parametrize( "value,precision,expected_val", [ diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index f969cbca9f427..e2007e07c572a 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -15,7 +15,7 @@ import pandas._libs.json as ujson from pandas._libs.tslib import Timestamp -import pandas.compat as compat +from pandas.compat import IS64, is_platform_windows from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, Timedelta, date_range import pandas._testing as tm @@ -53,7 +53,7 @@ def get_int32_compat_dtype(numpy, orient): # See GH#32527 dtype = np.int64 if not ((numpy is None or orient == "index") or (numpy is True and orient is None)): - if 
compat.is_platform_windows(): + if is_platform_windows(): dtype = np.int32 else: dtype = np.intp @@ -62,9 +62,7 @@ def get_int32_compat_dtype(numpy, orient): class TestUltraJSONTests: - @pytest.mark.skipif( - compat.is_platform_32bit(), reason="not compliant on 32-bit, xref #15865" - ) + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") def test_encode_decimal(self): sut = decimal.Decimal("1337.1337") encoded = ujson.encode(sut, double_precision=15) @@ -561,7 +559,7 @@ def test_encode_long_conversion(self): assert long_input == ujson.decode(output) @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) - @pytest.mark.xfail(not compat.IS64, reason="GH-35288") + @pytest.mark.xfail(not IS64, reason="GH-35288") def test_dumps_ints_larger_than_maxsize(self, bigNum): # GH34395 bigNum = sys.maxsize + 1 @@ -703,7 +701,7 @@ def test_int_array(self, any_int_dtype): tm.assert_numpy_array_equal(arr_input, arr_output) def test_int_max(self, any_int_dtype): - if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit(): + if any_int_dtype in ("int64", "uint64") and not IS64: pytest.skip("Cannot test 64-bit integer on 32-bit platform") klass = np.dtype(any_int_dtype).type diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 59c6a5d53e7bb..72a679d980641 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -8,6 +8,7 @@ from pandas._libs import algos as libalgos, hashtable as ht from pandas._libs.groupby import group_var_float32, group_var_float64 +from pandas.compat import IS64 from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td @@ -29,7 +30,6 @@ IntervalIndex, Series, Timestamp, - compat, ) import pandas._testing as tm import pandas.core.algorithms as algos @@ -1137,7 +1137,7 @@ def test_dropna(self): ) # 32-bit linux has a different ordering - if not compat.is_platform_32bit(): + if IS64: result = Series([10.3, 5.0, 5.0, 
None]).value_counts(dropna=False) expected = Series([2, 1, 1], index=[5.0, 10.3, np.nan]) tm.assert_series_equal(result, expected) @@ -1170,7 +1170,7 @@ def test_value_counts_uint64(self): result = algos.value_counts(arr) # 32-bit linux has a different ordering - if not compat.is_platform_32bit(): + if IS64: tm.assert_series_equal(result, expected) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index ca7b99492bbf7..78facd6694635 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -31,7 +31,7 @@ def test_foo(): import numpy as np import pytest -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import _np_version @@ -180,7 +180,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None): _skip_if_no_mpl(), reason="Missing matplotlib dependency" ) skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present") -skip_if_32bit = pytest.mark.skipif(is_platform_32bit(), reason="skipping for 32 bit") +skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit") skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") skip_if_windows_python_3 = pytest.mark.skipif( is_platform_windows(), reason="not used on win32"
- [x] closes #36108 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/36109
2020-09-04T05:30:05Z
2020-09-04T14:32:42Z
2020-09-04T14:32:42Z
2020-09-06T05:16:50Z
STY: de-privatize funcs imported cross-module
diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index f9aedeb8ad93e..7966fe8d4f045 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -1,4 +1,4 @@ -cdef class _NDFrameIndexerBase: +cdef class NDFrameIndexerBase: """ A base class for _NDFrameIndexer for fast instantiation and attribute access. """ diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 7478179df3b75..aeb1be121bc9e 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -771,7 +771,7 @@ class _timelex: _DATEUTIL_LEXER_SPLIT = _timelex.split -def _format_is_iso(f) -> bint: +def format_is_iso(f: str) -> bint: """ Does format match the iso8601 set that can be handled by the C parser? Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different @@ -789,7 +789,7 @@ def _format_is_iso(f) -> bint: return False -def _guess_datetime_format( +def guess_datetime_format( dt_str, bint dayfirst=False, dt_str_parse=du_parse, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8b2bb7832b5d0..1bea3a9eb137e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -602,9 +602,9 @@ def astype(self, dtype, copy=True): # Rendering Methods def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_datetime64_from_values + from pandas.io.formats.format import get_format_datetime64_from_values - fmt = _get_format_datetime64_from_values(self, date_format) + fmt = get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 3e21d01355dda..2d694c469b3a9 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -379,14 +379,14 @@ def median( # Rendering Methods def _formatter(self, 
boxed=False): - from pandas.io.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - return _get_format_timedelta64(self, box=True) + return get_format_timedelta64(self, box=True) def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - formatter = _get_format_timedelta64(self._data, na_rep) + formatter = get_format_timedelta64(self._data, na_rep) return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape) # ---------------------------------------------------------------- diff --git a/pandas/core/common.py b/pandas/core/common.py index 6fd4700ab7f3f..279d512e5a046 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -31,7 +31,7 @@ ABCIndexClass, ABCSeries, ) -from pandas.core.dtypes.inference import _iterable_not_string +from pandas.core.dtypes.inference import iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa @@ -61,7 +61,7 @@ def flatten(l): flattened : generator """ for el in l: - if _iterable_not_string(el): + if iterable_not_string(el): for s in flatten(el): yield s else: diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index 327ec21c3c11c..8a9583c465f50 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -5,7 +5,7 @@ from pandas._config import get_option -def _ensure_decoded(s): +def ensure_decoded(s): """ If we have bytes, decode them to unicode. 
""" diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index e55df1e1d8155..b2144c45c6323 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -15,7 +15,7 @@ from pandas.core.dtypes.common import is_list_like, is_scalar import pandas.core.common as com -from pandas.core.computation.common import _ensure_decoded, result_type_many +from pandas.core.computation.common import ensure_decoded, result_type_many from pandas.core.computation.scope import _DEFAULT_GLOBALS from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded @@ -466,7 +466,7 @@ def stringify(value): v = rhs.value if isinstance(v, (int, float)): v = stringify(v) - v = Timestamp(_ensure_decoded(v)) + v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.rhs.update(v) @@ -475,7 +475,7 @@ def stringify(value): v = lhs.value if isinstance(v, (int, float)): v = stringify(v) - v = Timestamp(_ensure_decoded(v)) + v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.lhs.update(v) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index f1b11a6869c2b..8dd7c1a22d0ae 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -14,7 +14,7 @@ import pandas as pd import pandas.core.common as com from pandas.core.computation import expr, ops, scope as _scope -from pandas.core.computation.common import _ensure_decoded +from pandas.core.computation.common import ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term from pandas.core.construction import extract_array @@ -189,12 +189,12 @@ def stringify(value): encoder = pprint_thing return encoder(value) - kind = _ensure_decoded(self.kind) - meta = _ensure_decoded(self.meta) + kind = ensure_decoded(self.kind) + meta = ensure_decoded(self.meta) if kind == "datetime64" or kind == 
"datetime": if isinstance(v, (int, float)): v = stringify(v) - v = _ensure_decoded(v) + v = ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert("UTC") diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1e70ff90fcd44..3d85ddc7a9abc 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -635,8 +635,8 @@ def is_dtype_equal(source, target) -> bool: False """ try: - source = _get_dtype(source) - target = _get_dtype(target) + source = get_dtype(source) + target = get_dtype(target) return source == target except (TypeError, AttributeError): @@ -984,10 +984,10 @@ def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: - tipo = _get_dtype(arr_or_dtype) + tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): - tipo = _get_dtype(arr_or_dtype.dtype) + tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or getattr(tipo, "base", None) == DT64NS_DTYPE @@ -1372,7 +1372,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: - dtype = _get_dtype(arr_or_dtype) + dtype = get_dtype(arr_or_dtype) except TypeError: return False @@ -1558,13 +1558,13 @@ def _is_dtype(arr_or_dtype, condition) -> bool: if arr_or_dtype is None: return False try: - dtype = _get_dtype(arr_or_dtype) + dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError, UnicodeEncodeError): return False return condition(dtype) -def _get_dtype(arr_or_dtype) -> DtypeObj: +def get_dtype(arr_or_dtype) -> DtypeObj: """ Get the dtype instance associated with an array or dtype object. 
@@ -1695,7 +1695,7 @@ def infer_dtype_from_object(dtype): try: return infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): - # Handles cases like _get_dtype(int) i.e., + # Handles cases like get_dtype(int) i.e., # Python objects that are valid dtypes # (unlike user-defined types, in general) # diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index d1607b5ede6c3..329c4445b05bc 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -68,7 +68,7 @@ def is_number(obj) -> bool: return isinstance(obj, (Number, np.number)) -def _iterable_not_string(obj) -> bool: +def iterable_not_string(obj) -> bool: """ Check if the object is an iterable but not a string. @@ -83,11 +83,11 @@ def _iterable_not_string(obj) -> bool: Examples -------- - >>> _iterable_not_string([1, 2, 3]) + >>> iterable_not_string([1, 2, 3]) True - >>> _iterable_not_string("foo") + >>> iterable_not_string("foo") False - >>> _iterable_not_string(1) + >>> iterable_not_string(1) False """ return isinstance(obj, abc.Iterable) and not isinstance(obj, str) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index f59bb31af2828..163500525dbd8 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -338,7 +338,7 @@ def notna(obj): notnull = notna -def _isna_compat(arr, fill_value=np.nan) -> bool: +def isna_compat(arr, fill_value=np.nan) -> bool: """ Parameters ---------- @@ -496,7 +496,7 @@ def array_equals(left: ArrayLike, right: ArrayLike) -> bool: return array_equivalent(left, right, dtype_equal=True) -def _infer_fill_value(val): +def infer_fill_value(val): """ infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like if we are a NaT, return the correct dtyped @@ -516,11 +516,11 @@ def _infer_fill_value(val): return np.nan -def _maybe_fill(arr, fill_value=np.nan): +def maybe_fill(arr, fill_value=np.nan): """ if we have a compatible fill_value and arr dtype, 
then fill """ - if _isna_compat(arr, fill_value): + if isna_compat(arr, fill_value): arr.fill(fill_value) return arr diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4dd5b7f30e7f0..c076b6e2e181b 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -37,7 +37,7 @@ is_timedelta64_dtype, needs_i8_conversion, ) -from pandas.core.dtypes.missing import _maybe_fill, isna +from pandas.core.dtypes.missing import isna, maybe_fill import pandas.core.algorithms as algorithms from pandas.core.base import SelectionMixin @@ -524,13 +524,11 @@ def _cython_operation( codes, _, _ = self.group_info if kind == "aggregate": - result = _maybe_fill( - np.empty(out_shape, dtype=out_dtype), fill_value=np.nan - ) + result = maybe_fill(np.empty(out_shape, dtype=out_dtype), fill_value=np.nan) counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate(result, counts, values, codes, func, min_count) elif kind == "transform": - result = _maybe_fill( + result = maybe_fill( np.empty_like(values, dtype=out_dtype), fill_value=np.nan ) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dccc8369c5366..85c8396dfd1fe 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -177,9 +177,9 @@ def _simple_new(cls, values: TimedeltaArray, name: Label = None): @property def _formatter_func(self): - from pandas.io.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - return _get_format_timedelta64(self, box=True) + return get_format_timedelta64(self, box=True) # ------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index dd81823055390..cfb17b9498a36 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -4,7 +4,7 @@ from pandas._config.config import option_context -from pandas._libs.indexing import _NDFrameIndexerBase +from 
pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import AbstractMethodError, InvalidIndexError from pandas.util._decorators import doc @@ -22,7 +22,7 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries -from pandas.core.dtypes.missing import _infer_fill_value, isna +from pandas.core.dtypes.missing import infer_fill_value, isna import pandas.core.common as com from pandas.core.construction import array as pd_array @@ -583,7 +583,7 @@ def iat(self) -> "_iAtIndexer": return _iAtIndexer("iat", self) -class _LocationIndexer(_NDFrameIndexerBase): +class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None @@ -1604,7 +1604,7 @@ def _setitem_with_indexer(self, indexer, value): return # add a new item with the dtype setup - self.obj[key] = _infer_fill_value(value) + self.obj[key] = infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes @@ -2017,7 +2017,7 @@ def _align_frame(self, indexer, df: ABCDataFrame): raise ValueError("Incompatible indexer with DataFrame") -class _ScalarAccessIndexer(_NDFrameIndexerBase): +class _ScalarAccessIndexer(NDFrameIndexerBase): """ Access scalars quickly. 
""" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ad388ef3f53b0..b2305736f9d46 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -56,7 +56,7 @@ ABCPandasArray, ABCSeries, ) -from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat import pandas.core.algorithms as algos from pandas.core.array_algos.transforms import shift @@ -487,7 +487,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] ): return blocks - return _extend_blocks([b.downcast(downcast) for b in blocks]) + return extend_blocks([b.downcast(downcast) for b in blocks]) def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ @@ -2474,7 +2474,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] return blocks # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) + return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) def _can_hold_element(self, element: Any) -> bool: return True @@ -2503,7 +2503,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): result = b._replace_single( to_rep, v, inplace=inplace, regex=regex, convert=convert ) - result_blocks = _extend_blocks(result, result_blocks) + result_blocks = extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks @@ -2514,7 +2514,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): result = b._replace_single( to_rep, value, inplace=inplace, regex=regex, convert=convert ) - result_blocks = _extend_blocks(result, result_blocks) + result_blocks = extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks @@ -2769,7 +2769,7 @@ def make_block(values, placement, klass=None, ndim=None, 
dtype=None): # ----------------------------------------------------------------- -def _extend_blocks(result, blocks=None): +def extend_blocks(result, blocks=None): """ return a new extended blocks, given the result """ if blocks is None: blocks = [] @@ -2860,7 +2860,7 @@ def _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray: else: # make sure that we have a nullable type # if we have nulls - if not _isna_compat(v, nn[0]): + if not isna_compat(v, nn[0]): pass elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)): # only compare integers/floats diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 88839d2211f81..b45f0890cafa4 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -10,7 +10,7 @@ from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( - _get_dtype, + get_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -200,7 +200,7 @@ def dtype(self): if not self.needs_filling: return self.block.dtype else: - return _get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0]) + return get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0]) @cache_readonly def is_na(self): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e3098d94afcb..753b949f7c802 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -54,8 +54,8 @@ DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, - _extend_blocks, _safe_reshape, + extend_blocks, get_block_type, make_block, ) @@ -406,7 +406,7 @@ def apply( if not ignore_failures: raise continue - result_blocks = _extend_blocks(applied, result_blocks) + result_blocks = extend_blocks(applied, result_blocks) if ignore_failures: return self._combine(result_blocks) @@ -1868,7 +1868,7 @@ def _consolidate(blocks): merged_blocks = _merge_blocks( list(group_blocks), dtype=dtype, 
can_consolidate=_can_consolidate ) - new_blocks = _extend_blocks(merged_blocks, new_blocks) + new_blocks = extend_blocks(merged_blocks, new_blocks) return new_blocks diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e3f16a3ef4f90..6fdde22a1c514 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -13,7 +13,7 @@ from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask from pandas.core.dtypes.common import ( - _get_dtype, + get_dtype, is_any_int_dtype, is_bool_dtype, is_complex, @@ -678,7 +678,7 @@ def _get_counts_nanvar( count : scalar or array d : scalar or array """ - dtype = _get_dtype(dtype) + dtype = get_dtype(dtype) count = _get_counts(value_counts, mask, axis, dtype=dtype) d = count - dtype.type(ddof) @@ -1234,7 +1234,7 @@ def _get_counts( ------- count : scalar or array """ - dtype = _get_dtype(dtype) + dtype = get_dtype(dtype) if axis is None: if mask is not None: n = mask.size - mask.sum() diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 8724f7674f0c8..33ce5ed49b9c2 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -14,7 +14,7 @@ import pandas.core.common as com from pandas.core.indexes.api import Index, MultiIndex from pandas.core.reshape.concat import concat -from pandas.core.reshape.util import _tile_compat +from pandas.core.reshape.util import tile_compat from pandas.core.shared_docs import _shared_docs from pandas.core.tools.numeric import to_numeric @@ -136,7 +136,7 @@ def melt( result = frame._constructor(mdata, columns=mcolumns) if not ignore_index: - result.index = _tile_compat(frame.index, K) + result.index = tile_compat(frame.index, K) return result diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 6949270317f7c..a1bf3f8ee4119 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -48,10 +48,10 @@ def cartesian_product(X): # if any factor is empty, the cartesian product is empty b = 
np.zeros_like(cumprodX) - return [_tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)] + return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)] -def _tile_compat(arr, num: int): +def tile_compat(arr, num: int): """ Index compat for np.tile. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8fcc5f74ea897..09a53d5a10ae6 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -20,8 +20,8 @@ from pandas._libs.tslibs import Timestamp, conversion, parsing from pandas._libs.tslibs.parsing import ( # noqa DateParseError, - _format_is_iso, - _guess_datetime_format, + format_is_iso, + guess_datetime_format, ) from pandas._libs.tslibs.strptime import array_strptime from pandas._typing import ArrayLike, Label, Timezone @@ -73,7 +73,7 @@ def _guess_datetime_format_for_array(arr, **kwargs): # Try to guess the format based on the first non-NaN element non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): - return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs) + return guess_datetime_format(arr[non_nan_elements[0]], **kwargs) def should_cache( @@ -387,7 +387,7 @@ def _convert_listlike_datetimes( # datetime strings, so in those cases don't use the inferred # format because this path makes process slower in this # special case - format_is_iso8601 = _format_is_iso(format) + format_is_iso8601 = format_is_iso(format) if format_is_iso8601: require_iso8601 = not infer_datetime_format format = None diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 461ef6823918e..3d441f6e737bc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1473,7 +1473,7 @@ def _format_strings(self) -> List[str]: fmt_values = format_array_from_datetime( values.asi8.ravel(), - format=_get_format_datetime64_from_values(values, self.date_format), + format=get_format_datetime64_from_values(values, self.date_format), 
na_rep=self.nat_rep, ).reshape(values.shape) return fmt_values.tolist() @@ -1636,7 +1636,7 @@ def _get_format_datetime64( return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep) -def _get_format_datetime64_from_values( +def get_format_datetime64_from_values( values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str] ) -> Optional[str]: """ given values and a date_format, return a string format """ @@ -1677,13 +1677,13 @@ def __init__( self.box = box def _format_strings(self) -> List[str]: - formatter = self.formatter or _get_format_timedelta64( + formatter = self.formatter or get_format_timedelta64( self.values, nat_rep=self.nat_rep, box=self.box ) return [formatter(x) for x in self.values] -def _get_format_timedelta64( +def get_format_timedelta64( values: Union[np.ndarray, TimedeltaIndex, TimedeltaArray], nat_rep: str = "NaT", box: bool = False, diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index a6c526fcb008a..2db9a9a403e1c 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -649,8 +649,8 @@ def test_is_complex_dtype(): (IntervalDtype(), IntervalDtype()), ], ) -def test__get_dtype(input_param, result): - assert com._get_dtype(input_param) == result +def test_get_dtype(input_param, result): + assert com.get_dtype(input_param) == result @pytest.mark.parametrize( @@ -664,12 +664,12 @@ def test__get_dtype(input_param, result): (pd.DataFrame([1, 2]), "data type not understood"), ], ) -def test__get_dtype_fails(input_param, expected_error_message): +def test_get_dtype_fails(input_param, expected_error_message): # python objects # 2020-02-02 npdev changed error message expected_error_message += f"|Cannot interpret '{input_param}' as a data type" with pytest.raises(TypeError, match=expected_error_message): - com._get_dtype(input_param) + com.get_dtype(input_param) @pytest.mark.parametrize( diff --git a/pandas/tests/tslibs/test_parsing.py 
b/pandas/tests/tslibs/test_parsing.py index dc7421ea63464..70fa724464226 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -148,14 +148,14 @@ def test_parsers_month_freq(date_str, expected): ], ) def test_guess_datetime_format_with_parseable_formats(string, fmt): - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt @pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")]) def test_guess_datetime_format_with_dayfirst(dayfirst, expected): ambiguous_string = "01/01/2011" - result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst) + result = parsing.guess_datetime_format(ambiguous_string, dayfirst=dayfirst) assert result == expected @@ -169,7 +169,7 @@ def test_guess_datetime_format_with_dayfirst(dayfirst, expected): ], ) def test_guess_datetime_format_with_locale_specific_formats(string, fmt): - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt @@ -189,7 +189,7 @@ def test_guess_datetime_format_with_locale_specific_formats(string, fmt): def test_guess_datetime_format_invalid_inputs(invalid_dt): # A datetime string must include a year, month and a day for it to be # guessable, in addition to being a string that looks like a datetime. - assert parsing._guess_datetime_format(invalid_dt) is None + assert parsing.guess_datetime_format(invalid_dt) is None @pytest.mark.parametrize( @@ -205,7 +205,7 @@ def test_guess_datetime_format_invalid_inputs(invalid_dt): ) def test_guess_datetime_format_no_padding(string, fmt): # see gh-11142 - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt
https://api.github.com/repos/pandas-dev/pandas/pulls/36107
2020-09-04T03:15:36Z
2020-09-04T20:51:22Z
2020-09-04T20:51:22Z
2020-09-04T20:53:46Z
BUG: DataFrame.any with axis=1 and bool_only=True
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2aac2596c18cb..ba556c8dcca54 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -246,6 +246,7 @@ Timezones Numeric ^^^^^^^ - Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`) +- Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`) - Conversion diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 59cf4c0e2f81d..3eed10917843b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8639,15 +8639,12 @@ def func(values): else: return op(values, axis=axis, skipna=skipna, **kwds) - def _get_data(axis_matters: bool) -> DataFrame: + def _get_data() -> DataFrame: if filter_type is None: data = self._get_numeric_data() elif filter_type == "bool": - if axis_matters: - # GH#25101, GH#24434 - data = self._get_bool_data() if axis == 0 else self - else: - data = self._get_bool_data() + # GH#25101, GH#24434 + data = self._get_bool_data() else: # pragma: no cover msg = ( f"Generating numeric_only data with filter_type {filter_type} " @@ -8659,7 +8656,7 @@ def _get_data(axis_matters: bool) -> DataFrame: if numeric_only is not None: df = self if numeric_only is True: - df = _get_data(axis_matters=True) + df = _get_data() if axis == 1: df = df.T axis = 0 @@ -8720,8 +8717,7 @@ def blk_func(values): except TypeError: # e.g. in nanops trying to convert strs to float - # TODO: why doesnt axis matter here? 
- data = _get_data(axis_matters=False) + data = _get_data() labels = data._get_agg_axis(axis) values = data.values diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index a112bc80b60b0..bbf2d9f1f0784 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -914,6 +914,13 @@ def test_all_any_boolean(self): tm.assert_series_equal(s.all(level=0), Series([False, True, False])) tm.assert_series_equal(s.any(level=0), Series([False, True, True])) + def test_any_axis1_bool_only(self): + # GH#32432 + df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) + result = df.any(axis=1, bool_only=True) + expected = pd.Series([True, False]) + tm.assert_series_equal(result, expected) + def test_timedelta64_analytics(self): # index min/max
- [x] closes #32432 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36106
2020-09-04T02:55:33Z
2020-09-11T01:11:44Z
2020-09-11T01:11:44Z
2020-09-11T01:20:21Z
DEPR: making copies when indexing along columns
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e3098d94afcb..871e329f49e76 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1328,6 +1328,13 @@ def _slice_take_blocks_ax0( ] return blocks else: + warnings.warn( + "In a future version, indexing along columns will " + "always return a view, never a copy. " + "To restore the previous behavior, use result.copy() " + "instead of result", + FutureWarning, + ) return [ blk.take_nd( slobj, @@ -1390,6 +1397,13 @@ def _slice_take_blocks_ax0( nb = blk.getitem_block([i], new_mgr_locs=ml) blocks.append(nb) else: + warnings.warn( + "In a future version, indexing along columns will " + "always return a view, never a copy. " + "To restore the previous behavior, use result.copy() " + "instead of result", + FutureWarning, + ) nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs) blocks.append(nb)
- [x] closes #33780 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #33597 for what this might look like once the deprecation is enforced (nice perf bump) cc @jorisvandenbossche my understanding is that we will need this deprecation for the all-1D world too. Is that correct? Any thoughts on nice ways to test this and avoid putting tm.assert_produces_warning in a zillion places across the code?
https://api.github.com/repos/pandas-dev/pandas/pulls/36105
2020-09-04T01:37:00Z
2020-09-05T19:01:35Z
null
2021-11-20T23:22:45Z
STY: De-privatize functions in io.excel imported elsewhere
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 9bc1d7fedcb31..74eb65521f5b2 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -24,11 +24,11 @@ validate_header_arg, ) from pandas.io.excel._util import ( - _fill_mi_header, - _get_default_writer, - _maybe_convert_usecols, - _pop_header_name, + fill_mi_header, + get_default_writer, get_writer, + maybe_convert_usecols, + pop_header_name, ) from pandas.io.parsers import TextParser @@ -454,7 +454,7 @@ def parse( sheet = self.get_sheet_by_index(asheetname) data = self.get_sheet_data(sheet, convert_float) - usecols = _maybe_convert_usecols(usecols) + usecols = maybe_convert_usecols(usecols) if not data: output[asheetname] = DataFrame() @@ -473,10 +473,10 @@ def parse( if is_integer(skiprows): row += skiprows - data[row], control_row = _fill_mi_header(data[row], control_row) + data[row], control_row = fill_mi_header(data[row], control_row) if index_col is not None: - header_name, _ = _pop_header_name(data[row], index_col) + header_name, _ = pop_header_name(data[row], index_col) header_names.append(header_name) if is_list_like(index_col): @@ -645,7 +645,7 @@ def __new__(cls, path, engine=None, **kwargs): try: engine = config.get_option(f"io.excel.{ext}.writer") if engine == "auto": - engine = _get_default_writer(ext) + engine = get_default_writer(ext) except KeyError as err: raise ValueError(f"No engine for filetype: '{ext}'") from err cls = get_writer(engine) diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index f39391ae1fe7f..e7684012c1d4c 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes from pandas.io.formats.excel import ExcelCell @@ -59,7 +59,7 @@ def write_cells( wks = Table(name=sheet_name) 
self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): assert freeze_panes is not None self._create_freeze_panes(sheet_name, freeze_panes) diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 3c67902d41baa..89b581da6ed31 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -6,7 +6,7 @@ from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import ExcelWriter, _BaseExcelReader -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes if TYPE_CHECKING: from openpyxl.descriptors.serialisable import Serialisable @@ -385,7 +385,7 @@ def write_cells( wks.title = sheet_name self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.freeze_panes = wks.cell( row=freeze_panes[0] + 1, column=freeze_panes[1] + 1 ) diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index 285aeaf7d4c6e..0d67e6cd2f32b 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -21,7 +21,7 @@ def register_writer(klass): _writers[engine_name] = klass -def _get_default_writer(ext): +def get_default_writer(ext): """ Return the default writer for the given extension. @@ -121,7 +121,7 @@ def _range2cols(areas): return cols -def _maybe_convert_usecols(usecols): +def maybe_convert_usecols(usecols): """ Convert `usecols` into a compatible format for parsing in `parsers.py`. 
@@ -150,7 +150,7 @@ def _maybe_convert_usecols(usecols): return usecols -def _validate_freeze_panes(freeze_panes): +def validate_freeze_panes(freeze_panes): if freeze_panes is not None: if len(freeze_panes) == 2 and all( isinstance(item, int) for item in freeze_panes @@ -167,15 +167,7 @@ def _validate_freeze_panes(freeze_panes): return False -def _trim_excel_header(row): - # trim header row so auto-index inference works - # xlrd uses '' , openpyxl None - while len(row) > 0 and (row[0] == "" or row[0] is None): - row = row[1:] - return row - - -def _fill_mi_header(row, control_row): +def fill_mi_header(row, control_row): """ Forward fill blank entries in row but only inside the same parent index. @@ -208,7 +200,7 @@ def _fill_mi_header(row, control_row): return row, control_row -def _pop_header_name(row, index_col): +def pop_header_name(row, index_col): """ Pop the header name for MultiIndex parsing. diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index bdbb006ae93dc..53f0c94d12e4c 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -3,7 +3,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes class _XlsxStyler: @@ -208,7 +208,7 @@ def write_cells( style_dict = {"null": None} - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.freeze_panes(*(freeze_panes)) for cell in cells: diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index e1f72eb533c51..faebe526d17bd 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -3,7 +3,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes if TYPE_CHECKING: from xlwt import XFStyle @@ -48,7 +48,7 @@ def write_cells( wks 
= self.book.add_sheet(sheet_name) self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.set_panes_frozen(True) wks.set_horz_split_pos(freeze_panes[0]) wks.set_vert_split_pos(freeze_panes[1])
xref #36055
https://api.github.com/repos/pandas-dev/pandas/pulls/36104
2020-09-04T01:09:10Z
2020-09-04T20:52:14Z
2020-09-04T20:52:14Z
2020-09-04T20:56:48Z
TYP: misc fixes for numpy types 4
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 2976747d66dfa..4e96685ba77df 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -17,7 +17,9 @@ class NDArrayBackedExtensionArray(ExtensionArray): ExtensionArray that is backed by a single NumPy ndarray. """ - _ndarray: np.ndarray + @property + def _ndarray(self) -> np.ndarray: + raise AbstractMethodError(self) def _from_backing_data(self: _T, arr: np.ndarray) -> _T: """ diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 23a4a70734c81..c999e8caf2d9a 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -153,7 +153,6 @@ class PandasArray( # pandas internals, which turns off things like block consolidation. _typ = "npy_extension" __array_priority__ = 1000 - _ndarray: np.ndarray # ------------------------------------------------------------------------ # Constructors @@ -172,9 +171,13 @@ def __init__(self, values: Union[np.ndarray, "PandasArray"], copy: bool = False) if copy: values = values.copy() - self._ndarray = values + self._data = values self._dtype = PandasDtype(values.dtype) + @property + def _ndarray(self) -> np.ndarray: + return self._data + @classmethod def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "PandasArray": if isinstance(dtype, PandasDtype): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8fcc5f74ea897..929fd0dc334c8 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -550,7 +550,8 @@ def _adjust_to_origin(arg, origin, unit): @overload -def to_datetime( +# error: Overloaded function signatures 1 and 3 overlap with incompatible return types +def to_datetime( # type: ignore[misc] arg: DatetimeScalar, errors: str = ..., dayfirst: bool = ..., @@ -567,7 +568,8 @@ def to_datetime( @overload -def to_datetime( +# error: Overloaded function signatures 2 and 3 overlap with incompatible return types +def 
to_datetime( # type: ignore[misc] arg: "Series", errors: str = ..., dayfirst: bool = ..., @@ -585,7 +587,7 @@ def to_datetime( @overload def to_datetime( - arg: Union[List, Tuple], + arg: Union[List, Tuple, ArrayLike], errors: str = ..., dayfirst: bool = ..., yearfirst: bool = ..., diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 98aaab6838fba..71b708b6520d8 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -351,7 +351,7 @@ def handle_shared_axes( _remove_labels_from_axis(ax.yaxis) -def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: +def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> np.ndarray: if not is_list_like(axes): return np.array([axes]) elif isinstance(axes, (np.ndarray, ABCIndexClass)):
``` pandas\plotting\_matplotlib\tools.py:356: error: Incompatible return value type (got "ndarray", expected "Sequence[Any]") [return-value] pandas\plotting\_matplotlib\tools.py:358: error: Incompatible return value type (got "Union[ndarray, Any]", expected "Sequence[Any]") [return-value] pandas\plotting\_matplotlib\tools.py:359: error: Incompatible return value type (got "ndarray", expected "Sequence[Any]") [return-value] pandas\core\dtypes\cast.py:1079: error: No overload variant of "to_datetime" matches argument types "ndarray", "str" [call-overload] pandas\core\arrays\categorical.py:1818: error: Signature of "_ndarray" incompatible with supertype "NDArrayBackedExtensionArray" [override] pandas/core/arrays/datetimelike.py:465: error: Signature of "_ndarray" incompatible with supertype "NDArrayBackedExtensionArray" [override] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36102
2020-09-03T20:03:57Z
2020-09-13T19:15:53Z
null
2020-09-13T19:15:53Z
CLN removing trailing commas
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a7e3162ed7b73..1edcc937f72c3 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2349,9 +2349,6 @@ def date_format(dt): def format_query(sql, *args): - """ - - """ processed_args = [] for arg in args: if isinstance(arg, float) and isna(arg): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 6d7fec803a8e0..88f61390957a6 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -1153,7 +1153,7 @@ def test_read_chunks_117( from_frame = parsed.iloc[pos : pos + chunksize, :].copy() from_frame = self._convert_categorical(from_frame) tm.assert_frame_equal( - from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True ) pos += chunksize @@ -1251,7 +1251,7 @@ def test_read_chunks_115( from_frame = parsed.iloc[pos : pos + chunksize, :].copy() from_frame = self._convert_categorical(from_frame) tm.assert_frame_equal( - from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True ) pos += chunksize diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 9ab697cb57690..128a7bdb6730a 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1321,7 +1321,7 @@ def test_scatter_with_c_column_name_with_colors(self, cmap): def test_plot_scatter_with_s(self): # this refers to GH 32904 - df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"],) + df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"]) ax = df.plot.scatter(x="a", y="b", s="c") tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes()) @@ -1716,7 +1716,7 @@ def test_hist_df(self): def test_hist_weights(self, weights): # GH 33173 np.random.seed(0) - df = pd.DataFrame(dict(zip(["A", 
"B"], np.random.randn(2, 100,)))) + df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100)))) ax1 = _check_plot_works(df.plot, kind="hist", weights=weights) ax2 = _check_plot_works(df.plot, kind="hist") diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index e7637a598403f..59a0183304c76 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -124,7 +124,7 @@ def test_resample_integerarray(): result = ts.resample("3T").mean() expected = Series( - [1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64", + [1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64" ) tm.assert_series_equal(result, expected) @@ -764,7 +764,7 @@ def test_resample_origin(): @pytest.mark.parametrize( - "origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()], + "origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()] ) def test_resample_bad_origin(origin): rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s") @@ -777,9 +777,7 @@ def test_resample_bad_origin(origin): ts.resample("5min", origin=origin) -@pytest.mark.parametrize( - "offset", ["invalid_value", "12dayys", "2000-30-30", object()], -) +@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()]) def test_resample_bad_offset(offset): rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s") ts = Series(np.random.randn(len(rng)), index=rng) @@ -1595,7 +1593,7 @@ def test_downsample_dst_at_midnight(): "America/Havana", ambiguous=True ) dti = pd.DatetimeIndex(dti, freq="D") - expected = DataFrame([7.5, 28.0, 44.5], index=dti,) + expected = DataFrame([7.5, 28.0, 44.5], index=dti) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py index 
08614d04caf4b..d20d93370ec7e 100644 --- a/pandas/tests/reshape/merge/test_merge_index_as_string.py +++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -29,7 +29,7 @@ def df2(): @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) def left_df(request, df1): - """ Construct left test DataFrame with specified levels + """Construct left test DataFrame with specified levels (any of 'outer', 'inner', and 'v1') """ levels = request.param @@ -41,7 +41,7 @@ def left_df(request, df1): @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) def right_df(request, df2): - """ Construct right test DataFrame with specified levels + """Construct right test DataFrame with specified levels (any of 'outer', 'inner', and 'v2') """ levels = request.param diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index 6f5550a6f8209..1aadcfdc30f1b 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -354,7 +354,7 @@ def test_crosstab_normalize(self): crosstab(df.a, df.b, normalize="columns"), ) tm.assert_frame_equal( - crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index"), + crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index") ) row_normal_margins = DataFrame( @@ -377,7 +377,7 @@ def test_crosstab_normalize(self): crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins ) tm.assert_frame_equal( - crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins, + crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins ) tm.assert_frame_equal( crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index c003bfa6a239a..ce13762ea8f86 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -161,7 +161,7 @@ def test_get_dummies_unicode(self, 
sparse): s = [e, eacute, eacute] res = get_dummies(s, prefix="letter", sparse=sparse) exp = DataFrame( - {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8, + {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8 ) if sparse: exp = exp.apply(SparseArray, fill_value=0)
This PR is related to #35925, both current and new versions of black passes these formatting, also I'd like to mention that there seem to be a reported [bug](https://github.com/psf/black/issues/1629) in newer version of black which throws error after removing trailing comma from `pandas/tests/scalar/timestamp/test_constructors.py` that's why I didn't change this formatting. ```diff diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index 316a299ba..c70eacdfa 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -267,7 +267,7 @@ class TestTimestampConstructors: hour=1, minute=2, second=3, - microsecond=999999, + microsecond=999999 ) ) == repr(Timestamp("2015-11-12 01:02:03.999999")) ~ ``` The workaround to this is after removing trailing comma from above code run black with --fast which will add trailing comma back again and and will not format afterwards on re-running black without --fast
https://api.github.com/repos/pandas-dev/pandas/pulls/36101
2020-09-03T19:56:42Z
2020-09-05T12:39:12Z
2020-09-05T12:39:12Z
2020-09-05T12:39:23Z
TYP: misc fixes for numpy types 3
diff --git a/pandas/_typing.py b/pandas/_typing.py index f8af92e07c674..2d6f23ad92885 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -40,7 +40,9 @@ # array-like AnyArrayLike = TypeVar("AnyArrayLike", "ExtensionArray", "Index", "Series", np.ndarray) +AnyArrayLikeUnion = Union["ExtensionArray", "Index", "Series", np.ndarray] ArrayLike = TypeVar("ArrayLike", "ExtensionArray", np.ndarray) +ArrayLikeUnion = Union["ExtensionArray", np.ndarray] # scalars diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 01e20f49917ac..c21425d101d35 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -13,7 +13,7 @@ from pandas._libs import Timedelta, hashtable as libhashtable, lib import pandas._libs.join as libjoin -from pandas._typing import ArrayLike, FrameOrSeries +from pandas._typing import ArrayLikeUnion, FrameOrSeries from pandas.errors import MergeError from pandas.util._decorators import Appender, Substitution @@ -1869,7 +1869,7 @@ def _right_outer_join(x, y, max_groups): def _factorize_keys( - lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: str = "inner" + lk: ArrayLikeUnion, rk: ArrayLikeUnion, sort: bool = True, how: str = "inner" ) -> Tuple[np.array, np.array, int]: """ Encode left and right keys as enumerated types. 
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0913627324c48..8282b36a019d1 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -16,7 +16,7 @@ from pandas._libs import lib, writers as libwriters from pandas._libs.tslibs import timezones -from pandas._typing import ArrayLike, FrameOrSeries, Label +from pandas._typing import AnyArrayLikeUnion, ArrayLike, FrameOrSeries, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import PerformanceWarning @@ -5076,7 +5076,7 @@ def _dtype_to_kind(dtype_str: str) -> str: return kind -def _get_data_and_dtype_name(data: ArrayLike): +def _get_data_and_dtype_name(data: AnyArrayLikeUnion): """ Convert the passed data into a storable form and a dtype string. """
``` pandas\core\reshape\merge.py:1931: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] pandas\core\reshape\merge.py:1932: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] pandas\core\reshape\merge.py:1941: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] pandas\core\reshape\merge.py:1950: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] pandas\core\reshape\merge.py:1951: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] pandas\io\pytables.py:5084: error: Incompatible types in assignment (expression has type "ndarray", variable has type "ExtensionArray") [assignment] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36100
2020-09-03T19:04:50Z
2020-09-13T19:18:46Z
null
2020-09-13T19:18:46Z
TYP: misc fixes for numpy types 2
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e6b4cb598989b..1489e08d82bf0 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -651,7 +651,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, If False, scalar belongs to pandas extension types is inferred as object """ - dtype = np.dtype(object) + dtype: DtypeObj = np.dtype(object) # a 1-element ndarray if isinstance(val, np.ndarray): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1e70ff90fcd44..0bf032725547e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -108,7 +108,7 @@ def ensure_str(value: Union[bytes, Any]) -> str: return value -def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array: +def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.ndarray: """ Ensure that an dtype array of some integer dtype has an int64 dtype if possible. @@ -1388,8 +1388,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: # guess this return arr_or_dtype.is_object and arr_or_dtype.inferred_type == "boolean" elif is_extension_array_dtype(arr_or_dtype): - dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) - return dtype._is_boolean + return getattr(arr_or_dtype, "dtype", arr_or_dtype)._is_boolean return issubclass(dtype.type, np.bool_) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 01e20f49917ac..602ff226f8878 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1870,7 +1870,7 @@ def _right_outer_join(x, y, max_groups): def _factorize_keys( lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: str = "inner" -) -> Tuple[np.array, np.array, int]: +) -> Tuple[np.ndarray, np.ndarray, int]: """ Encode left and right keys as enumerated types.
``` pandas\core\dtypes\cast.py:681: error: Incompatible types in assignment (expression has type "DatetimeTZDtype", variable has type "dtype") [assignment] pandas\core\dtypes\cast.py:715: error: Incompatible types in assignment (expression has type "IntervalDtype", variable has type "dtype") [assignment] pandas\core\dtypes\common.py:1392: error: Item "dtype" of "Union[dtype, ExtensionDtype]" has no attribute "_is_boolean" [union-attr] pandas\core\dtypes\common.py:111: error: Function "numpy.array" is not valid as a type [valid-type] pandas\core\reshape\merge.py:1873: error: Function "numpy.array" is not valid as a type [valid-type] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36099
2020-09-03T18:37:42Z
2020-09-04T17:03:46Z
2020-09-04T17:03:46Z
2020-09-04T17:05:36Z
TYP: misc fixes for numpy types
diff --git a/pandas/_typing.py b/pandas/_typing.py index f8af92e07c674..74bfc9134c3af 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -62,7 +62,7 @@ # other Dtype = Union[ - "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool]] + "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool, object]] ] DtypeObj = Union[np.dtype, "ExtensionDtype"] FilePathOrBuffer = Union[str, Path, IO[AnyStr], IOBase] diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9d75d21c5637a..f297c7165208f 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -6,7 +6,7 @@ import operator from textwrap import dedent -from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, cast from warnings import catch_warnings, simplefilter, warn import numpy as np @@ -60,7 +60,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import DataFrame, Series + from pandas import Categorical, DataFrame, Series _shared_docs: Dict[str, str] = {} @@ -429,8 +429,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: if is_categorical_dtype(comps): # TODO(extension) # handle categoricals - # error: "ExtensionArray" has no attribute "isin" [attr-defined] - return comps.isin(values) # type: ignore[attr-defined] + return cast("Categorical", comps).isin(values) comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 27b1afdb438cb..ec85ec47d625c 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2316,7 +2316,7 @@ def _concat_same_type(self, to_concat): return union_categoricals(to_concat) - def isin(self, values): + def isin(self, values) -> np.ndarray: """ Check whether `values` are contained in Categorical. 
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 02b8ed17244cd..9d6c2789af25b 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -335,7 +335,7 @@ def array( return result -def extract_array(obj, extract_numpy: bool = False): +def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. @@ -383,7 +383,9 @@ def extract_array(obj, extract_numpy: bool = False): if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() - return obj + # error: Incompatible return value type (got "Index", expected "ExtensionArray") + # error: Incompatible return value type (got "Series", expected "ExtensionArray") + return obj # type: ignore[return-value] def sanitize_array( diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e6b4cb598989b..2316ba8a5be67 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1488,7 +1488,7 @@ def find_common_type(types: List[DtypeObj]) -> DtypeObj: if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): - return object + return np.dtype("object") return np.find_common_type(types, []) @@ -1550,7 +1550,7 @@ def construct_1d_arraylike_from_scalar( elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"): # we need to coerce to object dtype to avoid # to allow numpy to take our string as a scalar value - dtype = object + dtype = np.dtype("object") if not isna(value): value = ensure_str(value)
``` pandas/core/dtypes/cast.py:1491: error: Incompatible return value type (got "Type[object]", expected "Union[dtype, ExtensionDtype]") [return-value] pandas/core/dtypes/cast.py:1553: error: Incompatible types in assignment (expression has type "Type[object]", variable has type "Union[dtype, ExtensionDtype]") [assignment] pandas/core/construction.py:601: error: Incompatible default for argument "dtype_if_empty" (default has type "Type[object]", argument has type "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]") [assignment] pandas/core/generic.py:6217: error: Argument "dtype_if_empty" to "create_series_with_explicit_dtype" has incompatible type "Type[object]"; expected "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]" [arg-type] pandas/tests/arrays/sparse/test_dtype.py:97: error: Argument 1 to "SparseDtype" has incompatible type "Type[object]"; expected "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]" [arg-type] pandas/tests/arrays/sparse/test_dtype.py:172: error: Argument 1 to "SparseDtype" has incompatible type "Type[object]"; expected "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]" [arg-type] pandas/tests/arrays/sparse/test_combine_concat.py:44: error: Argument 1 to "SparseDtype" has incompatible type "Type[object]"; expected "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]" [arg-type] pandas/tests/arrays/sparse/test_array.py:561: error: Argument 1 to "SparseDtype" has incompatible type "Type[object]"; expected "Union[ExtensionDtype, str, dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool]]" [arg-type] ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36098
2020-09-03T18:00:13Z
2020-09-05T02:56:31Z
2020-09-05T02:56:31Z
2020-09-05T07:31:32Z
Center window
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx index 9af1159a805ec..ae67f5909eb56 100644 --- a/pandas/_libs/window/indexers.pyx +++ b/pandas/_libs/window/indexers.pyx @@ -11,7 +11,7 @@ def calculate_variable_window_bounds( int64_t num_values, int64_t window_size, object min_periods, # unused but here to match get_window_bounds signature - object center, # unused but here to match get_window_bounds signature + object center, object closed, const int64_t[:] index ): @@ -30,7 +30,7 @@ def calculate_variable_window_bounds( ignored, exists for compatibility center : object - ignored, exists for compatibility + center the rolling window on the current observation closed : str string of side of the window that should be closed @@ -45,6 +45,7 @@ def calculate_variable_window_bounds( cdef: bint left_closed = False bint right_closed = False + bint center_window = False int index_growth_sign = 1 ndarray[int64_t, ndim=1] start, end int64_t start_bound, end_bound @@ -62,6 +63,8 @@ def calculate_variable_window_bounds( if index[num_values - 1] < index[0]: index_growth_sign = -1 + if center: + center_window = True start = np.empty(num_values, dtype='int64') start.fill(-1) @@ -76,14 +79,27 @@ def calculate_variable_window_bounds( # right endpoint is open else: end[0] = 0 + if center_window: + for j in range(0, num_values+1): + if (index[j] == index[0] + index_growth_sign*window_size/2 and + right_closed): + end[0] = j+1 + break + elif index[j] >= index[0] + index_growth_sign * window_size/2: + end[0] = j + break with nogil: # start is start of slice interval (including) # end is end of slice interval (not including) for i in range(1, num_values): - end_bound = index[i] - start_bound = index[i] - index_growth_sign * window_size + if center_window: + end_bound = index[i] + index_growth_sign * window_size/2 + start_bound = index[i] - index_growth_sign * window_size/2 + else: + end_bound = index[i] + start_bound = index[i] - index_growth_sign * 
window_size # left endpoint is closed if left_closed: @@ -97,14 +113,27 @@ def calculate_variable_window_bounds( start[i] = j break + # for centered window advance the end bound until we are + # outside the constraint + if center_window: + for j in range(end[i - 1], num_values+1): + if j == num_values: + end[i] = j + elif ((index[j] - end_bound) * index_growth_sign == 0 and + right_closed): + end[i] = j+1 + break + elif (index[j] - end_bound) * index_growth_sign >= 0: + end[i] = j + break # end bound is previous end # or current index - if (index[end[i - 1]] - end_bound) * index_growth_sign <= 0: + elif (index[end[i - 1]] - end_bound) * index_growth_sign <= 0: end[i] = i + 1 else: end[i] = end[i - 1] # right endpoint is open - if not right_closed: + if not right_closed and not center_window: end[i] -= 1 return start, end diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 558c0eeb0ea65..0b087bebe4e32 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -462,7 +462,9 @@ def _get_window_indexer(self, window: int) -> BaseIndexer: if isinstance(self.window, BaseIndexer): return self.window if self.is_freq_type: - return VariableWindowIndexer(index_array=self._on.asi8, window_size=window) + return VariableWindowIndexer( + index_array=self._on.asi8, window_size=window, center=self.center + ) return FixedWindowIndexer(window_size=window) def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": @@ -470,7 +472,6 @@ def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": Series version of _apply_blockwise """ _, obj = self._create_blocks(self._selected_obj) - try: values = self._prep_values(obj.values) except (TypeError, NotImplementedError) as err: @@ -554,7 +555,14 @@ def homogeneous_func(values: np.ndarray): if values.size == 0: return values.copy() - offset = calculate_center_offset(window) if center else 0 + offset = ( + calculate_center_offset(window) + if center 
+ and not isinstance( + self._get_window_indexer(window), VariableWindowIndexer + ) + else 0 + ) additional_nans = np.array([np.nan] * offset) if not is_weighted: @@ -597,7 +605,9 @@ def calc(x): if use_numba_cache: NUMBA_FUNC_CACHE[(kwargs["original_func"], "rolling_apply")] = func - if center: + if center and not isinstance( + self._get_window_indexer(window), VariableWindowIndexer + ): result = self._center_window(result, window) return result @@ -1950,15 +1960,13 @@ def validate(self): if (self.obj.empty or self.is_datetimelike) and isinstance( self.window, (str, BaseOffset, timedelta) ): - self._validate_monotonic() freq = self._validate_freq() - # we don't allow center - if self.center: + # we don't allow center for offset based windows + if self.center and self.obj.empty: raise NotImplementedError( - "center is not implemented for " - "datetimelike and offset based windows" + "center is not implemented for offset based windows" ) # this will raise ValueError on non-fixed freqs
- [x] closes #20012 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Added center functionality for `VariableWindowIndexer`. Note - I am unsure if the NotImplementedError in lines 1966-1969 in rolling.py still correctly raises an error for offset based windows.
https://api.github.com/repos/pandas-dev/pandas/pulls/36097
2020-09-03T17:06:40Z
2020-11-04T08:26:32Z
null
2020-12-29T14:56:12Z
Backport PR #36086 on branch 1.1.x (DOC: minor fixes to whatsnew\v1.1.2.rst)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 17d51c9121f43..8695ff8d11e6d 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -29,9 +29,9 @@ Bug fixes - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) -- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) +- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) -- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) +- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. --------------------------------------------------------------------------- @@ -39,7 +39,7 @@ Bug fixes Other ~~~~~ -- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize`(:issue:`35667`) +- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`) .. 
---------------------------------------------------------------------------
Backport PR #36086: DOC: minor fixes to whatsnew\v1.1.2.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/36095
2020-09-03T16:30:25Z
2020-09-03T18:02:50Z
2020-09-03T18:02:50Z
2020-09-03T18:02:50Z
BUG: extra leading space in to_string when index=False
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8b28a4439e1da..b7f2729eed1ab 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -214,8 +214,6 @@ Performance improvements Bug fixes ~~~~~~~~~ -- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) -- Categorical ^^^^^^^^^^^ @@ -257,7 +255,7 @@ Conversion Strings ^^^^^^^ - +- Bug in :meth:`Series.to_string`, :meth:`DataFrame.to_string`, and :meth:`DataFrame.to_latex` adding a leading space when ``index=False`` (:issue:`24980`) - - @@ -313,6 +311,7 @@ Groupby/resample/rolling - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) +- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) - Reshaping diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3dc4290953360..e19a6933f39d4 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -345,6 +345,7 @@ def _get_formatted_values(self) -> List[str]: None, float_format=self.float_format, na_rep=self.na_rep, + leading_space=self.index, ) def to_string(self) -> str: @@ -960,6 +961,7 @@ def _format_col(self, i: int) -> List[str]: na_rep=self.na_rep, space=self.col_space.get(frame.columns[i]), decimal=self.decimal, + leading_space=self.index, ) def to_html( @@ -1111,7 +1113,7 @@ def format_array( space: Optional[Union[str, int]] = None, 
justify: str = "right", decimal: str = ".", - leading_space: Optional[bool] = None, + leading_space: Optional[bool] = True, quoting: Optional[int] = None, ) -> List[str]: """ @@ -1127,7 +1129,7 @@ def format_array( space justify decimal - leading_space : bool, optional + leading_space : bool, optional, default True Whether the array should be formatted with a leading space. When an array as a column of a Series or DataFrame, we do want the leading space to pad between columns. @@ -1194,7 +1196,7 @@ def __init__( decimal: str = ".", quoting: Optional[int] = None, fixed_width: bool = True, - leading_space: Optional[bool] = None, + leading_space: Optional[bool] = True, ): self.values = values self.digits = digits @@ -1395,9 +1397,11 @@ def format_values_with(float_format): float_format: Optional[FloatFormatType] if self.float_format is None: if self.fixed_width: - float_format = partial( - "{value: .{digits:d}f}".format, digits=self.digits - ) + if self.leading_space is True: + fmt_str = "{value: .{digits:d}f}" + else: + fmt_str = "{value:.{digits:d}f}" + float_format = partial(fmt_str.format, digits=self.digits) else: float_format = self.float_format else: @@ -1429,7 +1433,11 @@ def format_values_with(float_format): ).any() if has_small_values or (too_long and has_large_values): - float_format = partial("{value: .{digits:d}e}".format, digits=self.digits) + if self.leading_space is True: + fmt_str = "{value: .{digits:d}e}" + else: + fmt_str = "{value:.{digits:d}e}" + float_format = partial(fmt_str.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values @@ -1444,7 +1452,11 @@ def _format_strings(self) -> List[str]: class IntArrayFormatter(GenericArrayFormatter): def _format_strings(self) -> List[str]: - formatter = self.formatter or (lambda x: f"{x: d}") + if self.leading_space is False: + formatter_str = lambda x: f"{x:d}".format(x=x) + else: + formatter_str = lambda x: f"{x: d}".format(x=x) + formatter = self.formatter 
or formatter_str fmt_values = [formatter(x) for x in self.values] return fmt_values diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 22942ed75d0f3..0c04c58afe24e 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1546,11 +1546,11 @@ def test_to_string_no_index(self): df_s = df.to_string(index=False) # Leading space is expected for positive numbers. - expected = " x y z\n 11 33 AAA\n 22 -44 " + expected = " x y z\n11 33 AAA\n22 -44 " assert df_s == expected df_s = df[["y", "x", "z"]].to_string(index=False) - expected = " y x z\n 33 11 AAA\n-44 22 " + expected = " y x z\n 33 11 AAA\n-44 22 " assert df_s == expected def test_to_string_line_width_no_index(self): @@ -1565,7 +1565,7 @@ def test_to_string_line_width_no_index(self): df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) df_s = df.to_string(line_width=1, index=False) - expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 " + expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 " assert df_s == expected @@ -2269,7 +2269,7 @@ def test_to_string_without_index(self): # GH 11729 Test index=False option s = Series([1, 2, 3, 4]) result = s.to_string(index=False) - expected = " 1\n" + " 2\n" + " 3\n" + " 4" + expected = "1\n" + "2\n" + "3\n" + "4" assert result == expected def test_unicode_name_in_footer(self): @@ -3391,3 +3391,37 @@ def test_filepath_or_buffer_bad_arg_raises(float_frame, method): msg = "buf is not a file name and it has no write method" with pytest.raises(TypeError, match=msg): getattr(float_frame, method)(buf=object()) + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ("a", "a"), + (["a", "b"], "a\nb"), + ([1, "a"], "1\na"), + (1, "1"), + ([0, -1], " 0\n-1"), + (1.0, "1.0"), + ([" a", " b"], " a\n b"), + ([".1", "1"], ".1\n 1"), + (["10", "-10"], " 10\n-10"), + ], +) +def test_format_remove_leading_space_series(input_array, expected): + # GH: 24980 + s = 
pd.Series(input_array).to_string(index=False) + assert s == expected + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ({"A": ["a"]}, "A\na"), + ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"), + ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"), + ], +) +def test_format_remove_leading_space_dataframe(input_array, expected): + # GH: 24980 + df = pd.DataFrame(input_array).to_string(index=False) + assert df == expected diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 96a9ed2b86cf4..9dfd851e91c65 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -50,10 +50,10 @@ def test_to_latex(self, float_frame): withoutindex_result = df.to_latex(index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule - a & b \\ + a & b \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \bottomrule \end{tabular} """ @@ -413,7 +413,7 @@ def test_to_latex_longtable(self): withoutindex_result = df.to_latex(index=False, longtable=True) withoutindex_expected = r"""\begin{longtable}{rl} \toprule - a & b \\ + a & b \\ \midrule \endhead \midrule @@ -423,8 +423,8 @@ def test_to_latex_longtable(self): \bottomrule \endlastfoot - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \end{longtable} """ @@ -663,8 +663,8 @@ def test_to_latex_no_header(self): withoutindex_result = df.to_latex(index=False, header=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule - 1 & b1 \\ - 2 & b2 \\ +1 & b1 \\ +2 & b2 \\ \bottomrule \end{tabular} """ @@ -690,10 +690,10 @@ def test_to_latex_specified_header(self): withoutindex_result = df.to_latex(header=["AA", "BB"], index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule -AA & BB \\ +AA & BB \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \bottomrule \end{tabular} """
- [x] closes #24980 - [x] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR is mainly from #29670 contributed by @charlesdong1991, and I made a few changes on his work. The code have passed all tests modified and added in `test_format.py` and `test_to_latex.py`. Any comment is welcomed!
https://api.github.com/repos/pandas-dev/pandas/pulls/36094
2020-09-03T16:14:40Z
2020-09-06T17:49:28Z
2020-09-06T17:49:27Z
2020-09-06T17:49:33Z
BUG: df.replace with numeric values and str to_replace
diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index 2e68a0598bb71..28206192dd161 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -689,32 +689,6 @@ You can also operate on the DataFrame in place: df.replace(1.5, np.nan, inplace=True) -.. warning:: - - When replacing multiple ``bool`` or ``datetime64`` objects, the first - argument to ``replace`` (``to_replace``) must match the type of the value - being replaced. For example, - - .. code-block:: python - - >>> s = pd.Series([True, False, True]) - >>> s.replace({'a string': 'new value', True: False}) # raises - TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' - - will raise a ``TypeError`` because one of the ``dict`` keys is not of the - correct type for replacement. - - However, when replacing a *single* object such as, - - .. ipython:: python - - s = pd.Series([True, False, True]) - s.replace('a string', 'another string') - - the original ``NDFrame`` object will be returned untouched. We're working on - unifying this API, but for backwards compatibility reasons we cannot break - the latter behavior. See :issue:`6354` for more details. - Missing data casting rules and indexing --------------------------------------- diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e65daa439a225..ff806f7ce5ceb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -337,6 +337,7 @@ ExtensionArray Other ^^^^^ - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) +- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py new file mode 100644 index 0000000000000..6ac3cc1f9f2fe --- /dev/null +++ b/pandas/core/array_algos/replace.py @@ -0,0 +1,95 @@ +""" +Methods used by Block.replace and related methods. +""" +import operator +import re +from typing import Optional, Pattern, Union + +import numpy as np + +from pandas._typing import ArrayLike, Scalar + +from pandas.core.dtypes.common import ( + is_datetimelike_v_numeric, + is_numeric_v_string_like, + is_scalar, +) +from pandas.core.dtypes.missing import isna + + +def compare_or_regex_search( + a: ArrayLike, + b: Union[Scalar, Pattern], + regex: bool = False, + mask: Optional[ArrayLike] = None, +) -> Union[ArrayLike, bool]: + """ + Compare two array_like inputs of the same shape or two scalar values + + Calls operator.eq or re.search, depending on regex argument. If regex is + True, perform an element-wise regex matching. + + Parameters + ---------- + a : array_like + b : scalar or regex pattern + regex : bool, default False + mask : array_like or None (default) + + Returns + ------- + mask : array_like of bool + """ + + def _check_comparison_types( + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern] + ): + """ + Raises an error if the two arrays (a,b) cannot be compared. + Otherwise, returns the comparison result as expected. 
+ """ + if is_scalar(result) and isinstance(a, np.ndarray): + type_names = [type(a).__name__, type(b).__name__] + + if isinstance(a, np.ndarray): + type_names[0] = f"ndarray(dtype={a.dtype})" + + raise TypeError( + f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" + ) + + if not regex: + op = lambda x: operator.eq(x, b) + else: + op = np.vectorize( + lambda x: bool(re.search(b, x)) + if isinstance(x, str) and isinstance(b, (str, Pattern)) + else False + ) + + # GH#32621 use mask to avoid comparing to NAs + if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): + mask = np.reshape(~(isna(a)), a.shape) + if isinstance(a, np.ndarray): + a = a[mask] + + if is_numeric_v_string_like(a, b): + # GH#29553 avoid deprecation warnings from numpy + return np.zeros(a.shape, dtype=bool) + + elif is_datetimelike_v_numeric(a, b): + # GH#29553 avoid deprecation warnings from numpy + _check_comparison_types(False, a, b) + return False + + result = op(a) + + if isinstance(result, np.ndarray) and mask is not None: + # The shape of the mask can differ to that of the result + # since we may compare only a subset of a's or b's elements + tmp = np.zeros(mask.shape, dtype=np.bool_) + tmp[mask] = result + result = tmp + + _check_comparison_types(result, a, b) + return result diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6c8780a0fc186..7b8072279ce69 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6559,20 +6559,6 @@ def replace( 1 new new 2 bait xyz - Note that when replacing multiple ``bool`` or ``datetime64`` objects, - the data types in the `to_replace` parameter must match the data - type of the value being replaced: - - >>> df = pd.DataFrame({{'A': [True, False, True], - ... 'B': [False, True, False]}}) - >>> df.replace({{'a string': 'new value', True: False}}) # raises - Traceback (most recent call last): - ... 
- TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' - - This raises a ``TypeError`` because one of the ``dict`` keys is not of - the correct type for replacement. - Compare the behavior of ``s.replace({{'a': None}})`` and ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ad388ef3f53b0..30ea2766e5133 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -11,7 +11,7 @@ from pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import tz_compare -from pandas._typing import ArrayLike +from pandas._typing import ArrayLike, Scalar from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -59,6 +59,7 @@ from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna import pandas.core.algorithms as algos +from pandas.core.array_algos.replace import compare_or_regex_search from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, @@ -792,7 +793,6 @@ def _replace_list( self, src_list: List[Any], dest_list: List[Any], - masks: List[np.ndarray], inplace: bool = False, regex: bool = False, ) -> List["Block"]: @@ -801,11 +801,28 @@ def _replace_list( """ src_len = len(src_list) - 1 + def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: + """ + Generate a bool array by perform an equality check, or perform + an element-wise regular expression matching + """ + if isna(s): + return ~mask + + s = com.maybe_box_datetimelike(s) + return compare_or_regex_search(self.values, s, regex, mask) + + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + mask = ~isna(self.values) + + masks = [comp(s, mask, regex) for s in src_list] + rb = [self if inplace else self.copy()] for i, (src, 
dest) in enumerate(zip(src_list, dest_list)): new_rb: List["Block"] = [] for blk in rb: - m = masks[i][blk.mgr_locs.indexer] + m = masks[i] convert = i == src_len # only convert once at the end result = blk._replace_coerce( mask=m, @@ -2908,7 +2925,9 @@ def _extract_bool_array(mask: ArrayLike) -> np.ndarray: """ if isinstance(mask, ExtensionArray): # We could have BooleanArray, Sparse[bool], ... - mask = np.asarray(mask, dtype=np.bool_) + # Except for BooleanArray, this is equivalent to just + # np.asarray(mask, dtype=bool) + mask = mask.to_numpy(dtype=bool, na_value=False) assert isinstance(mask, np.ndarray), type(mask) assert mask.dtype == bool, mask.dtype diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e3098d94afcb..248cfb2490c9e 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1,14 +1,11 @@ from collections import defaultdict import itertools -import operator -import re from typing import ( Any, DefaultDict, Dict, List, Optional, - Pattern, Sequence, Tuple, TypeVar, @@ -19,7 +16,7 @@ import numpy as np from pandas._libs import internals as libinternals, lib -from pandas._typing import ArrayLike, DtypeObj, Label, Scalar +from pandas._typing import ArrayLike, DtypeObj, Label from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -29,12 +26,9 @@ ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, - is_datetimelike_v_numeric, is_dtype_equal, is_extension_array_dtype, is_list_like, - is_numeric_v_string_like, - is_scalar, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype @@ -44,7 +38,6 @@ import pandas.core.algorithms as algos from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject -import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api 
import Index, ensure_index @@ -628,31 +621,10 @@ def replace_list( """ do a list replace """ inplace = validate_bool_kwarg(inplace, "inplace") - # figure out our mask apriori to avoid repeated replacements - values = self.as_array() - - def comp(s: Scalar, mask: np.ndarray, regex: bool = False): - """ - Generate a bool array by perform an equality check, or perform - an element-wise regular expression matching - """ - if isna(s): - return ~mask - - s = com.maybe_box_datetimelike(s) - return _compare_or_regex_search(values, s, regex, mask) - - # Calculate the mask once, prior to the call of comp - # in order to avoid repeating the same computations - mask = ~isna(values) - - masks = [comp(s, mask, regex) for s in src_list] - bm = self.apply( "_replace_list", src_list=src_list, dest_list=dest_list, - masks=masks, inplace=inplace, regex=regex, ) @@ -1900,80 +1872,6 @@ def _merge_blocks( return blocks -def _compare_or_regex_search( - a: ArrayLike, - b: Union[Scalar, Pattern], - regex: bool = False, - mask: Optional[ArrayLike] = None, -) -> Union[ArrayLike, bool]: - """ - Compare two array_like inputs of the same shape or two scalar values - - Calls operator.eq or re.search, depending on regex argument. If regex is - True, perform an element-wise regex matching. - - Parameters - ---------- - a : array_like - b : scalar or regex pattern - regex : bool, default False - mask : array_like or None (default) - - Returns - ------- - mask : array_like of bool - """ - - def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern] - ): - """ - Raises an error if the two arrays (a,b) cannot be compared. - Otherwise, returns the comparison result as expected. 
- """ - if is_scalar(result) and isinstance(a, np.ndarray): - type_names = [type(a).__name__, type(b).__name__] - - if isinstance(a, np.ndarray): - type_names[0] = f"ndarray(dtype={a.dtype})" - - raise TypeError( - f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" - ) - - if not regex: - op = lambda x: operator.eq(x, b) - else: - op = np.vectorize( - lambda x: bool(re.search(b, x)) - if isinstance(x, str) and isinstance(b, (str, Pattern)) - else False - ) - - # GH#32621 use mask to avoid comparing to NAs - if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): - mask = np.reshape(~(isna(a)), a.shape) - if isinstance(a, np.ndarray): - a = a[mask] - - if is_datetimelike_v_numeric(a, b) or is_numeric_v_string_like(a, b): - # GH#29553 avoid deprecation warnings from numpy - _check_comparison_types(False, a, b) - return False - - result = op(a) - - if isinstance(result, np.ndarray) and mask is not None: - # The shape of the mask can differ to that of the result - # since we may compare only a subset of a's or b's elements - tmp = np.zeros(mask.shape, dtype=np.bool_) - tmp[mask] = result - result = tmp - - _check_comparison_types(result, a, b) - return result - - def _fast_count_smallints(arr: np.ndarray) -> np.ndarray: """Faster version of set(arr) for sequences of small numbers.""" counts = np.bincount(arr.astype(np.int_)) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 83dfd42ae2a6e..ea2488dfc0877 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1131,8 +1131,19 @@ def test_replace_bool_with_bool(self): def test_replace_with_dict_with_bool_keys(self): df = DataFrame({0: [True, False], 1: [False, True]}) - with pytest.raises(TypeError, match="Cannot compare types .+"): - df.replace({"asdf": "asdb", True: "yes"}) + result = df.replace({"asdf": "asdb", True: "yes"}) + expected = DataFrame({0: ["yes", False], 
1: [False, "yes"]}) + tm.assert_frame_equal(result, expected) + + def test_replace_dict_strings_vs_ints(self): + # GH#34789 + df = pd.DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) + result = df.replace({"replace_string": "test"}) + + tm.assert_frame_equal(result, df) + + result = df["Y0"].replace({"replace_string": "test"}) + tm.assert_series_equal(result, df["Y0"]) def test_replace_truthy(self): df = DataFrame({"a": [True, True]}) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index ccaa005369a1c..e255d46e81851 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -218,8 +218,9 @@ def test_replace_bool_with_bool(self): def test_replace_with_dict_with_bool_keys(self): s = pd.Series([True, False, True]) - with pytest.raises(TypeError, match="Cannot compare types .+"): - s.replace({"asdf": "asdb", True: "yes"}) + result = s.replace({"asdf": "asdb", True: "yes"}) + expected = pd.Series(["yes", False, "yes"]) + tm.assert_series_equal(result, expected) def test_replace2(self): N = 100
- [x] closes #34789 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry We also avoid copies by not calling `self.as_array` and instead moving the mask-finding to the block level.
https://api.github.com/repos/pandas-dev/pandas/pulls/36093
2020-09-03T15:57:21Z
2020-09-05T03:21:50Z
2020-09-05T03:21:50Z
2020-09-05T03:41:43Z
CI: unpin numpy for CI / Checks github action
diff --git a/environment.yml b/environment.yml index f54bf41c14c75..ebf22bbf067a6 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,7 @@ channels: - conda-forge dependencies: # required - - numpy>=1.16.5, <1.20 # gh-39513 + - numpy>=1.16.5 - python=3 - python-dateutil>=2.7.3 - pytz diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 1d8077da76469..e551f05efa31b 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -43,7 +43,9 @@ def np_datetime64_compat(s, *args, **kwargs): warning, when need to pass '2015-01-01 09:00:00' """ s = tz_replacer(s) - return np.datetime64(s, *args, **kwargs) + # error: No overload variant of "datetime64" matches argument types "Any", + # "Tuple[Any, ...]", "Dict[str, Any]" + return np.datetime64(s, *args, **kwargs) # type: ignore[call-overload] def np_array_datetime64_compat(arr, *args, **kwargs): diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 0a4e03fa97402..a8a761b5f4aac 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -181,7 +181,9 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i # get the new index of columns by comparison col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) - return aggspec, columns, col_idx_order + # error: Incompatible return value type (got "Tuple[defaultdict[Any, Any], + # Any, ndarray]", expected "Tuple[Dict[Any, Any], List[str], List[int]]") + return aggspec, columns, col_idx_order # type: ignore[return-value] def _make_unique_kwarg_list( diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 04eef635dc79b..57e57f48fdfe5 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -157,7 +157,10 @@ def _ensure_data(values: ArrayLike) -> Tuple[np.ndarray, DtypeObj]: with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) - return values, np.dtype("float64") 
+ # error: Incompatible return value type (got "Tuple[ExtensionArray, + # dtype[floating[_64Bit]]]", expected "Tuple[ndarray, Union[dtype[Any], + # ExtensionDtype]]") + return values, np.dtype("float64") # type: ignore[return-value] except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype @@ -173,7 +176,9 @@ def _ensure_data(values: ArrayLike) -> Tuple[np.ndarray, DtypeObj]: elif is_timedelta64_dtype(values.dtype): from pandas import TimedeltaIndex - values = TimedeltaIndex(values)._data + # error: Incompatible types in assignment (expression has type + # "TimedeltaArray", variable has type "ndarray") + values = TimedeltaIndex(values)._data # type: ignore[assignment] else: # Datetime if values.ndim > 1 and is_datetime64_ns_dtype(values.dtype): @@ -182,27 +187,45 @@ def _ensure_data(values: ArrayLike) -> Tuple[np.ndarray, DtypeObj]: # TODO(EA2D): special case not needed with 2D EAs asi8 = values.view("i8") dtype = values.dtype - return asi8, dtype + # error: Incompatible return value type (got "Tuple[Any, + # Union[dtype, ExtensionDtype, None]]", expected + # "Tuple[ndarray, Union[dtype, ExtensionDtype]]") + return asi8, dtype # type: ignore[return-value] from pandas import DatetimeIndex - values = DatetimeIndex(values)._data + # Incompatible types in assignment (expression has type "DatetimeArray", + # variable has type "ndarray") + values = DatetimeIndex(values)._data # type: ignore[assignment] dtype = values.dtype - return values.asi8, dtype + # error: Item "ndarray" of "Union[PeriodArray, Any, ndarray]" has no attribute + # "asi8" + return values.asi8, dtype # type: ignore[union-attr] elif is_categorical_dtype(values.dtype): - values = cast("Categorical", values) - values = values.codes + # error: Incompatible types in assignment (expression has type "Categorical", + # variable has type "ndarray") + values = cast("Categorical", values) # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type 
"ndarray", + # variable has type "ExtensionArray") + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute "codes" + values = values.codes # type: ignore[assignment,union-attr] dtype = pandas_dtype("category") # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) - return values, dtype + # error: Incompatible return value type (got "Tuple[ExtensionArray, + # Union[dtype[Any], ExtensionDtype]]", expected "Tuple[ndarray, + # Union[dtype[Any], ExtensionDtype]]") + return values, dtype # type: ignore[return-value] # we have failed, return object - values = np.asarray(values, dtype=object) + + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + values = np.asarray(values, dtype=object) # type: ignore[assignment] return ensure_object(values), np.dtype("object") @@ -227,24 +250,40 @@ def _reconstruct_data( return values if is_extension_array_dtype(dtype): - cls = dtype.construct_array_type() + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "construct_array_type" + cls = dtype.construct_array_type() # type: ignore[union-attr] if isinstance(values, cls) and values.dtype == dtype: return values values = cls._from_sequence(values) elif is_bool_dtype(dtype): - values = values.astype(dtype, copy=False) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has + # incompatible type "Union[dtype, ExtensionDtype]"; expected + # "Union[dtype, None, type, _SupportsDtype, str, Tuple[Any, int], + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DtypeDict, + # Tuple[Any, Any]]" + values = values.astype(dtype, copy=False) # type: ignore[arg-type] # we only support object dtypes bool Index if isinstance(original, ABCIndex): values = values.astype(object, copy=False) elif dtype is not None: if is_datetime64_dtype(dtype): - dtype = "datetime64[ns]" + # error: Incompatible types in assignment 
(expression has type + # "str", variable has type "Union[dtype, ExtensionDtype]") + dtype = "datetime64[ns]" # type: ignore[assignment] elif is_timedelta64_dtype(dtype): - dtype = "timedelta64[ns]" + # error: Incompatible types in assignment (expression has type + # "str", variable has type "Union[dtype, ExtensionDtype]") + dtype = "timedelta64[ns]" # type: ignore[assignment] - values = values.astype(dtype, copy=False) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has + # incompatible type "Union[dtype, ExtensionDtype]"; expected + # "Union[dtype, None, type, _SupportsDtype, str, Tuple[Any, int], + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DtypeDict, + # Tuple[Any, Any]]" + values = values.astype(dtype, copy=False) # type: ignore[arg-type] return values @@ -296,14 +335,18 @@ def _get_values_for_rank(values: ArrayLike): if is_categorical_dtype(values): values = cast("Categorical", values)._values_for_rank() - values, _ = _ensure_data(values) + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + values, _ = _ensure_data(values) # type: ignore[assignment] return values def get_data_algo(values: ArrayLike): values = _get_values_for_rank(values) - ndtype = _check_object_for_strings(values) + # error: Argument 1 to "_check_object_for_strings" has incompatible type + # "ExtensionArray"; expected "ndarray" + ndtype = _check_object_for_strings(values) # type: ignore[arg-type] htable = _hashtables.get(ndtype, _hashtables["object"]) return htable, values @@ -460,17 +503,46 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: ) if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): - values = _ensure_arraylike(list(values)) + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Index") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type 
"Series") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "ndarray") + values = _ensure_arraylike(list(values)) # type: ignore[assignment] elif isinstance(values, ABCMultiIndex): # Avoid raising in extract_array - values = np.array(values) - else: - values = extract_array(values, extract_numpy=True) - comps = _ensure_arraylike(comps) - comps = extract_array(comps, extract_numpy=True) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "Index") + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "Series") + values = np.array(values) # type: ignore[assignment] + else: + # error: Incompatible types in assignment (expression has type "Union[Any, + # ExtensionArray]", variable has type "Index") + # error: Incompatible types in assignment (expression has type "Union[Any, + # ExtensionArray]", variable has type "Series") + values = extract_array(values, extract_numpy=True) # type: ignore[assignment] + + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Index") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Series") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "ndarray") + comps = _ensure_arraylike(comps) # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "Union[Any, + # ExtensionArray]", variable has type "Index") + # error: Incompatible types in assignment (expression has type "Union[Any, + # ExtensionArray]", variable has type "Series") + comps = extract_array(comps, extract_numpy=True) # type: ignore[assignment] if is_extension_array_dtype(comps.dtype): - return comps.isin(values) + # error: 
Incompatible return value type (got "Series", expected "ndarray") + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute "isin" + return comps.isin(values) # type: ignore[return-value,union-attr] elif needs_i8_conversion(comps.dtype): # Dispatch to DatetimeLikeArrayMixin.isin @@ -501,7 +573,19 @@ def f(c, v): f = np.in1d else: - common = np.find_common_type([values.dtype, comps.dtype], []) + # error: List item 0 has incompatible type "Union[Any, dtype[Any], + # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]" + # error: List item 1 has incompatible type "Union[Any, ExtensionDtype]"; + # expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]" + # error: List item 1 has incompatible type "Union[dtype[Any], ExtensionDtype]"; + # expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]" + common = np.find_common_type( + [values.dtype, comps.dtype], [] # type: ignore[list-item] + ) values = values.astype(common, copy=False) comps = comps.astype(common, copy=False) name = common.name @@ -916,7 +1000,9 @@ def duplicated(values: ArrayLike, keep: Union[str, bool] = "first") -> np.ndarra ------- duplicated : ndarray """ - values, _ = _ensure_data(values) + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + values, _ = _ensure_data(values) # type: ignore[assignment] ndtype = values.dtype.name f = getattr(htable, f"duplicated_{ndtype}") return f(values, keep=keep) @@ -1188,7 +1274,9 @@ def _get_score(at): else: q = np.asarray(q, np.float64) result = [_get_score(x) for x in q] - result = np.array(result, dtype=np.float64) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has 
type "List[Any]") + result = np.array(result, dtype=np.float64) # type: ignore[assignment] return result @@ -1776,7 +1864,11 @@ def safe_sort( if not isinstance(values, (np.ndarray, ABCExtensionArray)): # don't convert to string types dtype, _ = infer_dtype_from_array(values) - values = np.asarray(values, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type "Union[dtype[Any], + # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + values = np.asarray(values, dtype=dtype) # type: ignore[arg-type] sorter = None diff --git a/pandas/core/apply.py b/pandas/core/apply.py index cccd88ccb7a1e..57147461284fb 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1016,7 +1016,11 @@ def apply_standard(self) -> FrameOrSeriesUnion: with np.errstate(all="ignore"): if isinstance(f, np.ufunc): - return f(obj) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "Series"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + return f(obj) # type: ignore[arg-type] # row-wise access if is_extension_array_dtype(obj.dtype) and hasattr(obj._values, "map"): diff --git a/pandas/core/array_algos/putmask.py b/pandas/core/array_algos/putmask.py index 93046f476c6ba..b552a1be4c36e 100644 --- a/pandas/core/array_algos/putmask.py +++ b/pandas/core/array_algos/putmask.py @@ -120,7 +120,11 @@ def putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray: return _putmask_preserve(values, new, mask) dtype = find_common_type([values.dtype, new.dtype]) - values = values.astype(dtype) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], None, type, + # _SupportsDType, str, 
Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], + # List[Any], _DTypeDict, Tuple[Any, Any]]]" + values = values.astype(dtype) # type: ignore[arg-type] return _putmask_preserve(values, new, mask) @@ -187,10 +191,16 @@ def extract_bool_array(mask: ArrayLike) -> np.ndarray: # We could have BooleanArray, Sparse[bool], ... # Except for BooleanArray, this is equivalent to just # np.asarray(mask, dtype=bool) - mask = mask.to_numpy(dtype=bool, na_value=False) - mask = np.asarray(mask, dtype=bool) - return mask + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + mask = mask.to_numpy(dtype=bool, na_value=False) # type: ignore[assignment] + + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + mask = np.asarray(mask, dtype=bool) # type: ignore[assignment] + # error: Incompatible return value type (got "ExtensionArray", expected "ndarray") + return mask # type: ignore[return-value] def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py index 802fc4db0a36d..501d3308b7d8b 100644 --- a/pandas/core/array_algos/quantile.py +++ b/pandas/core/array_algos/quantile.py @@ -143,10 +143,17 @@ def quantile_ea_compat( mask = np.asarray(values.isna()) mask = np.atleast_2d(mask) - values, fill_value = values._values_for_factorize() - values = np.atleast_2d(values) - - result = quantile_with_mask(values, mask, fill_value, qs, interpolation, axis) + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + values, fill_value = values._values_for_factorize() # type: ignore[assignment] + # error: No overload variant of "atleast_2d" matches argument type "ExtensionArray" + values = np.atleast_2d(values) # type: ignore[call-overload] + + # error: Argument 1 to "quantile_with_mask" has 
incompatible type "ExtensionArray"; + # expected "ndarray" + result = quantile_with_mask( + values, mask, fill_value, qs, interpolation, axis # type: ignore[arg-type] + ) if not is_sparse(orig.dtype): # shape[0] should be 1 as long as EAs are 1D @@ -160,4 +167,5 @@ def quantile_ea_compat( assert result.shape == (1, len(qs)), result.shape result = type(orig)._from_factorized(result[0], orig) - return result + # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") + return result # type: ignore[return-value] diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 201b9fdcc51cc..b0c0799750859 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -95,7 +95,9 @@ def _check_comparison_types( if is_numeric_v_string_like(a, b): # GH#29553 avoid deprecation warnings from numpy - return np.zeros(a.shape, dtype=bool) + # error: Incompatible return value type (got "ndarray", expected + # "Union[ExtensionArray, bool]") + return np.zeros(a.shape, dtype=bool) # type: ignore[return-value] elif is_datetimelike_v_numeric(a, b): # GH#29553 avoid deprecation warnings from numpy @@ -152,6 +154,8 @@ def re_replacer(s): f = np.vectorize(re_replacer, otypes=[values.dtype]) if mask is None: - values[:] = f(values) + # error: Invalid index type "slice" for "ExtensionArray"; expected type + # "Union[int, ndarray]" + values[:] = f(values) # type: ignore[index] else: values[mask] = f(values[mask]) diff --git a/pandas/core/array_algos/take.py b/pandas/core/array_algos/take.py index 054497089c5ab..7eed31663f1cb 100644 --- a/pandas/core/array_algos/take.py +++ b/pandas/core/array_algos/take.py @@ -140,7 +140,14 @@ def take_1d( """ if not isinstance(arr, np.ndarray): # ExtensionArray -> dispatch to their method - return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + # error: Argument 1 to "take" of "ExtensionArray" has incompatible type + # "ndarray"; expected "Sequence[int]" + 
return arr.take( + indexer, # type: ignore[arg-type] + fill_value=fill_value, + allow_fill=allow_fill, + ) indexer, dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( arr, indexer, 0, None, fill_value, allow_fill @@ -174,7 +181,9 @@ def take_2d_multi( row_idx = ensure_int64(row_idx) col_idx = ensure_int64(col_idx) - indexer = row_idx, col_idx + # error: Incompatible types in assignment (expression has type "Tuple[Any, Any]", + # variable has type "ndarray") + indexer = row_idx, col_idx # type: ignore[assignment] mask_info = None # check for promotion based on types only (do this first because @@ -485,8 +494,13 @@ def _take_preprocess_indexer_and_fill_value( if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer mask = indexer == -1 - needs_masking = mask.any() - mask_info = mask, needs_masking + # error: Item "bool" of "Union[Any, bool]" has no attribute "any" + # [union-attr] + needs_masking = mask.any() # type: ignore[union-attr] + # error: Incompatible types in assignment (expression has type + # "Tuple[Union[Any, bool], Any]", variable has type + # "Optional[Tuple[None, bool]]") + mask_info = mask, needs_masking # type: ignore[assignment] if needs_masking: if out is not None and out.dtype != dtype: raise TypeError("Incompatible type for fill_value") diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index 3f45f503d0f62..588fe8adc7241 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -258,7 +258,12 @@ def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any) return result # Determine if we should defer. 
- no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__) + + # error: "Type[ndarray]" has no attribute "__array_ufunc__" + no_defer = ( + np.ndarray.__array_ufunc__, # type: ignore[attr-defined] + cls.__array_ufunc__, + ) for item in inputs: higher_priority = ( diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index d54d1855ac2f8..8beafe3fe4578 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -104,7 +104,9 @@ def take( new_data = take( self._ndarray, - indices, + # error: Argument 2 to "take" has incompatible type "Sequence[int]"; + # expected "ndarray" + indices, # type: ignore[arg-type] allow_fill=allow_fill, fill_value=fill_value, axis=axis, @@ -147,7 +149,8 @@ def ndim(self) -> int: @cache_readonly def size(self) -> int: - return np.prod(self.shape) + # error: Incompatible return value type (got "number", expected "int") + return np.prod(self.shape) # type: ignore[return-value] @cache_readonly def nbytes(self) -> int: @@ -217,7 +220,9 @@ def _concat_same_type( new_values = [x._ndarray for x in to_concat] new_values = np.concatenate(new_values, axis=axis) - return to_concat[0]._from_backing_data(new_values) + # error: Argument 1 to "_from_backing_data" of "NDArrayBackedExtensionArray" has + # incompatible type "List[ndarray]"; expected "ndarray" + return to_concat[0]._from_backing_data(new_values) # type: ignore[arg-type] @doc(ExtensionArray.searchsorted) def searchsorted(self, value, side="left", sorter=None): @@ -258,7 +263,13 @@ def __getitem__( return self._box_func(result) return self._from_backing_data(result) - key = extract_array(key, extract_numpy=True) + # error: Value of type variable "AnyArrayLike" of "extract_array" cannot be + # "Union[int, slice, ndarray]" + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[int, slice, ndarray]") + key = extract_array( # type: ignore[type-var,assignment] + key, extract_numpy=True + ) key = 
check_array_indexer(self, key) result = self._ndarray[key] if lib.is_scalar(result): @@ -274,9 +285,14 @@ def fillna( value, method = validate_fillna_kwargs(value, method) mask = self.isna() - value = missing.check_value_size(value, mask, len(self)) + # error: Argument 2 to "check_value_size" has incompatible type + # "ExtensionArray"; expected "ndarray" + value = missing.check_value_size( + value, mask, len(self) # type: ignore[arg-type] + ) - if mask.any(): + # error: "ExtensionArray" has no attribute "any" + if mask.any(): # type: ignore[attr-defined] if method is not None: # TODO: check value is None # (for now) when self.ndim == 2, we assume axis=0 @@ -412,7 +428,8 @@ def value_counts(self, dropna: bool = True): ) if dropna: - values = self[~self.isna()]._ndarray + # error: Unsupported operand type for ~ ("ExtensionArray") + values = self[~self.isna()]._ndarray # type: ignore[operator] else: values = self._ndarray diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py index c001c57ffe757..34d5ea6cfb20d 100644 --- a/pandas/core/arrays/_ranges.py +++ b/pandas/core/arrays/_ranges.py @@ -161,7 +161,9 @@ def _generate_range_overflow_safe_signed( # Putting this into a DatetimeArray/TimedeltaArray # would incorrectly be interpreted as NaT raise OverflowError - return result + # error: Incompatible return value type (got "signedinteger[_64Bit]", + # expected "int") + return result # type: ignore[return-value] except (FloatingPointError, OverflowError): # with endpoint negative and addend positive we risk # FloatingPointError; with reversed signed we risk OverflowError @@ -175,11 +177,16 @@ def _generate_range_overflow_safe_signed( # watch out for very special case in which we just slightly # exceed implementation bounds, but when passing the result to # np.arange will get a result slightly within the bounds - result = np.uint64(endpoint) + np.uint64(addend) + + # error: Incompatible types in assignment (expression has type + # 
"unsignedinteger[_64Bit]", variable has type "signedinteger[_64Bit]") + result = np.uint64(endpoint) + np.uint64(addend) # type: ignore[assignment] i64max = np.uint64(np.iinfo(np.int64).max) assert result > i64max if result <= i64max + np.uint64(stride): - return result + # error: Incompatible return value type (got "unsignedinteger", expected + # "int") + return result # type: ignore[return-value] raise OutOfBoundsDatetime( f"Cannot generate range with {side}={endpoint} and periods={periods}" diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 86a1bcf24167c..99838602eeb63 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -391,13 +391,15 @@ def __contains__(self, item) -> bool: if not self._can_hold_na: return False elif item is self.dtype.na_value or isinstance(item, self.dtype.type): - return self.isna().any() + # error: "ExtensionArray" has no attribute "any" + return self.isna().any() # type: ignore[attr-defined] else: return False else: return (item == self).any() - def __eq__(self, other: Any) -> ArrayLike: + # error: Signature of "__eq__" incompatible with supertype "object" + def __eq__(self, other: Any) -> ArrayLike: # type: ignore[override] """ Return for `self == other` (element-wise equality). """ @@ -409,7 +411,8 @@ def __eq__(self, other: Any) -> ArrayLike: # underlying arrays) raise AbstractMethodError(self) - def __ne__(self, other: Any) -> ArrayLike: + # error: Signature of "__ne__" incompatible with supertype "object" + def __ne__(self, other: Any) -> ArrayLike: # type: ignore[override] """ Return for `self != other` (element-wise in-equality). 
""" @@ -446,7 +449,12 @@ def to_numpy( ------- numpy.ndarray """ - result = np.asarray(self, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], + # Type[complex], Type[bool], Type[object], None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + result = np.asarray(self, dtype=dtype) # type: ignore[arg-type] if copy or na_value is not lib.no_default: result = result.copy() if na_value is not lib.no_default: @@ -476,7 +484,8 @@ def size(self) -> int: """ The number of elements in the array. """ - return np.prod(self.shape) + # error: Incompatible return value type (got "number", expected "int") + return np.prod(self.shape) # type: ignore[return-value] @property def ndim(self) -> int: @@ -639,7 +648,8 @@ def argmin(self, skipna: bool = True) -> int: ExtensionArray.argmax """ validate_bool_kwarg(skipna, "skipna") - if not skipna and self.isna().any(): + # error: "ExtensionArray" has no attribute "any" + if not skipna and self.isna().any(): # type: ignore[attr-defined] raise NotImplementedError return nargminmax(self, "argmin") @@ -663,7 +673,8 @@ def argmax(self, skipna: bool = True) -> int: ExtensionArray.argmin """ validate_bool_kwarg(skipna, "skipna") - if not skipna and self.isna().any(): + # error: "ExtensionArray" has no attribute "any" + if not skipna and self.isna().any(): # type: ignore[attr-defined] raise NotImplementedError return nargminmax(self, "argmax") @@ -697,9 +708,14 @@ def fillna(self, value=None, method=None, limit=None): value, method = validate_fillna_kwargs(value, method) mask = self.isna() - value = missing.check_value_size(value, mask, len(self)) + # error: Argument 2 to "check_value_size" has incompatible type + # "ExtensionArray"; expected "ndarray" + value = missing.check_value_size( + value, mask, len(self) # 
type: ignore[arg-type] + ) - if mask.any(): + # error: "ExtensionArray" has no attribute "any" + if mask.any(): # type: ignore[attr-defined] if method is not None: func = missing.get_fill_func(method) new_values, _ = func(self.astype(object), limit=limit, mask=mask) @@ -720,7 +736,8 @@ def dropna(self): ------- valid : ExtensionArray """ - return self[~self.isna()] + # error: Unsupported operand type for ~ ("ExtensionArray") + return self[~self.isna()] # type: ignore[operator] def shift(self, periods: int = 1, fill_value: object = None) -> ExtensionArray: """ @@ -865,7 +882,8 @@ def equals(self, other: object) -> bool: if isinstance(equal_values, ExtensionArray): # boolean array with NA -> fill with False equal_values = equal_values.fillna(False) - equal_na = self.isna() & other.isna() + # error: Unsupported left operand type for & ("ExtensionArray") + equal_na = self.isna() & other.isna() # type: ignore[operator] return bool((equal_values | equal_na).all()) def isin(self, values) -> np.ndarray: @@ -954,7 +972,9 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]: ) uniques = self._from_factorized(uniques, self) - return codes, uniques + # error: Incompatible return value type (got "Tuple[ndarray, ndarray]", + # expected "Tuple[ndarray, ExtensionArray]") + return codes, uniques # type: ignore[return-value] _extension_array_shared_docs[ "repeat" @@ -1136,7 +1156,9 @@ def view(self, dtype: Optional[Dtype] = None) -> ArrayLike: # giving a view with the same dtype as self. 
if dtype is not None: raise NotImplementedError(dtype) - return self[:] + # error: Incompatible return value type (got "Union[ExtensionArray, Any]", + # expected "ndarray") + return self[:] # type: ignore[return-value] # ------------------------------------------------------------------------ # Printing diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index b37cf0a0ec579..a84b33d3da9af 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -406,14 +406,18 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: dtype = pandas_dtype(dtype) if isinstance(dtype, ExtensionDtype): - return super().astype(dtype, copy) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return super().astype(dtype, copy) # type: ignore[return-value] if is_bool_dtype(dtype): # astype_nansafe converts np.nan to True if self._hasna: raise ValueError("cannot convert float NaN to bool") else: - return self._data.astype(dtype, copy=copy) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return self._data.astype(dtype, copy=copy) # type: ignore[return-value] # for integer, error if there are missing values if is_integer_dtype(dtype) and self._hasna: @@ -425,7 +429,12 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: if is_float_dtype(dtype): na_value = np.nan # coerce - return self.to_numpy(dtype=dtype, na_value=na_value, copy=False) + + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return self.to_numpy( # type: ignore[return-value] + dtype=dtype, na_value=na_value, copy=False + ) def _values_for_argsort(self) -> np.ndarray: """ @@ -613,7 +622,9 @@ def _logical_method(self, other, op): elif op.__name__ in {"xor", "rxor"}: result, mask = ops.kleene_xor(self._data, other, self._mask, mask) - return BooleanArray(result, mask) + # error: Argument 2 to "BooleanArray" has incompatible type "Optional[Any]"; + # 
expected "ndarray" + return BooleanArray(result, mask) # type: ignore[arg-type] def _cmp_method(self, other, op): from pandas.arrays import ( diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 8c242e3800e48..8588bc9aa94ec 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -412,7 +412,12 @@ def __init__( if null_mask.any(): # We remove null values here, then below will re-insert # them, grep "full_codes" - arr = [values[idx] for idx in np.where(~null_mask)[0]] + + # error: Incompatible types in assignment (expression has type + # "List[Any]", variable has type "ExtensionArray") + arr = [ # type: ignore[assignment] + values[idx] for idx in np.where(~null_mask)[0] + ] arr = sanitize_array(arr, None) values = arr @@ -440,7 +445,9 @@ def __init__( dtype = CategoricalDtype(categories, dtype.ordered) elif is_categorical_dtype(values.dtype): - old_codes = extract_array(values)._codes + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "_codes" + old_codes = extract_array(values)._codes # type: ignore[union-attr] codes = recode_for_categories( old_codes, values.dtype.categories, dtype.categories, copy=copy ) @@ -504,13 +511,32 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: raise ValueError("Cannot convert float NaN to integer") elif len(self.codes) == 0 or len(self.categories) == 0: - result = np.array(self, dtype=dtype, copy=copy) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "Categorical") + result = np.array( # type: ignore[assignment] + self, + # error: Argument "dtype" to "array" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, + # int], Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, + # 
Tuple[Any, Any]]]" + dtype=dtype, # type: ignore[arg-type] + copy=copy, + ) else: # GH8628 (PERF): astype category codes instead of astyping array try: new_cats = np.asarray(self.categories) - new_cats = new_cats.astype(dtype=dtype, copy=copy) + # error: Argument "dtype" to "astype" of "_ArrayOrScalarCommon" has + # incompatible type "Union[ExtensionDtype, dtype[Any]]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, + # int], Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, + # Tuple[Any, Any]]]" + new_cats = new_cats.astype( + dtype=dtype, copy=copy # type: ignore[arg-type] + ) except ( TypeError, # downstream error msg for CategoricalIndex is misleading ValueError, @@ -518,9 +544,14 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" raise ValueError(msg) - result = take_nd(new_cats, libalgos.ensure_platform_int(self._codes)) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "Categorical") + result = take_nd( # type: ignore[assignment] + new_cats, libalgos.ensure_platform_int(self._codes) + ) - return result + # error: Incompatible return value type (got "Categorical", expected "ndarray") + return result # type: ignore[return-value] @cache_readonly def itemsize(self) -> int: @@ -1311,7 +1342,9 @@ def _validate_searchsorted_value(self, value): codes = self._unbox_scalar(value) else: locs = [self.categories.get_loc(x) for x in value] - codes = np.array(locs, dtype=self.codes.dtype) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "int") + codes = np.array(locs, dtype=self.codes.dtype) # type: ignore[assignment] return codes def _validate_fill_value(self, fill_value): @@ -2126,7 +2159,11 @@ def mode(self, dropna=True): if dropna: good = self._codes != -1 codes = self._codes[good] - codes = sorted(htable.mode_int64(ensure_int64(codes), dropna)) + # 
error: Incompatible types in assignment (expression has type "List[Any]", + # variable has type "ndarray") + codes = sorted( # type: ignore[assignment] + htable.mode_int64(ensure_int64(codes), dropna) + ) codes = coerce_indexer_dtype(codes, self.dtype.categories) return self._from_backing_data(codes) @@ -2418,7 +2455,11 @@ def _str_get_dummies(self, sep="|"): # sep may not be in categories. Just bail on this. from pandas.core.arrays import PandasArray - return PandasArray(self.astype(str))._str_get_dummies(sep) + # error: Argument 1 to "PandasArray" has incompatible type + # "ExtensionArray"; expected "Union[ndarray, PandasArray]" + return PandasArray(self.astype(str))._str_get_dummies( # type: ignore[arg-type] + sep + ) # The Series.cat accessor @@ -2618,7 +2659,8 @@ def _get_codes_for_values(values, categories: Index) -> np.ndarray: # Only hit here when we've already coerced to object dtypee. hash_klass, vals = get_data_algo(values) - _, cats = get_data_algo(categories) + # error: Value of type variable "ArrayLike" of "get_data_algo" cannot be "Index" + _, cats = get_data_algo(categories) # type: ignore[type-var] t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 633a20d6bed37..c2ac7517ecba3 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -378,7 +378,10 @@ def _get_getitem_freq(self, key): return self._get_getitem_freq(new_key) return freq - def __setitem__( + # error: Argument 1 of "__setitem__" is incompatible with supertype + # "ExtensionArray"; supertype defines the argument type as "Union[int, + # ndarray]" + def __setitem__( # type: ignore[override] self, key: Union[int, Sequence[int], Sequence[bool], slice], value: Union[NaTType, Any, Sequence[Any]], @@ -455,26 +458,45 @@ def view(self, dtype: Optional[Dtype] = None) -> ArrayLike: # dtypes here. 
Everything else we pass through to the underlying # ndarray. if dtype is None or dtype is self.dtype: - return type(self)(self._ndarray, dtype=self.dtype) + # error: Incompatible return value type (got "DatetimeLikeArrayMixin", + # expected "ndarray") + return type(self)( # type: ignore[return-value] + self._ndarray, dtype=self.dtype + ) if isinstance(dtype, type): # we sometimes pass non-dtype objects, e.g np.ndarray; # pass those through to the underlying ndarray - return self._ndarray.view(dtype) + + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return self._ndarray.view(dtype) # type: ignore[return-value] dtype = pandas_dtype(dtype) if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)): cls = dtype.construct_array_type() - return cls(self.asi8, dtype=dtype) + # error: Incompatible return value type (got "Union[PeriodArray, + # DatetimeArray]", expected "ndarray") + return cls(self.asi8, dtype=dtype) # type: ignore[return-value] elif dtype == "M8[ns]": from pandas.core.arrays import DatetimeArray - return DatetimeArray(self.asi8, dtype=dtype) + # error: Incompatible return value type (got "DatetimeArray", expected + # "ndarray") + return DatetimeArray(self.asi8, dtype=dtype) # type: ignore[return-value] elif dtype == "m8[ns]": from pandas.core.arrays import TimedeltaArray - return TimedeltaArray(self.asi8, dtype=dtype) - return self._ndarray.view(dtype=dtype) + # error: Incompatible return value type (got "TimedeltaArray", expected + # "ndarray") + return TimedeltaArray(self.asi8, dtype=dtype) # type: ignore[return-value] + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + # error: Argument "dtype" to "view" of "_ArrayOrScalarCommon" has incompatible + # type "Union[ExtensionDtype, dtype[Any]]"; expected "Union[dtype[Any], None, + # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + return 
self._ndarray.view(dtype=dtype) # type: ignore[return-value,arg-type] # ------------------------------------------------------------------ # ExtensionArray Interface @@ -849,7 +871,9 @@ def isin(self, values) -> np.ndarray: # ------------------------------------------------------------------ # Null Handling - def isna(self) -> np.ndarray: + # error: Return type "ndarray" of "isna" incompatible with return type "ArrayLike" + # in supertype "ExtensionArray" + def isna(self) -> np.ndarray: # type: ignore[override] return self._isnan @property # NB: override with cache_readonly in immutable subclasses @@ -864,7 +888,8 @@ def _hasnans(self) -> np.ndarray: """ return if I have any nans; enables various perf speedups """ - return bool(self._isnan.any()) + # error: Incompatible return value type (got "bool", expected "ndarray") + return bool(self._isnan.any()) # type: ignore[return-value] def _maybe_mask_results( self, result: np.ndarray, fill_value=iNaT, convert=None @@ -1208,7 +1233,13 @@ def _addsub_object_array(self, other: np.ndarray, op): res_values = op(self.astype("O"), np.asarray(other)) result = pd_array(res_values.ravel()) - result = extract_array(result, extract_numpy=True).reshape(self.shape) + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no attribute + # "reshape" + result = extract_array( + result, extract_numpy=True + ).reshape( # type: ignore[union-attr] + self.shape + ) return result def _time_shift(self, periods, freq=None): @@ -1758,7 +1789,8 @@ def _with_freq(self, freq): freq = to_offset(self.inferred_freq) arr = self.view() - arr._freq = freq + # error: "ExtensionArray" has no attribute "_freq" + arr._freq = freq # type: ignore[attr-defined] return arr # -------------------------------------------------------------- diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ce0ea7bca55cd..7e200097d7e82 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -504,7 +504,10 
@@ def _box_func(self, x) -> Union[Timestamp, NaTType]: return Timestamp(x, freq=self.freq, tz=self.tz) @property - def dtype(self) -> Union[np.dtype, DatetimeTZDtype]: + # error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype" + # incompatible with return type "ExtensionDtype" in supertype + # "ExtensionArray" + def dtype(self) -> Union[np.dtype, DatetimeTZDtype]: # type: ignore[override] """ The dtype for the DatetimeArray. diff --git a/pandas/core/arrays/floating.py b/pandas/core/arrays/floating.py index aace5583ff47a..bbe2f23421fcf 100644 --- a/pandas/core/arrays/floating.py +++ b/pandas/core/arrays/floating.py @@ -305,19 +305,27 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: dtype = pandas_dtype(dtype) if isinstance(dtype, ExtensionDtype): - return super().astype(dtype, copy=copy) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return super().astype(dtype, copy=copy) # type: ignore[return-value] # coerce if is_float_dtype(dtype): # In astype, we consider dtype=float to also mean na_value=np.nan kwargs = {"na_value": np.nan} elif is_datetime64_dtype(dtype): - kwargs = {"na_value": np.datetime64("NaT")} + # error: Dict entry 0 has incompatible type "str": "datetime64"; expected + # "str": "float" + kwargs = {"na_value": np.datetime64("NaT")} # type: ignore[dict-item] else: kwargs = {} - data = self.to_numpy(dtype=dtype, **kwargs) - return astype_nansafe(data, dtype, copy=False) + # error: Argument 2 to "to_numpy" of "BaseMaskedArray" has incompatible + # type "**Dict[str, float]"; expected "bool" + data = self.to_numpy(dtype=dtype, **kwargs) # type: ignore[arg-type] + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return astype_nansafe(data, dtype, copy=False) # type: ignore[return-value] def _values_for_argsort(self) -> np.ndarray: return self._data diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 61d63d2eed6e9..b2308233a6272 
100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -101,7 +101,17 @@ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: ): return None np_dtype = np.find_common_type( - [t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], [] + # error: List comprehension has incompatible type List[Union[Any, + # dtype, ExtensionDtype]]; expected List[Union[dtype, None, type, + # _SupportsDtype, str, Tuple[Any, Union[int, Sequence[int]]], + # List[Any], _DtypeDict, Tuple[Any, Any]]] + [ + t.numpy_dtype # type: ignore[misc] + if isinstance(t, BaseMaskedDtype) + else t + for t in dtypes + ], + [], ) if np.issubdtype(np_dtype, np.integer): return INT_STR_TO_DTYPE[str(np_dtype)] @@ -359,18 +369,26 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: dtype = pandas_dtype(dtype) if isinstance(dtype, ExtensionDtype): - return super().astype(dtype, copy=copy) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return super().astype(dtype, copy=copy) # type: ignore[return-value] # coerce if is_float_dtype(dtype): # In astype, we consider dtype=float to also mean na_value=np.nan na_value = np.nan elif is_datetime64_dtype(dtype): - na_value = np.datetime64("NaT") + # error: Incompatible types in assignment (expression has type + # "datetime64", variable has type "float") + na_value = np.datetime64("NaT") # type: ignore[assignment] else: na_value = lib.no_default - return self.to_numpy(dtype=dtype, na_value=na_value, copy=False) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return self.to_numpy( # type: ignore[return-value] + dtype=dtype, na_value=na_value, copy=False + ) def _values_for_argsort(self) -> np.ndarray: """ diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index f192a34514390..7ccdad11761ab 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -641,7 +641,11 @@ 
def __getitem__(self, key): if is_scalar(left) and isna(left): return self._fill_value return Interval(left, right, self.closed) - if np.ndim(left) > 1: + # error: Argument 1 to "ndim" has incompatible type "Union[ndarray, + # ExtensionArray]"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + if np.ndim(left) > 1: # type: ignore[arg-type] # GH#30588 multi-dimensional indexer disallowed raise ValueError("multi-dimensional indexing not allowed") return self._shallow_copy(left, right) @@ -907,7 +911,9 @@ def copy(self: IntervalArrayT) -> IntervalArrayT: # TODO: Could skip verify_integrity here. return type(self).from_arrays(left, right, closed=closed) - def isna(self) -> np.ndarray: + # error: Return type "ndarray" of "isna" incompatible with return type + # "ArrayLike" in supertype "ExtensionArray" + def isna(self) -> np.ndarray: # type: ignore[override] return isna(self._left) def shift( @@ -1612,7 +1618,10 @@ def _maybe_convert_platform_interval(values) -> ArrayLike: # GH 19016 # empty lists/tuples get object dtype by default, but this is # prohibited for IntervalArray, so coerce to integer instead - return np.array([], dtype=np.int64) + + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return np.array([], dtype=np.int64) # type: ignore[return-value] elif not is_list_like(values) or isinstance(values, ABCDataFrame): # This will raise later, but we avoid passing to maybe_convert_platform return values @@ -1624,4 +1633,5 @@ def _maybe_convert_platform_interval(values) -> ArrayLike: else: values = extract_array(values, extract_numpy=True) - return maybe_convert_platform(values) + # error: Incompatible return value type (got "ExtensionArray", expected "ndarray") + return maybe_convert_platform(values) # type: ignore[return-value] diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py 
index eff06a5c62894..ac0ac2bb21d62 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -212,7 +212,10 @@ def __len__(self) -> int: def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT: return type(self)(~self._data, self._mask.copy()) - def to_numpy( + # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; + # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" + def to_numpy( # type: ignore[override] self, dtype: Optional[NpDtype] = None, copy: bool = False, @@ -281,7 +284,9 @@ def to_numpy( if na_value is lib.no_default: na_value = libmissing.NA if dtype is None: - dtype = object + # error: Incompatible types in assignment (expression has type + # "Type[object]", variable has type "Union[str, dtype[Any], None]") + dtype = object # type: ignore[assignment] if self._hasna: if ( not is_object_dtype(dtype) @@ -305,8 +310,12 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: if is_dtype_equal(dtype, self.dtype): if copy: - return self.copy() - return self + # error: Incompatible return value type (got "BaseMaskedArray", expected + # "ndarray") + return self.copy() # type: ignore[return-value] + # error: Incompatible return value type (got "BaseMaskedArray", expected + # "ndarray") + return self # type: ignore[return-value] # if we are astyping to another nullable masked dtype, we can fastpath if isinstance(dtype, BaseMaskedDtype): @@ -316,7 +325,9 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: # not directly depending on the `copy` keyword mask = self._mask if data is self._data else self._mask.copy() cls = dtype.construct_array_type() - return cls(data, mask, copy=False) + # error: Incompatible return value type (got "BaseMaskedArray", expected + # "ndarray") + return cls(data, mask, copy=False) # type: ignore[return-value] if isinstance(dtype, 
ExtensionDtype): eacls = dtype.construct_array_type() @@ -346,9 +357,13 @@ def _hasna(self) -> bool: # Note: this is expensive right now! The hope is that we can # make this faster by having an optional mask, but not have to change # source code using it.. - return self._mask.any() - def isna(self) -> np.ndarray: + # error: Incompatible return value type (got "bool_", expected "bool") + return self._mask.any() # type: ignore[return-value] + + # error: Return type "ndarray" of "isna" incompatible with return type + # "ArrayLike" in supertype "ExtensionArray" + def isna(self) -> np.ndarray: # type: ignore[override] return self._mask @property @@ -394,7 +409,9 @@ def take( return type(self)(result, mask, copy=False) - def isin(self, values) -> BooleanArray: + # error: Return type "BooleanArray" of "isin" incompatible with return type + # "ndarray" in supertype "ExtensionArray" + def isin(self, values) -> BooleanArray: # type: ignore[override] from pandas.core.arrays import BooleanArray @@ -404,7 +421,9 @@ def isin(self, values) -> BooleanArray: result += self._mask else: result *= np.invert(self._mask) - mask = np.zeros_like(self, dtype=bool) + # error: No overload variant of "zeros_like" matches argument types + # "BaseMaskedArray", "Type[bool]" + mask = np.zeros_like(self, dtype=bool) # type: ignore[call-overload] return BooleanArray(result, mask, copy=False) def copy(self: BaseMaskedArrayT) -> BaseMaskedArrayT: @@ -422,8 +441,14 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]: # the hashtables don't handle all different types of bits uniques = uniques.astype(self.dtype.numpy_dtype, copy=False) - uniques = type(self)(uniques, np.zeros(len(uniques), dtype=bool)) - return codes, uniques + # error: Incompatible types in assignment (expression has type + # "BaseMaskedArray", variable has type "ndarray") + uniques = type(self)( # type: ignore[assignment] + uniques, np.zeros(len(uniques), dtype=bool) + ) + # error: Incompatible return 
value type (got "Tuple[ndarray, ndarray]", + # expected "Tuple[ndarray, ExtensionArray]") + return codes, uniques # type: ignore[return-value] def value_counts(self, dropna: bool = True) -> Series: """ diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index fd95ab987b18a..bef047c29413b 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -97,7 +97,12 @@ def _from_sequence( if isinstance(dtype, PandasDtype): dtype = dtype._dtype - result = np.asarray(scalars, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object], + # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + result = np.asarray(scalars, dtype=dtype) # type: ignore[arg-type] if ( result.ndim > 1 and not hasattr(scalars, "dtype") @@ -185,7 +190,9 @@ def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs): # ------------------------------------------------------------------------ # Pandas ExtensionArray Interface - def isna(self) -> np.ndarray: + # error: Return type "ndarray" of "isna" incompatible with return type + # "ArrayLike" in supertype "ExtensionArray" + def isna(self) -> np.ndarray: # type: ignore[override] return isna(self._ndarray) def _validate_fill_value(self, fill_value): @@ -341,7 +348,10 @@ def skew( # ------------------------------------------------------------------------ # Additional Methods - def to_numpy( + # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; + # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" + def to_numpy( # type: ignore[override] self, dtype: Optional[NpDtype] = None, copy: bool = False, diff --git a/pandas/core/arrays/period.py 
b/pandas/core/arrays/period.py index 7e9e13400e11f..d91522a9e1bb6 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1125,9 +1125,12 @@ def _make_field_arrays(*fields): elif length is None: length = len(x) + # error: Argument 2 to "repeat" has incompatible type "Optional[int]"; expected + # "Union[Union[int, integer[Any]], Union[bool, bool_], ndarray, Sequence[Union[int, + # integer[Any]]], Sequence[Union[bool, bool_]], Sequence[Sequence[Any]]]" return [ np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) - else np.repeat(x, length) + else np.repeat(x, length) # type: ignore[arg-type] for x in fields ] diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py index c3d11793dbd8c..d4faea4fbc42c 100644 --- a/pandas/core/arrays/sparse/accessor.py +++ b/pandas/core/arrays/sparse/accessor.py @@ -354,7 +354,9 @@ def density(self) -> float: """ Ratio of non-sparse points to total (dense) data points. """ - return np.mean([column.array.density for _, column in self._parent.items()]) + # error: Incompatible return value type (got "number", expected "float") + tmp = np.mean([column.array.density for _, column in self._parent.items()]) + return tmp # type: ignore[return-value] @staticmethod def _prep_index(data, index, columns): diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index a209037f9a9a6..088a1165e4df0 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -365,7 +365,12 @@ def __init__( # dtype inference if data is None: # TODO: What should the empty dtype be? Object or float? 
- data = np.array([], dtype=dtype) + + # error: Argument "dtype" to "array" has incompatible type + # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + data = np.array([], dtype=dtype) # type: ignore[arg-type] if not is_array_like(data): try: @@ -394,7 +399,14 @@ def __init__( if isinstance(data, type(self)) and sparse_index is None: sparse_index = data._sparse_index - sparse_values = np.asarray(data.sp_values, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, int], + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]]" + sparse_values = np.asarray( + data.sp_values, dtype=dtype # type: ignore[arg-type] + ) elif sparse_index is None: data = extract_array(data, extract_numpy=True) if not isinstance(data, np.ndarray): @@ -412,10 +424,21 @@ def __init__( fill_value = np.datetime64("NaT", "ns") data = np.asarray(data) sparse_values, sparse_index, fill_value = make_sparse( - data, kind=kind, fill_value=fill_value, dtype=dtype + # error: Argument "dtype" to "make_sparse" has incompatible type + # "Union[ExtensionDtype, dtype[Any], Type[object], None]"; expected + # "Union[str, dtype[Any], None]" + data, + kind=kind, + fill_value=fill_value, + dtype=dtype, # type: ignore[arg-type] ) else: - sparse_values = np.asarray(data, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, int], + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]]" + sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type] if 
len(sparse_values) != sparse_index.npoints: raise AssertionError( f"Non array-like type {type(sparse_values)} must " @@ -503,7 +526,9 @@ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray: try: dtype = np.result_type(self.sp_values.dtype, type(fill_value)) except TypeError: - dtype = object + # error: Incompatible types in assignment (expression has type + # "Type[object]", variable has type "Union[str, dtype[Any], None]") + dtype = object # type: ignore[assignment] out = np.full(self.shape, fill_value, dtype=dtype) out[self.sp_index.to_int_index().indices] = self.sp_values @@ -748,7 +773,9 @@ def factorize(self, na_sentinel=-1): # Given that we have to return a dense array of codes, why bother # implementing an efficient factorize? codes, uniques = algos.factorize(np.asarray(self), na_sentinel=na_sentinel) - uniques = SparseArray(uniques, dtype=self.dtype) + # error: Incompatible types in assignment (expression has type "SparseArray", + # variable has type "Union[ndarray, Index]") + uniques = SparseArray(uniques, dtype=self.dtype) # type: ignore[assignment] return codes, uniques def value_counts(self, dropna: bool = True): @@ -857,7 +884,9 @@ def take(self, indices, *, allow_fill=False, fill_value=None) -> SparseArray: result = self._take_with_fill(indices, fill_value=fill_value) kwargs = {} else: - result = self._take_without_fill(indices) + # error: Incompatible types in assignment (expression has type + # "Union[ndarray, SparseArray]", variable has type "ndarray") + result = self._take_without_fill(indices) # type: ignore[assignment] kwargs = {"dtype": self.dtype} return type(self)(result, fill_value=self.fill_value, kind=self.kind, **kwargs) @@ -1094,14 +1123,38 @@ def astype(self, dtype: Optional[Dtype] = None, copy=True): else: return self.copy() dtype = self.dtype.update_dtype(dtype) - subtype = pandas_dtype(dtype._subtype_with_str) + # error: Item "ExtensionDtype" of "Union[ExtensionDtype, str, dtype[Any], + # Type[str], Type[float], 
Type[int], Type[complex], Type[bool], Type[object], + # None]" has no attribute "_subtype_with_str" + # error: Item "str" of "Union[ExtensionDtype, str, dtype[Any], Type[str], + # Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" has no + # attribute "_subtype_with_str" + # error: Item "dtype[Any]" of "Union[ExtensionDtype, str, dtype[Any], Type[str], + # Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" has no + # attribute "_subtype_with_str" + # error: Item "ABCMeta" of "Union[ExtensionDtype, str, dtype[Any], Type[str], + # Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" has no + # attribute "_subtype_with_str" + # error: Item "type" of "Union[ExtensionDtype, str, dtype[Any], Type[str], + # Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" has no + # attribute "_subtype_with_str" + # error: Item "None" of "Union[ExtensionDtype, str, dtype[Any], Type[str], + # Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" has no + # attribute "_subtype_with_str" + subtype = pandas_dtype(dtype._subtype_with_str) # type: ignore[union-attr] # TODO copy=False is broken for astype_nansafe with int -> float, so cannot # passthrough copy keyword: https://github.com/pandas-dev/pandas/issues/34456 sp_values = astype_nansafe(self.sp_values, subtype, copy=True) - if sp_values is self.sp_values and copy: + # error: Non-overlapping identity check (left operand type: "ExtensionArray", + # right operand t...ype: "ndarray") + if sp_values is self.sp_values and copy: # type: ignore[comparison-overlap] sp_values = sp_values.copy() - return self._simple_new(sp_values, self.sp_index, dtype) + # error: Argument 1 to "_simple_new" of "SparseArray" has incompatible type + # "ExtensionArray"; expected "ndarray" + return self._simple_new( + sp_values, self.sp_index, dtype # type: ignore[arg-type] + ) def map(self, mapper): """ @@ -1396,7 +1449,11 @@ def __array_ufunc__(self, ufunc, method, 
*inputs, **kwargs): return type(self)(result) def __abs__(self): - return np.abs(self) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "SparseArray"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + return np.abs(self) # type: ignore[arg-type] # ------------------------------------------------------------------------ # Ops @@ -1545,7 +1602,11 @@ def make_sparse( index = make_sparse_index(length, indices, kind) sparsified_values = arr[mask] if dtype is not None: - sparsified_values = astype_nansafe(sparsified_values, dtype=dtype) + # error: Argument "dtype" to "astype_nansafe" has incompatible type "Union[str, + # dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]" + sparsified_values = astype_nansafe( + sparsified_values, dtype=dtype # type: ignore[arg-type] + ) # TODO: copy return sparsified_values, index, fill_value diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 948edcbd99e64..9e61675002e64 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -342,7 +342,10 @@ def update_dtype(self, dtype): if is_extension_array_dtype(dtype): raise TypeError("sparse arrays of extension dtypes not supported") - fill_value = astype_nansafe(np.array(self.fill_value), dtype).item() + # error: "ExtensionArray" has no attribute "item" + fill_value = astype_nansafe( + np.array(self.fill_value), dtype + ).item() # type: ignore[attr-defined] dtype = cls(dtype, fill_value=fill_value) return dtype diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 6fd68050bc8dc..67cd6c63c1faa 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -320,7 +320,9 @@ def astype(self, dtype, copy=True): values = arr.astype(dtype.numpy_dtype) return IntegerArray(values, mask, copy=False) elif isinstance(dtype, 
FloatingDtype): - arr = self.copy() + # error: Incompatible types in assignment (expression has type + # "StringArray", variable has type "ndarray") + arr = self.copy() # type: ignore[assignment] mask = self.isna() arr[mask] = "0" values = arr.astype(dtype.numpy_dtype) @@ -434,7 +436,12 @@ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): mask.view("uint8"), convert=False, na_value=na_value, - dtype=np.dtype(dtype), + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be + # "object" + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected + # "Type[object]" + dtype=np.dtype(dtype), # type: ignore[type-var,arg-type] ) if not na_value_is_na: diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index e003efeabcb66..efdc18cd071b5 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -248,7 +248,10 @@ def __arrow_array__(self, type=None): """Convert myself to a pyarrow Array or ChunkedArray.""" return self._data - def to_numpy( + # error: Argument 1 of "to_numpy" is incompatible with supertype "ExtensionArray"; + # supertype defines the argument type as "Union[ExtensionDtype, str, dtype[Any], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], Type[object], None]" + def to_numpy( # type: ignore[override] self, dtype: Optional[NpDtype] = None, copy: bool = False, @@ -341,7 +344,9 @@ def __getitem__(self, item: Any) -> Any: if not len(item): return type(self)(pa.chunked_array([], type=pa.string())) elif is_integer_dtype(item.dtype): - return self.take(item) + # error: Argument 1 to "take" of "ArrowStringArray" has incompatible + # type "ndarray"; expected "Sequence[int]" + return self.take(item) # type: ignore[arg-type] elif is_bool_dtype(item.dtype): return type(self)(self._data.filter(item)) else: @@ -400,7 +405,13 @@ def fillna(self, value=None, method=None, limit=None): if mask.any(): 
if method is not None: func = missing.get_fill_func(method) - new_values, _ = func(self.to_numpy(object), limit=limit, mask=mask) + # error: Argument 1 to "to_numpy" of "ArrowStringArray" has incompatible + # type "Type[object]"; expected "Union[str, dtype[Any], None]" + new_values, _ = func( + self.to_numpy(object), # type: ignore[arg-type] + limit=limit, + mask=mask, + ) new_values = self._from_sequence(new_values) else: # fill with value @@ -423,7 +434,9 @@ def nbytes(self) -> int: """ return self._data.nbytes - def isna(self) -> np.ndarray: + # error: Return type "ndarray" of "isna" incompatible with return type "ArrayLike" + # in supertype "ExtensionArray" + def isna(self) -> np.ndarray: # type: ignore[override] """ Boolean NumPy array indicating if each value is missing. @@ -498,7 +511,8 @@ def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None: # Slice data and insert in-between new_data = [ - *self._data[0:key].chunks, + # error: Slice index must be an integer or None + *self._data[0:key].chunks, # type: ignore[misc] pa.array([value], type=pa.string()), *self._data[(key + 1) :].chunks, ] @@ -589,7 +603,9 @@ def take( if not is_array_like(indices): indices_array = np.asanyarray(indices) else: - indices_array = indices + # error: Incompatible types in assignment (expression has type + # "Sequence[int]", variable has type "ndarray") + indices_array = indices # type: ignore[assignment] if len(self._data) == 0 and (indices_array >= 0).any(): raise IndexError("cannot do a non-empty take") diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index c371e27eeceac..5a45a8d105f6e 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -148,7 +148,9 @@ def _box_func(self, x) -> Union[Timedelta, NaTType]: return Timedelta(x, unit="ns") @property - def dtype(self) -> np.dtype: + # error: Return type "dtype" of "dtype" incompatible with return type + # "ExtensionDtype" in supertype 
"ExtensionArray" + def dtype(self) -> np.dtype: # type: ignore[override] """ The dtype for the TimedeltaArray. @@ -666,7 +668,11 @@ def __floordiv__(self, other): return result elif is_object_dtype(other.dtype): - result = [self[n] // other[n] for n in range(len(self))] + # error: Incompatible types in assignment (expression has type + # "List[Any]", variable has type "ndarray") + result = [ # type: ignore[assignment] + self[n] // other[n] for n in range(len(self)) + ] result = np.array(result) if lib.infer_dtype(result, skipna=False) == "timedelta": result, _ = sequence_to_td64ns(result) @@ -720,7 +726,11 @@ def __rfloordiv__(self, other): return result elif is_object_dtype(other.dtype): - result = [other[n] // self[n] for n in range(len(self))] + # error: Incompatible types in assignment (expression has type + # "List[Any]", variable has type "ndarray") + result = [ # type: ignore[assignment] + other[n] // self[n] for n in range(len(self)) + ] result = np.array(result) return result diff --git a/pandas/core/base.py b/pandas/core/base.py index c02f7bb2edf58..1943aafc7c760 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -617,7 +617,12 @@ def to_numpy( f"to_numpy() got an unexpected keyword argument '{bad_keys}'" ) - result = np.asarray(self._values, dtype=dtype) + # error: Argument "dtype" to "asarray" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], + # Type[complex], Type[bool], Type[object], None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + result = np.asarray(self._values, dtype=dtype) # type: ignore[arg-type] # TODO(GH-24345): Avoid potential double copy if copy or na_value is not lib.no_default: result = result.copy() @@ -730,12 +735,17 @@ def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: skipna = nv.validate_argmax_with_skipna(skipna, 
args, kwargs) if isinstance(delegate, ExtensionArray): - if not skipna and delegate.isna().any(): + # error: "ExtensionArray" has no attribute "any" + if not skipna and delegate.isna().any(): # type: ignore[attr-defined] return -1 else: return delegate.argmax() else: - return nanops.nanargmax(delegate, skipna=skipna) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return nanops.nanargmax( # type: ignore[return-value] + delegate, skipna=skipna + ) def min(self, axis=None, skipna: bool = True, *args, **kwargs): """ @@ -788,12 +798,17 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int: skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): - if not skipna and delegate.isna().any(): + # error: "ExtensionArray" has no attribute "any" + if not skipna and delegate.isna().any(): # type: ignore[attr-defined] return -1 else: return delegate.argmin() else: - return nanops.nanargmin(delegate, skipna=skipna) + # error: Incompatible return value type (got "Union[int, ndarray]", expected + # "int") + return nanops.nanargmin( # type: ignore[return-value] + delegate, skipna=skipna + ) def tolist(self): """ @@ -1318,4 +1333,6 @@ def drop_duplicates(self, keep="first"): return self[~duplicated] # type: ignore[index] def duplicated(self, keep: Union[str, bool] = "first") -> np.ndarray: - return duplicated(self._values, keep=keep) + # error: Value of type variable "ArrayLike" of "duplicated" cannot be + # "Union[ExtensionArray, ndarray]" + return duplicated(self._values, keep=keep) # type: ignore[type-var] diff --git a/pandas/core/common.py b/pandas/core/common.py index 0b2dec371bf02..83848e0532253 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -228,9 +228,16 @@ def asarray_tuplesafe(values, dtype: Optional[NpDtype] = None) -> np.ndarray: if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")): values = list(values) elif isinstance(values, 
ABCIndex): - return values._values - - if isinstance(values, list) and dtype in [np.object_, object]: + # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]", + # expected "ndarray") + return values._values # type: ignore[return-value] + + # error: Non-overlapping container check (element type: "Union[str, dtype[Any], + # None]", container item type: "type") + if isinstance(values, list) and dtype in [ # type: ignore[comparison-overlap] + np.object_, + object, + ]: return construct_1d_object_array_from_listlike(values) result = np.asarray(values, dtype=dtype) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 43900709ad11f..46f32ee401603 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -306,7 +306,17 @@ def array( # Note: we exclude np.ndarray here, will do type inference on it dtype = data.dtype - data = extract_array(data, extract_numpy=True) + # error: Value of type variable "AnyArrayLike" of "extract_array" cannot be + # "Union[Sequence[object], ExtensionArray]" + # error: Value of type variable "AnyArrayLike" of "extract_array" cannot be + # "Union[Sequence[object], Index]" + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[Sequence[object], Index]") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[Sequence[object], Series]") + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[Sequence[object], ndarray]") + data = extract_array(data, extract_numpy=True) # type: ignore[type-var,assignment] # this returns None for not-found dtypes. 
if isinstance(dtype, str): @@ -500,7 +510,9 @@ def sanitize_array( try: subarr = _try_cast(data, dtype, copy, True) except ValueError: - subarr = np.array(data, copy=copy) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "ExtensionArray") + subarr = np.array(data, copy=copy) # type: ignore[assignment] else: # we will try to copy by-definition here subarr = _try_cast(data, dtype, copy, raise_cast_failure) @@ -513,7 +525,9 @@ def sanitize_array( subarr = subarr.astype(dtype, copy=copy) elif copy: subarr = subarr.copy() - return subarr + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return subarr # type: ignore[return-value] elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0: # TODO: deque, array.array @@ -526,7 +540,10 @@ def sanitize_array( subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: subarr = maybe_convert_platform(data) - subarr = maybe_cast_to_datetime(subarr, dtype) + # error: Incompatible types in assignment (expression has type + # "Union[ExtensionArray, ndarray, List[Any]]", variable has type + # "ExtensionArray") + subarr = maybe_cast_to_datetime(subarr, dtype) # type: ignore[assignment] elif isinstance(data, range): # GH#16804 @@ -547,7 +564,13 @@ def sanitize_array( subarr = _sanitize_ndim(subarr, data, dtype, index) if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)): - subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Argument 1 to "_sanitize_str_dtypes" has incompatible type + # "ExtensionArray"; expected "ndarray" + subarr = _sanitize_str_dtypes( # type: ignore[assignment] + subarr, data, dtype, copy # type: ignore[arg-type] + ) is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype) if is_object_dtype(subarr.dtype) and not 
is_object_or_str_dtype: @@ -556,7 +579,8 @@ def sanitize_array( subarr = array(subarr) subarr = extract_array(subarr, extract_numpy=True) - return subarr + # error: Incompatible return value type (got "ExtensionArray", expected "ndarray") + return subarr # type: ignore[return-value] def _sanitize_ndim( @@ -577,11 +601,25 @@ def _sanitize_ndim( raise ValueError("Data must be 1-dimensional") if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): # i.e. PandasDtype("O") - result = com.asarray_tuplesafe(data, dtype=object) + + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Type[object]"; expected "Union[str, dtype[Any], None]" + result = com.asarray_tuplesafe( # type: ignore[assignment] + data, dtype=object # type: ignore[arg-type] + ) cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) else: - result = com.asarray_tuplesafe(data, dtype=dtype) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[str, + # dtype[Any], None]" + result = com.asarray_tuplesafe( # type: ignore[assignment] + data, dtype=dtype # type: ignore[arg-type] + ) return result @@ -600,7 +638,11 @@ def _sanitize_str_dtypes( # GH#19853: If data is a scalar, result has already the result if not lib.is_scalar(data): if not np.all(isna(data)): - data = np.array(data, dtype=dtype, copy=False) + # error: Argument "dtype" to "array" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + data = np.array(data, dtype=dtype, copy=False) # type: 
ignore[arg-type] result = np.array(data, dtype=object, copy=copy) return result @@ -647,7 +689,9 @@ def _try_cast( and not copy and dtype is None ): - return arr + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr # type: ignore[return-value] if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)): # create an extension array from its dtype @@ -666,7 +710,12 @@ def _try_cast( # that we can convert the data to the requested dtype. if is_integer_dtype(dtype): # this will raise if we have e.g. floats - maybe_cast_to_integer_array(arr, dtype) + + # error: Argument 2 to "maybe_cast_to_integer_array" has incompatible type + # "Union[dtype, ExtensionDtype, None]"; expected "Union[ExtensionDtype, str, + # dtype, Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]" + maybe_cast_to_integer_array(arr, dtype) # type: ignore[arg-type] subarr = arr else: subarr = maybe_cast_to_datetime(arr, dtype) diff --git a/pandas/core/describe.py b/pandas/core/describe.py index 3a872c6202e04..57a33e7f90e51 100644 --- a/pandas/core/describe.py +++ b/pandas/core/describe.py @@ -192,7 +192,9 @@ def _select_data(self): # when some numerics are found, keep only numerics default_include = [np.number] if self.datetime_is_numeric: - default_include.append("datetime") + # error: Argument 1 to "append" of "list" has incompatible type "str"; + # expected "Type[number[Any]]" + default_include.append("datetime") # type: ignore[arg-type] data = self.obj.select_dtypes(include=default_include) if len(data.columns) == 0: data = self.obj @@ -232,7 +234,10 @@ def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: """ from pandas import Series - formatted_percentiles = format_percentiles(percentiles) + # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]"; + # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str, + # float]]]" + 
formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type] stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"] d = ( @@ -336,7 +341,10 @@ def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: # GH-30164 from pandas import Series - formatted_percentiles = format_percentiles(percentiles) + # error: Argument 1 to "format_percentiles" has incompatible type "Sequence[float]"; + # expected "Union[ndarray, List[Union[int, float]], List[float], List[Union[str, + # float]]]" + formatted_percentiles = format_percentiles(percentiles) # type: ignore[arg-type] stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"] d = ( @@ -392,7 +400,9 @@ def refine_percentiles(percentiles: Optional[Sequence[float]]) -> Sequence[float The percentiles to include in the output. """ if percentiles is None: - return np.array([0.25, 0.5, 0.75]) + # error: Incompatible return value type (got "ndarray", expected + # "Sequence[float]") + return np.array([0.25, 0.5, 0.75]) # type: ignore[return-value] # explicit conversion of `percentiles` to list percentiles = list(percentiles) @@ -404,7 +414,9 @@ def refine_percentiles(percentiles: Optional[Sequence[float]]) -> Sequence[float if 0.5 not in percentiles: percentiles.append(0.5) - percentiles = np.asarray(percentiles) + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "Optional[Sequence[float]]") + percentiles = np.asarray(percentiles) # type: ignore[assignment] # sort and check for duplicates unique_pcts = np.unique(percentiles) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 6eca89e1a8744..c5d672b207369 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -128,12 +128,16 @@ def maybe_convert_platform( else: # The caller is responsible for ensuring that we have np.ndarray # or ExtensionArray here. 
- arr = values + + # error: Incompatible types in assignment (expression has type "Union[ndarray, + # ExtensionArray]", variable has type "ndarray") + arr = values # type: ignore[assignment] if arr.dtype == object: arr = lib.maybe_convert_objects(arr) - return arr + # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") + return arr # type: ignore[return-value] def is_nested_object(obj) -> bool: @@ -279,7 +283,9 @@ def maybe_downcast_to_dtype( with suppress(TypeError): # e.g. TypeError: int() argument must be a string, a # bytes-like object or a number, not 'Period - return PeriodArray(result, freq=dtype.freq) + + # error: "dtype[Any]" has no attribute "freq" + return PeriodArray(result, freq=dtype.freq) # type: ignore[attr-defined] converted = maybe_downcast_numeric(result, dtype, do_round) if converted is not result: @@ -406,11 +412,20 @@ def maybe_cast_result( ): # We have to special case categorical so as not to upcast # things like counts back to categorical - cls = dtype.construct_array_type() - result = maybe_cast_to_extension_array(cls, result, dtype=dtype) + + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "construct_array_type" + cls = dtype.construct_array_type() # type: ignore[union-attr] + # error: Argument "dtype" to "maybe_cast_to_extension_array" has incompatible + # type "Union[dtype[Any], ExtensionDtype]"; expected "Optional[ExtensionDtype]" + result = maybe_cast_to_extension_array( + cls, result, dtype=dtype # type: ignore[arg-type] + ) elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: - result = maybe_downcast_to_dtype(result, dtype) + # error: Argument 2 to "maybe_downcast_to_dtype" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "Union[str, dtype[Any]]" + result = maybe_downcast_to_dtype(result, dtype) # type: ignore[arg-type] return result @@ -532,7 +547,11 @@ def maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray) -> 
np.ndarray: new_dtype = ensure_dtype_can_hold_na(result.dtype) if new_dtype != result.dtype: - result = result.astype(new_dtype, copy=True) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible + # type "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + result = result.astype(new_dtype, copy=True) # type: ignore[arg-type] np.place(result, mask, np.nan) @@ -615,7 +634,9 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): kinds = ["i", "u", "f", "c", "m", "M"] if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in kinds: - dtype = ensure_dtype_can_hold_na(dtype) + # error: Incompatible types in assignment (expression has type + # "Union[dtype[Any], ExtensionDtype]", variable has type "dtype[Any]") + dtype = ensure_dtype_can_hold_na(dtype) # type: ignore[assignment] fv = na_value_for_dtype(dtype) return dtype, fv @@ -666,13 +687,15 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): if fv.tz is None: return dtype, fv.asm8 - return np.dtype(object), fill_value + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" + return np.dtype(object), fill_value # type: ignore[type-var] elif issubclass(dtype.type, np.timedelta64): inferred, fv = infer_dtype_from_scalar(fill_value, pandas_dtype=True) if inferred == dtype: return dtype, fv - return np.dtype(object), fill_value + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" + return np.dtype(object), fill_value # type: ignore[type-var] elif is_float(fill_value): if issubclass(dtype.type, np.bool_): @@ -916,7 +939,9 @@ def infer_dtype_from_array( (dtype('O'), [1, '1']) """ if isinstance(arr, np.ndarray): - return arr.dtype, arr + # error: Incompatible return value type (got "Tuple[dtype, ndarray]", expected + # "Tuple[Union[dtype, ExtensionDtype], ExtensionArray]") + return 
arr.dtype, arr # type: ignore[return-value] if not is_list_like(arr): raise TypeError("'arr' must be list-like") @@ -925,7 +950,9 @@ def infer_dtype_from_array( return arr.dtype, arr elif isinstance(arr, ABCSeries): - return arr.dtype, np.asarray(arr) + # error: Incompatible return value type (got "Tuple[Any, ndarray]", expected + # "Tuple[Union[dtype, ExtensionDtype], ExtensionArray]") + return arr.dtype, np.asarray(arr) # type: ignore[return-value] # don't force numpy coerce with nan's inferred = lib.infer_dtype(arr, skipna=False) @@ -1005,7 +1032,14 @@ def invalidate_string_dtypes(dtype_set: Set[DtypeObj]): Change string like dtypes to object for ``DataFrame.select_dtypes()``. """ - non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type} + # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + non_string_dtypes = dtype_set - { + np.dtype("S").type, # type: ignore[arg-type] + np.dtype("<U").type, # type: ignore[arg-type] + } if non_string_dtypes != dtype_set: raise TypeError("string dtypes are not allowed, use 'object' instead") @@ -1033,12 +1067,18 @@ def astype_dt64_to_dt64tz( from pandas.core.construction import ensure_wrapped_if_datetimelike values = ensure_wrapped_if_datetimelike(values) - values = cast("DatetimeArray", values) + # error: Incompatible types in assignment (expression has type "DatetimeArray", + # variable has type "ndarray") + values = cast("DatetimeArray", values) # type: ignore[assignment] aware = isinstance(dtype, DatetimeTZDtype) if via_utc: # Series.astype behavior - assert values.tz is None and aware # caller is responsible for checking this + + # caller is responsible for checking this + + # error: "ndarray" has no attribute "tz" + assert values.tz is None and aware # type: ignore[attr-defined] dtype = cast(DatetimeTZDtype, 
dtype) if copy: @@ -1056,12 +1096,17 @@ def astype_dt64_to_dt64tz( # FIXME: GH#33401 this doesn't match DatetimeArray.astype, which # goes through the `not via_utc` path - return values.tz_localize("UTC").tz_convert(dtype.tz) + + # error: "ndarray" has no attribute "tz_localize" + return values.tz_localize("UTC").tz_convert( # type: ignore[attr-defined] + dtype.tz + ) else: # DatetimeArray/DatetimeIndex.astype behavior - if values.tz is None and aware: + # error: "ndarray" has no attribute "tz" + if values.tz is None and aware: # type: ignore[attr-defined] dtype = cast(DatetimeTZDtype, dtype) level = find_stack_level() warnings.warn( @@ -1072,17 +1117,20 @@ def astype_dt64_to_dt64tz( stacklevel=level, ) - return values.tz_localize(dtype.tz) + # error: "ndarray" has no attribute "tz_localize" + return values.tz_localize(dtype.tz) # type: ignore[attr-defined] elif aware: # GH#18951: datetime64_tz dtype but not equal means different tz dtype = cast(DatetimeTZDtype, dtype) - result = values.tz_convert(dtype.tz) + # error: "ndarray" has no attribute "tz_convert" + result = values.tz_convert(dtype.tz) # type: ignore[attr-defined] if copy: result = result.copy() return result - elif values.tz is not None: + # error: "ndarray" has no attribute "tz" + elif values.tz is not None: # type: ignore[attr-defined] level = find_stack_level() warnings.warn( "Using .astype to convert from timezone-aware dtype to " @@ -1093,7 +1141,10 @@ def astype_dt64_to_dt64tz( stacklevel=level, ) - result = values.tz_convert("UTC").tz_localize(None) + # error: "ndarray" has no attribute "tz_convert" + result = values.tz_convert("UTC").tz_localize( # type: ignore[attr-defined] + None + ) if copy: result = result.copy() return result @@ -1161,7 +1212,8 @@ def astype_nansafe( flat = arr.ravel("K") result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna) order = "F" if flags.f_contiguous else "C" - return result.reshape(arr.shape, order=order) + # error: "ExtensionArray" has no attribute 
"reshape"; maybe "shape"? + return result.reshape(arr.shape, order=order) # type: ignore[attr-defined] # We get here with 0-dim from sparse arr = np.atleast_1d(arr) @@ -1179,7 +1231,9 @@ def astype_nansafe( from pandas.core.construction import ensure_wrapped_if_datetimelike arr = ensure_wrapped_if_datetimelike(arr) - return arr.astype(dtype, copy=copy) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr.astype(dtype, copy=copy) # type: ignore[return-value] if issubclass(dtype.type, str): return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False) @@ -1196,11 +1250,15 @@ def astype_nansafe( ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") - return arr.view(dtype) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr.view(dtype) # type: ignore[return-value] # allow frequency conversions if dtype.kind == "M": - return arr.astype(dtype) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr.astype(dtype) # type: ignore[return-value] raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]") @@ -1216,10 +1274,16 @@ def astype_nansafe( ) if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") - return arr.view(dtype) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr.view(dtype) # type: ignore[return-value] elif dtype.kind == "m": - return astype_td64_unit_conversion(arr, dtype, copy=copy) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return astype_td64_unit_conversion( # type: ignore[return-value] + arr, dtype, copy=copy + ) raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]") @@ -1240,11 +1304,23 @@ def astype_nansafe( elif is_datetime64_dtype(dtype): from pandas import to_datetime - return 
astype_nansafe(to_datetime(arr).values, dtype, copy=copy) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return astype_nansafe( # type: ignore[return-value] + # error: No overload variant of "to_datetime" matches argument type + # "ndarray" + to_datetime(arr).values, # type: ignore[call-overload] + dtype, + copy=copy, + ) elif is_timedelta64_dtype(dtype): from pandas import to_timedelta - return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return astype_nansafe( # type: ignore[return-value] + to_timedelta(arr)._values, dtype, copy=copy + ) if dtype.name in ("datetime64", "timedelta64"): msg = ( @@ -1255,9 +1331,13 @@ def astype_nansafe( if copy or is_object_dtype(arr.dtype) or is_object_dtype(dtype): # Explicit copy, or required since NumPy can't view from / to object. - return arr.astype(dtype, copy=True) - return arr.astype(dtype, copy=copy) + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return arr.astype(dtype, copy=True) # type: ignore[return-value] + + # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") + return arr.astype(dtype, copy=copy) # type: ignore[return-value] def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: @@ -1286,7 +1366,11 @@ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> Arra raise TypeError(msg) if is_datetime64tz_dtype(dtype) and is_datetime64_dtype(values.dtype): - return astype_dt64_to_dt64tz(values, dtype, copy, via_utc=True) + # error: Incompatible return value type (got "DatetimeArray", expected + # "ndarray") + return astype_dt64_to_dt64tz( # type: ignore[return-value] + values, dtype, copy, via_utc=True + ) if is_dtype_equal(values.dtype, dtype): if copy: @@ -1297,11 +1381,19 @@ def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) 
-> Arra values = values.astype(dtype, copy=copy) else: - values = astype_nansafe(values, dtype, copy=copy) + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "ndarray") + # error: Argument 1 to "astype_nansafe" has incompatible type "ExtensionArray"; + # expected "ndarray" + values = astype_nansafe( # type: ignore[assignment] + values, dtype, copy=copy # type: ignore[arg-type] + ) # in pandas we don't store numpy str dtypes, so convert to object if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): - values = np.array(values, dtype=object) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + values = np.array(values, dtype=object) # type: ignore[assignment] return values @@ -1402,7 +1494,9 @@ def soft_convert_objects( values, convert_datetime=datetime, convert_timedelta=timedelta ) except (OutOfBoundsDatetime, ValueError): - return values + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return values # type: ignore[return-value] if numeric and is_object_dtype(values.dtype): converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True) @@ -1411,7 +1505,8 @@ def soft_convert_objects( values = converted if not isna(converted).all() else values values = values.copy() if copy else values - return values + # error: Incompatible return value type (got "ndarray", expected "ExtensionArray") + return values # type: ignore[return-value] def convert_dtypes( @@ -1562,12 +1657,20 @@ def try_datetime(v: np.ndarray) -> ArrayLike: dta = sequence_to_datetimes(v, require_iso8601=True, allow_object=True) except (ValueError, TypeError): # e.g. 
<class 'numpy.timedelta64'> is not convertible to datetime - return v.reshape(shape) + + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return v.reshape(shape) # type: ignore[return-value] else: # GH#19761 we may have mixed timezones, in which cast 'dta' is # an ndarray[object]. Only 1 test # relies on this behavior, see GH#40111 - return dta.reshape(shape) + + # error: Incompatible return value type (got "Union[ndarray, + # DatetimeArray]", expected "ExtensionArray") + # error: Incompatible return value type (got "Union[ndarray, + # DatetimeArray]", expected "ndarray") + return dta.reshape(shape) # type: ignore[return-value] def try_timedelta(v: np.ndarray) -> np.ndarray: # safe coerce to timedelta64 @@ -1586,14 +1689,18 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: inferred_type = lib.infer_datetimelike_array(ensure_object(v)) if inferred_type == "datetime": - value = try_datetime(v) + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "Union[ndarray, List[Any]]") + value = try_datetime(v) # type: ignore[assignment] elif inferred_type == "timedelta": value = try_timedelta(v) elif inferred_type == "nat": # if all NaT, return as datetime if isna(v).all(): - value = try_datetime(v) + # error: Incompatible types in assignment (expression has type + # "ExtensionArray", variable has type "Union[ndarray, List[Any]]") + value = try_datetime(v) # type: ignore[assignment] else: # We have at least a NaT and a string @@ -1603,7 +1710,10 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: if lib.infer_dtype(value, skipna=False) in ["mixed"]: # cannot skip missing values, as NaT implies that the string # is actually a datetime - value = try_datetime(v) + + # error: Incompatible types in assignment (expression has type + # "ExtensionArray", variable has type "Union[ndarray, List[Any]]") + value = try_datetime(v) # type: ignore[assignment] return value @@ -1643,10 +1753,16 @@ def 
maybe_cast_to_datetime( dta = sequence_to_datetimes(value, allow_object=False) # GH 25843: Remove tz information since the dtype # didn't specify one - if dta.tz is not None: + + # error: Item "ndarray" of "Union[ndarray, DatetimeArray]" + # has no attribute "tz" + if dta.tz is not None: # type: ignore[union-attr] # equiv: dta.view(dtype) # Note: NOT equivalent to dta.astype(dtype) - dta = dta.tz_localize(None) + + # error: Item "ndarray" of "Union[ndarray, + # DatetimeArray]" has no attribute "tz_localize" + dta = dta.tz_localize(None) # type: ignore[union-attr] value = dta elif is_datetime64tz: dtype = cast(DatetimeTZDtype, dtype) @@ -1656,17 +1772,38 @@ def maybe_cast_to_datetime( # be localized to the timezone. is_dt_string = is_string_dtype(value.dtype) dta = sequence_to_datetimes(value, allow_object=False) - if dta.tz is not None: - value = dta.astype(dtype, copy=False) + # error: Item "ndarray" of "Union[ndarray, DatetimeArray]" + # has no attribute "tz" + if dta.tz is not None: # type: ignore[union-attr] + # error: Argument 1 to "astype" of + # "_ArrayOrScalarCommon" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]]" + value = dta.astype( + dtype, copy=False # type: ignore[arg-type] + ) elif is_dt_string: # Strings here are naive, so directly localize # equiv: dta.astype(dtype) # though deprecated - value = dta.tz_localize(dtype.tz) + + # error: Item "ndarray" of "Union[ndarray, + # DatetimeArray]" has no attribute "tz_localize" + value = dta.tz_localize( # type: ignore[union-attr] + dtype.tz + ) else: # Numeric values are UTC at this point, # so localize and convert # equiv: Series(dta).astype(dtype) # though deprecated - value = dta.tz_localize("UTC").tz_convert(dtype.tz) + + # error: Item "ndarray" of "Union[ndarray, + # DatetimeArray]" has no attribute 
"tz_localize" + value = dta.tz_localize( # type: ignore[union-attr] + "UTC" + ).tz_convert(dtype.tz) elif is_timedelta64: # if successful, we get a ndarray[td64ns] value, _ = sequence_to_td64ns(value) @@ -1703,7 +1840,10 @@ def maybe_cast_to_datetime( # only do this if we have an array and the dtype of the array is not # setup already we are not an integer/object, so don't bother with this # conversion - value = maybe_infer_to_datetimelike(value) + + # error: Argument 1 to "maybe_infer_to_datetimelike" has incompatible type + # "Union[ExtensionArray, List[Any]]"; expected "Union[ndarray, List[Any]]" + value = maybe_infer_to_datetimelike(value) # type: ignore[arg-type] return value @@ -1820,7 +1960,11 @@ def find_common_type(types: List[DtypeObj]) -> DtypeObj: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.dtype("object") - return np.find_common_type(types, []) + # error: Argument 1 to "find_common_type" has incompatible type + # "List[Union[dtype, ExtensionDtype]]"; expected "Sequence[Union[dtype, + # None, type, _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]]" + return np.find_common_type(types, []) # type: ignore[arg-type] def construct_2d_arraylike_from_scalar( @@ -1878,7 +2022,9 @@ def construct_1d_arraylike_from_scalar( dtype = np.dtype(object) if is_extension_array_dtype(dtype): - cls = dtype.construct_array_type() + # error: Item "dtype" of "Union[dtype, ExtensionDtype]" has no + # attribute "construct_array_type" + cls = dtype.construct_array_type() # type: ignore[union-attr] subarr = cls._from_sequence([value] * length, dtype=dtype) else: @@ -1895,7 +2041,11 @@ def construct_1d_arraylike_from_scalar( elif dtype.kind in ["M", "m"]: value = maybe_unbox_datetimelike(value, dtype) - subarr = np.empty(length, dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type + # "Union[dtype, ExtensionDtype]"; expected "Union[dtype, None, type, + # 
_SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]" + subarr = np.empty(length, dtype=dtype) # type: ignore[arg-type] subarr.fill(value) return subarr @@ -1967,7 +2117,11 @@ def construct_1d_ndarray_preserving_na( # TODO(numpy#12550): special-case can be removed subarr = construct_1d_object_array_from_listlike(list(values)) else: - subarr = np.array(values, dtype=dtype, copy=copy) + # error: Argument "dtype" to "array" has incompatible type + # "Union[dtype[Any], ExtensionDtype, None]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + subarr = np.array(values, dtype=dtype, copy=copy) # type: ignore[arg-type] return subarr diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 0966d0b93cc25..68c8d35810b7e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -165,7 +165,10 @@ def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.ndarray: return arr.astype("uint64", copy=copy, casting="safe") # type: ignore[call-arg] except TypeError: if is_extension_array_dtype(arr.dtype): - return arr.to_numpy(dtype="float64", na_value=np.nan) + # error: "ndarray" has no attribute "to_numpy" + return arr.to_numpy( # type: ignore[attr-defined] + dtype="float64", na_value=np.nan + ) return arr.astype("float64", copy=copy) @@ -1718,7 +1721,10 @@ def infer_dtype_from_object(dtype) -> DtypeObj: """ if isinstance(dtype, type) and issubclass(dtype, np.generic): # Type object from a dtype - return dtype + + # error: Incompatible return value type (got "Type[generic]", expected + # "Union[dtype[Any], ExtensionDtype]") + return dtype # type: ignore[return-value] elif isinstance(dtype, (np.dtype, ExtensionDtype)): # dtype object try: @@ -1726,7 +1732,9 @@ def infer_dtype_from_object(dtype) -> DtypeObj: except TypeError: # Should still pass 
if we don't have a date-like pass - return dtype.type + # error: Incompatible return value type (got "Union[Type[generic], Type[Any]]", + # expected "Union[dtype[Any], ExtensionDtype]") + return dtype.type # type: ignore[return-value] try: dtype = pandas_dtype(dtype) @@ -1740,7 +1748,9 @@ def infer_dtype_from_object(dtype) -> DtypeObj: # TODO(jreback) # should deprecate these if dtype in ["datetimetz", "datetime64tz"]: - return DatetimeTZDtype.type + # error: Incompatible return value type (got "Type[Any]", expected + # "Union[dtype[Any], ExtensionDtype]") + return DatetimeTZDtype.type # type: ignore[return-value] elif dtype in ["period"]: raise NotImplementedError @@ -1837,7 +1847,9 @@ def pandas_dtype(dtype) -> DtypeObj: # registered extension types result = registry.find(dtype) if result is not None: - return result + # error: Incompatible return value type (got "Type[ExtensionDtype]", + # expected "Union[dtype, ExtensionDtype]") + return result # type: ignore[return-value] # try a numpy dtype # raise a consistent TypeError if failed diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 1545b5b106803..06fc1918b5ecf 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -51,8 +51,12 @@ def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: # problem case: SparseArray.astype(dtype) doesn't follow the specified # dtype exactly, but converts this to Sparse[dtype] -> first manually # convert to dense array - arr = cast(SparseArray, arr) - return arr.to_dense().astype(dtype, copy=False) + + # error: Incompatible types in assignment (expression has type + # "SparseArray", variable has type "ndarray") + arr = cast(SparseArray, arr) # type: ignore[assignment] + # error: "ndarray" has no attribute "to_dense" + return arr.to_dense().astype(dtype, copy=False) # type: ignore[attr-defined] if ( isinstance(arr, np.ndarray) @@ -67,7 +71,11 @@ def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> 
ArrayLike: if is_extension_array_dtype(dtype) and isinstance(arr, np.ndarray): # numpy's astype cannot handle ExtensionDtypes return pd_array(arr, dtype=dtype, copy=False) - return arr.astype(dtype, copy=False) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], None, type, + # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], + # List[Any], _DTypeDict, Tuple[Any, Any]]]" + return arr.astype(dtype, copy=False) # type: ignore[arg-type] def concat_compat(to_concat, axis: int = 0, ea_compat_axis: bool = False): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index da3a9269cf2c4..d44d2a564fb78 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -180,7 +180,9 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): type: Type[CategoricalDtypeType] = CategoricalDtypeType kind: str_type = "O" str = "|O08" - base = np.dtype("O") + # error: Incompatible types in assignment (expression has type "dtype", + # base class "PandasExtensionDtype" defined the type as "None") + base = np.dtype("O") # type: ignore[assignment] _metadata = ("categories", "ordered") _cache: Dict[str_type, PandasExtensionDtype] = {} @@ -467,8 +469,14 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)] ) else: - cat_array = [cat_array] - hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) + # error: Incompatible types in assignment (expression has type + # "List[ndarray]", variable has type "ndarray") + cat_array = [cat_array] # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "int") + hashed = combine_hash_arrays( # type: ignore[assignment] + iter(cat_array), num_items=len(cat_array) + ) return np.bitwise_xor.reduce(hashed) @classmethod @@ 
-668,7 +676,9 @@ class DatetimeTZDtype(PandasExtensionDtype): kind: str_type = "M" str = "|M8[ns]" num = 101 - base = np.dtype("M8[ns]") + # error: Incompatible types in assignment (expression has type "dtype", + # base class "PandasExtensionDtype" defined the type as "None") + base = np.dtype("M8[ns]") # type: ignore[assignment] na_value = NaT _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") @@ -834,7 +844,9 @@ class PeriodDtype(dtypes.PeriodDtypeBase, PandasExtensionDtype): type: Type[Period] = Period kind: str_type = "O" str = "|O08" - base = np.dtype("O") + # error: Incompatible types in assignment (expression has type "dtype", + # base class "PandasExtensionDtype" defined the type as "None") + base = np.dtype("O") # type: ignore[assignment] num = 102 _metadata = ("freq",) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") @@ -1034,7 +1046,9 @@ class IntervalDtype(PandasExtensionDtype): name = "interval" kind: str_type = "O" str = "|O08" - base = np.dtype("O") + # error: Incompatible types in assignment (expression has type "dtype", + # base class "PandasExtensionDtype" defined the type as "None") + base = np.dtype("O") # type: ignore[assignment] num = 103 _metadata = ( "subtype", diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index b4a77337ce9f2..286272b165fb9 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -164,9 +164,13 @@ def _isna(obj, inf_as_na: bool = False): elif isinstance(obj, type): return False elif isinstance(obj, (np.ndarray, ABCExtensionArray)): - return _isna_array(obj, inf_as_na=inf_as_na) + # error: Value of type variable "ArrayLike" of "_isna_array" cannot be + # "Union[ndarray, ExtensionArray]" + return _isna_array(obj, inf_as_na=inf_as_na) # type: ignore[type-var] elif isinstance(obj, (ABCSeries, ABCIndex)): - result = _isna_array(obj._values, inf_as_na=inf_as_na) + # error: Value of type variable "ArrayLike" of "_isna_array" cannot 
be + # "Union[Any, ExtensionArray, ndarray]" + result = _isna_array(obj._values, inf_as_na=inf_as_na) # type: ignore[type-var] # box if isinstance(obj, ABCSeries): result = obj._constructor( @@ -234,19 +238,37 @@ def _isna_array(values: ArrayLike, inf_as_na: bool = False): if is_extension_array_dtype(dtype): if inf_as_na and is_categorical_dtype(dtype): - result = libmissing.isnaobj_old(values.to_numpy()) + # error: "ndarray" has no attribute "to_numpy" + result = libmissing.isnaobj_old( + values.to_numpy() # type: ignore[attr-defined] + ) else: - result = values.isna() + # error: "ndarray" has no attribute "isna" + result = values.isna() # type: ignore[attr-defined] elif is_string_dtype(dtype): - result = _isna_string_dtype(values, dtype, inf_as_na=inf_as_na) + # error: Argument 1 to "_isna_string_dtype" has incompatible type + # "ExtensionArray"; expected "ndarray" + # error: Argument 2 to "_isna_string_dtype" has incompatible type + # "ExtensionDtype"; expected "dtype[Any]" + result = _isna_string_dtype( + values, dtype, inf_as_na=inf_as_na # type: ignore[arg-type] + ) elif needs_i8_conversion(dtype): # this is the NaT pattern result = values.view("i8") == iNaT else: if inf_as_na: - result = ~np.isfinite(values) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "ExtensionArray"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + result = ~np.isfinite(values) # type: ignore[arg-type] else: - result = np.isnan(values) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "ExtensionArray"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + result = np.isnan(values) # type: ignore[arg-type] return result @@ -573,7 +595,9 @@ def na_value_for_dtype(dtype: DtypeObj, compat: bool = 
True): """ if is_extension_array_dtype(dtype): - return dtype.na_value + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "na_value" + return dtype.na_value # type: ignore[union-attr] elif needs_i8_conversion(dtype): return dtype.type("NaT", "ns") elif is_float_dtype(dtype): @@ -626,7 +650,8 @@ def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: # Numeric return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) - elif dtype == np.dtype(object): + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" + elif dtype == np.dtype(object): # type: ignore[type-var] # This is needed for Categorical, but is kind of weird return True @@ -650,11 +675,22 @@ def isna_all(arr: ArrayLike) -> bool: checker = nan_checker elif dtype.kind in ["m", "M"] or dtype.type is Period: - checker = lambda x: np.asarray(x.view("i8")) == iNaT + # error: Incompatible types in assignment (expression has type + # "Callable[[Any], Any]", variable has type "ufunc") + checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment] else: - checker = lambda x: _isna_array(x, inf_as_na=INF_AS_NA) + # error: Incompatible types in assignment (expression has type "Callable[[Any], + # Any]", variable has type "ufunc") + checker = lambda x: _isna_array( # type: ignore[assignment] + x, inf_as_na=INF_AS_NA + ) return all( - checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "Union[ExtensionArray, Any]"; expected "Union[Union[int, float, complex, str, + # bytes, generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + checker(arr[i : i + chunk_len]).all() # type: ignore[arg-type] + for i in range(0, total_len, chunk_len) ) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a0c97b0cdd268..de28c04ca0793 100644 --- a/pandas/core/frame.py +++ 
b/pandas/core/frame.py @@ -584,33 +584,84 @@ def __init__( ) elif isinstance(data, dict): - mgr = dict_to_mgr(data, index, columns, dtype=dtype, typ=manager) + # error: Argument "dtype" to "dict_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + mgr = dict_to_mgr( + data, index, columns, dtype=dtype, typ=manager # type: ignore[arg-type] + ) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): - mgr = rec_array_to_mgr(data, index, columns, dtype, copy, typ=manager) + # error: Argument 4 to "rec_array_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + mgr = rec_array_to_mgr( + data, + index, + columns, + dtype, # type: ignore[arg-type] + copy, + typ=manager, + ) # a masked array else: data = sanitize_masked_array(data) mgr = ndarray_to_mgr( - data, index, columns, dtype=dtype, copy=copy, typ=manager + # error: Argument "dtype" to "ndarray_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; + # expected "Union[dtype[Any], ExtensionDtype, None]" + data, + index, + columns, + dtype=dtype, # type: ignore[arg-type] + copy=copy, + typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: # i.e. numpy structured array - mgr = rec_array_to_mgr(data, index, columns, dtype, copy, typ=manager) + + # error: Argument 4 to "rec_array_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + mgr = rec_array_to_mgr( + data, + index, + columns, + dtype, # type: ignore[arg-type] + copy, + typ=manager, + ) elif getattr(data, "name", None) is not None: # i.e. 
Series/Index with non-None name mgr = dict_to_mgr( - {data.name: data}, index, columns, dtype=dtype, typ=manager + # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no + # attribute "name" + # error: Argument "dtype" to "dict_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; + # expected "Union[dtype[Any], ExtensionDtype, None]" + {data.name: data}, # type: ignore[union-attr] + index, + columns, + dtype=dtype, # type: ignore[arg-type] + typ=manager, ) else: mgr = ndarray_to_mgr( - data, index, columns, dtype=dtype, copy=copy, typ=manager + # error: Argument "dtype" to "ndarray_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; + # expected "Union[dtype[Any], ExtensionDtype, None]" + data, + index, + columns, + dtype=dtype, # type: ignore[arg-type] + copy=copy, + typ=manager, ) # For data is list-like, or Iterable (will consume into list) @@ -622,19 +673,54 @@ def __init__( data = dataclasses_to_dicts(data) if treat_as_nested(data): if columns is not None: - columns = ensure_index(columns) + # error: Value of type variable "AnyArrayLike" of "ensure_index" + # cannot be "Collection[Any]" + columns = ensure_index(columns) # type: ignore[type-var] arrays, columns, index = nested_data_to_arrays( - data, columns, index, dtype + # error: Argument 3 to "nested_data_to_arrays" has incompatible + # type "Optional[Collection[Any]]"; expected "Optional[Index]" + # error: Argument 4 to "nested_data_to_arrays" has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # None]"; expected "Union[dtype[Any], ExtensionDtype, None]" + data, + columns, + index, # type: ignore[arg-type] + dtype, # type: ignore[arg-type] ) mgr = arrays_to_mgr( - arrays, columns, index, columns, dtype=dtype, typ=manager + # error: Argument "dtype" to "arrays_to_mgr" has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # None]"; expected 
"Union[dtype[Any], ExtensionDtype, None]" + arrays, + columns, + index, + columns, + dtype=dtype, # type: ignore[arg-type] + typ=manager, ) else: mgr = ndarray_to_mgr( - data, index, columns, dtype=dtype, copy=copy, typ=manager + # error: Argument "dtype" to "ndarray_to_mgr" has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # None]"; expected "Union[dtype[Any], ExtensionDtype, None]" + data, + index, + columns, + dtype=dtype, # type: ignore[arg-type] + copy=copy, + typ=manager, ) else: - mgr = dict_to_mgr({}, index, columns, dtype=dtype, typ=manager) + # error: Argument "dtype" to "dict_to_mgr" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + mgr = dict_to_mgr( + {}, + index, + columns, + dtype=dtype, # type: ignore[arg-type] + typ=manager, + ) # For data is scalar else: if index is None or columns is None: @@ -648,19 +734,39 @@ def __init__( # TODO(EA2D): special case not needed with 2D EAs values = [ - construct_1d_arraylike_from_scalar(data, len(index), dtype) + # error: Argument 3 to "construct_1d_arraylike_from_scalar" + # has incompatible type "Union[ExtensionDtype, str, dtype, + # Type[object]]"; expected "Union[dtype, ExtensionDtype]" + construct_1d_arraylike_from_scalar( + data, len(index), dtype # type: ignore[arg-type] + ) for _ in range(len(columns)) ] mgr = arrays_to_mgr( values, columns, index, columns, dtype=None, typ=manager ) else: - values = construct_2d_arraylike_from_scalar( - data, len(index), len(columns), dtype, copy + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "List[ExtensionArray]") + values = construct_2d_arraylike_from_scalar( # type: ignore[assignment] + # error: Argument 4 to "construct_2d_arraylike_from_scalar" has + # incompatible type "Union[ExtensionDtype, str, dtype[Any], + # Type[object]]"; expected "dtype[Any]" + data, + len(index), + len(columns), + 
dtype, # type: ignore[arg-type] + copy, ) mgr = ndarray_to_mgr( - values, index, columns, dtype=values.dtype, copy=False, typ=manager + # error: "List[ExtensionArray]" has no attribute "dtype" + values, + index, + columns, + dtype=values.dtype, # type: ignore[attr-defined] + copy=False, + typ=manager, ) # ensure correct Manager type according to settings @@ -1230,17 +1336,19 @@ def __len__(self) -> int: """ return len(self.index) - # error: Overloaded function signatures 1 and 2 overlap with incompatible return - # types @overload - def dot(self, other: Series) -> Series: # type: ignore[misc] + def dot(self, other: Series) -> Series: ... @overload def dot(self, other: Union[DataFrame, Index, ArrayLike]) -> DataFrame: ... - def dot(self, other: Union[AnyArrayLike, FrameOrSeriesUnion]) -> FrameOrSeriesUnion: + # error: Overloaded function implementation cannot satisfy signature 2 due to + # inconsistencies in how they use type variables + def dot( # type: ignore[misc] + self, other: Union[AnyArrayLike, FrameOrSeriesUnion] + ) -> FrameOrSeriesUnion: """ Compute the matrix multiplication between the DataFrame and other. @@ -2085,7 +2193,9 @@ def to_records( # array of tuples to numpy cols. 
copy copy copy ix_vals = list(map(np.array, zip(*self.index._values))) else: - ix_vals = [self.index.values] + # error: List item 0 has incompatible type "ArrayLike"; expected + # "ndarray" + ix_vals = [self.index.values] # type: ignore[list-item] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) @@ -2152,7 +2262,9 @@ def to_records( if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): - formats.append(dtype_mapping) + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[type, dtype, str]"; expected "dtype" + formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" @@ -3217,7 +3329,9 @@ def transpose(self, *args, copy: bool = False) -> DataFrame: ) else: - new_values = self.values.T + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "List[Any]") + new_values = self.values.T # type: ignore[assignment] if copy: new_values = new_values.copy() result = self._constructor( @@ -3276,7 +3390,9 @@ def _get_column_array(self, i: int) -> ArrayLike: Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) """ - return self._mgr.iget_values(i) + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return self._mgr.iget_values(i) # type: ignore[return-value] def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ @@ -3284,7 +3400,9 @@ def _iter_column_arrays(self) -> Iterator[ArrayLike]: This returns the values as stored in the Block (ndarray or ExtensionArray). 
""" for i in range(len(self.columns)): - yield self._get_column_array(i) + # error: Incompatible types in "yield" (actual type + # "ExtensionArray", expected type "ndarray") + yield self._get_column_array(i) # type: ignore[misc] def __getitem__(self, key): key = lib.item_from_zerodim(key) @@ -3543,7 +3661,10 @@ def _set_item_frame_value(self, key, value: DataFrame) -> None: value = value.reindex(cols, axis=1) # now align rows - value = _reindex_for_setitem(value, self.index) + + # error: Incompatible types in assignment (expression has type "ExtensionArray", + # variable has type "DataFrame") + value = _reindex_for_setitem(value, self.index) # type: ignore[assignment] self._set_item_mgr(key, value) def _iset_item_mgr(self, loc: int, value) -> None: @@ -4052,9 +4173,16 @@ def check_int_infer_dtype(dtypes): # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) - converted_dtypes.append(np.int64) + # error: Argument 1 to "append" of "list" has incompatible type + # "Type[signedinteger[Any]]"; expected "Type[signedinteger[Any]]" + converted_dtypes.append(np.int64) # type: ignore[arg-type] else: - converted_dtypes.append(infer_dtype_from_object(dtype)) + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected + # "Type[signedinteger[Any]]" + converted_dtypes.append( + infer_dtype_from_object(dtype) # type: ignore[arg-type] + ) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) @@ -4109,7 +4237,8 @@ def extract_unique_dtypes_from_dtypes_set( ) keep_these &= ~self.dtypes.isin(excluded_dtypes) - return self.iloc[:, keep_these.values] + # error: "ndarray" has no attribute "values" + return self.iloc[:, keep_these.values] # type: ignore[attr-defined] def insert(self, loc, column, value, allow_duplicates: bool = False) -> None: """ @@ -4418,7 +4547,11 @@ def _reindex_multi(self, axes, copy: bool, 
fill_value) -> DataFrame: if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer - new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) + # error: Argument 2 to "take_2d_multi" has incompatible type "Tuple[Any, + # Any]"; expected "ndarray" + new_values = take_2d_multi( + self.values, indexer, fill_value=fill_value # type: ignore[arg-type] + ) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers( @@ -5106,10 +5239,14 @@ def set_index( arrays.append(col) # type:ignore[arg-type] names.append(col.name) elif isinstance(col, (list, np.ndarray)): - arrays.append(col) + # error: Argument 1 to "append" of "list" has incompatible type + # "Union[List[Any], ndarray]"; expected "Index" + arrays.append(col) # type: ignore[arg-type] names.append(None) elif isinstance(col, abc.Iterator): - arrays.append(list(col)) + # error: Argument 1 to "append" of "list" has incompatible type + # "List[Any]"; expected "Index" + arrays.append(list(col)) # type: ignore[arg-type] names.append(None) # from here, col can only be a column label else: @@ -5853,7 +5990,12 @@ def sort_values( # type: ignore[override] # need to rewrap columns in Series to apply key function if key is not None: - keys = [Series(k, name=name) for (k, name) in zip(keys, by)] + # error: List comprehension has incompatible type List[Series]; + # expected List[ndarray] + keys = [ + Series(k, name=name) # type: ignore[misc] + for (k, name) in zip(keys, by) + ] indexer = lexsort_indexer( keys, orders=ascending, na_position=na_position, key=key @@ -5866,7 +6008,9 @@ def sort_values( # type: ignore[override] # need to rewrap column in Series to apply key function if key is not None: - k = Series(k, name=by) + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "ndarray") + k = Series(k, name=by) # type: ignore[assignment] if isinstance(ascending, (tuple, list)): 
ascending = ascending[0] @@ -10024,7 +10168,9 @@ def _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike: # reindex if necessary if value.index.equals(index) or not len(index): - return value._values.copy() + # error: Incompatible return value type (got "Union[ndarray, Any]", expected + # "ExtensionArray") + return value._values.copy() # type: ignore[return-value] # GH#4107 try: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cd5c0159e93cb..d2b63c42d777b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -666,7 +666,8 @@ def size(self) -> int: >>> df.size 4 """ - return np.prod(self.shape) + # error: Incompatible return value type (got "number", expected "int") + return np.prod(self.shape) # type: ignore[return-value] @final @property @@ -752,11 +753,13 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries: # ignore needed because of NDFrame constructor is different than # DataFrame/Series constructors. return self._constructor( + # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected + # "Union[ArrayManager, BlockManager]" # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index, # None, None]"; expected "bool" [arg-type] # error: Argument 2 to "NDFrame" has incompatible type "*Generator[Index, # None, None]"; expected "Optional[Mapping[Optional[Hashable], Any]]" - new_values, + new_values, # type: ignore[arg-type] *new_axes, # type: ignore[arg-type] ).__finalize__(self, method="swapaxes") @@ -1985,14 +1988,20 @@ def __array_wrap__( # ptp also requires the item_from_zerodim return result d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) - return self._constructor(result, **d).__finalize__( + # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; + # expected "BlockManager" + return self._constructor(result, **d).__finalize__( # type: ignore[arg-type] self, method="__array_wrap__" ) def __array_ufunc__( self, ufunc: Callable, 
method: str, *inputs: Any, **kwargs: Any ): - return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) + # error: Argument 2 to "array_ufunc" has incompatible type "Callable[..., Any]"; + # expected "ufunc" + return arraylike.array_ufunc( + self, ufunc, method, *inputs, **kwargs # type: ignore[arg-type] + ) # ideally we would define this to avoid the getattr checks, but # is slower @@ -6989,7 +6998,10 @@ def interpolate( f"`limit_direction` must be 'backward' for method `{method}`" ) - if obj.ndim == 2 and np.all(obj.dtypes == np.dtype(object)): + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" + if obj.ndim == 2 and np.all( + obj.dtypes == np.dtype(object) # type: ignore[type-var] + ): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " @@ -8367,7 +8379,8 @@ def last(self: FrameOrSeries, offset) -> FrameOrSeries: start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") - return self.iloc[start:] + # error: Slice index must be an integer or None + return self.iloc[start:] # type: ignore[misc] @final def rank( @@ -8475,8 +8488,15 @@ def ranker(data): na_option=na_option, pct=pct, ) - ranks = self._constructor(ranks, **data._construct_axes_dict()) - return ranks.__finalize__(self, method="rank") + # error: Incompatible types in assignment (expression has type + # "FrameOrSeries", variable has type "ndarray") + # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; expected + # "Union[ArrayManager, BlockManager]" + ranks = self._constructor( # type: ignore[assignment] + ranks, **data._construct_axes_dict() # type: ignore[arg-type] + ) + # error: "ndarray" has no attribute "__finalize__" + return ranks.__finalize__(self, method="rank") # type: ignore[attr-defined] # if numeric_only is None, and we can't get anything, we try with # numeric_only=True @@ -8958,7 +8978,11 @@ def _where( # we are the same shape, so create 
an actual object for alignment else: - other = self._constructor(other, **self._construct_axes_dict()) + # error: Argument 1 to "NDFrame" has incompatible type "ndarray"; + # expected "BlockManager" + other = self._constructor( + other, **self._construct_axes_dict() # type: ignore[arg-type] + ) if axis is None: axis = 0 @@ -9885,7 +9909,11 @@ def abs(self: FrameOrSeries) -> FrameOrSeries: 2 6 30 -30 3 7 40 -50 """ - return np.abs(self) + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "FrameOrSeries"; expected "Union[Union[int, float, complex, str, bytes, + # generic], Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + return np.abs(self) # type: ignore[arg-type] @final def describe( diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index af5c92ce82a66..50d04135c9300 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -344,7 +344,13 @@ def _aggregate_multiple_funcs(self, arg): # let higher level handle return results - output = self._wrap_aggregated_output(results, index=None) + # Argument 1 to "_wrap_aggregated_output" of "SeriesGroupBy" has + # incompatible type "Dict[OutputKey, Union[DataFrame, + # Series]]"; + # expected "Mapping[OutputKey, Union[Series, ndarray]]" + output = self._wrap_aggregated_output( + results, index=None # type: ignore[arg-type] + ) return self.obj._constructor_expanddim(output, columns=columns) # TODO: index should not be Optional - see GH 35490 @@ -759,13 +765,28 @@ def apply_series_value_counts(): # lab is a Categorical with categories an IntervalIndex lab = cut(Series(val), bins, include_lowest=True) - lev = lab.cat.categories - lab = lev.take(lab.cat.codes, allow_fill=True, fill_value=lev._na_value) + # error: "ndarray" has no attribute "cat" + lev = lab.cat.categories # type: ignore[attr-defined] + # error: No overload variant of "take" of "_ArrayOrScalarCommon" matches + # argument types "Any", 
"bool", "Union[Any, float]" + lab = lev.take( # type: ignore[call-overload] + # error: "ndarray" has no attribute "cat" + lab.cat.codes, # type: ignore[attr-defined] + allow_fill=True, + # error: Item "ndarray" of "Union[ndarray, Index]" has no attribute + # "_na_value" + fill_value=lev._na_value, # type: ignore[union-attr] + ) llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] if is_interval_dtype(lab.dtype): # TODO: should we do this inside II? - sorter = np.lexsort((lab.left, lab.right, ids)) + + # error: "ndarray" has no attribute "left" + # error: "ndarray" has no attribute "right" + sorter = np.lexsort( + (lab.left, lab.right, ids) # type: ignore[attr-defined] + ) else: sorter = np.lexsort((lab, ids)) @@ -791,7 +812,11 @@ def apply_series_value_counts(): # multi-index components codes = self.grouper.reconstructed_codes codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] - levels = [ping.group_index for ping in self.grouper.groupings] + [lev] + # error: List item 0 has incompatible type "Union[ndarray, Any]"; + # expected "Index" + levels = [ping.group_index for ping in self.grouper.groupings] + [ + lev # type: ignore[list-item] + ] names = self.grouper.names + [self._selection_name] if dropna: @@ -1149,11 +1174,20 @@ def py_fallback(values: ArrayLike) -> ArrayLike: # We've split an object block! Everything we've assumed # about a single block input returning a single block output # is a lie. 
See eg GH-39329 - return mgr.as_array() + + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return mgr.as_array() # type: ignore[return-value] else: # We are a single block from a BlockManager # or one array from SingleArrayManager - return arrays[0] + + # error: Incompatible return value type (got "Union[ndarray, + # ExtensionArray, ArrayLike]", expected "ExtensionArray") + # error: Incompatible return value type (got "Union[ndarray, + # ExtensionArray, ArrayLike]", expected + # "ndarray") + return arrays[0] # type: ignore[return-value] def array_func(values: ArrayLike) -> ArrayLike: @@ -1172,7 +1206,9 @@ def array_func(values: ArrayLike) -> ArrayLike: assert how == "ohlc" raise - result = py_fallback(values) + # error: Incompatible types in assignment (expression has type + # "ExtensionArray", variable has type "ndarray") + result = py_fallback(values) # type: ignore[assignment] return cast_agg_result(result, values, how) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e11c296783476..e5010da5ccac6 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1131,7 +1131,12 @@ def _cython_agg_general( if not output: raise DataError("No numeric types to aggregate") - return self._wrap_aggregated_output(output, index=self.grouper.result_index) + # error: Argument 1 to "_wrap_aggregated_output" of "BaseGroupBy" has + # incompatible type "Dict[OutputKey, Union[ndarray, DatetimeArray]]"; + # expected "Mapping[OutputKey, ndarray]" + return self._wrap_aggregated_output( + output, index=self.grouper.result_index # type: ignore[arg-type] + ) @final def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): @@ -2269,15 +2274,25 @@ def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]: inference = None if is_integer_dtype(vals.dtype): if is_extension_array_dtype(vals.dtype): - vals = vals.to_numpy(dtype=float, na_value=np.nan) + # 
error: "ndarray" has no attribute "to_numpy" + vals = vals.to_numpy( # type: ignore[attr-defined] + dtype=float, na_value=np.nan + ) inference = np.int64 elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype): - vals = vals.to_numpy(dtype=float, na_value=np.nan) + # error: "ndarray" has no attribute "to_numpy" + vals = vals.to_numpy( # type: ignore[attr-defined] + dtype=float, na_value=np.nan + ) elif is_datetime64_dtype(vals.dtype): - inference = "datetime64[ns]" + # error: Incompatible types in assignment (expression has type + # "str", variable has type "Optional[Type[int64]]") + inference = "datetime64[ns]" # type: ignore[assignment] vals = np.asarray(vals).astype(float) elif is_timedelta64_dtype(vals.dtype): - inference = "timedelta64[ns]" + # error: Incompatible types in assignment (expression has type "str", + # variable has type "Optional[Type[signedinteger[Any]]]") + inference = "timedelta64[ns]" # type: ignore[assignment] vals = np.asarray(vals).astype(float) return vals, inference diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 89becb880c519..51f7b44f6d69d 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -583,7 +583,9 @@ def indices(self): def codes(self) -> np.ndarray: if self._codes is None: self._make_codes() - return self._codes + # error: Incompatible return value type (got "Optional[ndarray]", + # expected "ndarray") + return self._codes # type: ignore[return-value] @cache_readonly def result_index(self) -> Index: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 008ee4dff4f7b..2d7547ff75ca4 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -539,7 +539,10 @@ def _ea_wrap_cython_operation( ) if how in ["rank"]: # preserve float64 dtype - return res_values + + # error: Incompatible return value type (got "ndarray", expected + # "Tuple[ndarray, Optional[List[str]]]") + return res_values # type: ignore[return-value] 
res_values = res_values.astype("i8", copy=False) result = type(orig_values)(res_values, dtype=orig_values.dtype) @@ -553,9 +556,13 @@ def _ea_wrap_cython_operation( ) dtype = maybe_cast_result_dtype(orig_values.dtype, how) if is_extension_array_dtype(dtype): - cls = dtype.construct_array_type() + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "construct_array_type" + cls = dtype.construct_array_type() # type: ignore[union-attr] return cls._from_sequence(res_values, dtype=dtype) - return res_values + # error: Incompatible return value type (got "ndarray", expected + # "Tuple[ndarray, Optional[List[str]]]") + return res_values # type: ignore[return-value] elif is_float_dtype(values.dtype): # FloatingArray @@ -592,7 +599,9 @@ def _cython_operation( self._disallow_invalid_ops(values, how) if is_extension_array_dtype(values.dtype): - return self._ea_wrap_cython_operation( + # error: Incompatible return value type (got "Tuple[ndarray, + # Optional[List[str]]]", expected "ndarray") + return self._ea_wrap_cython_operation( # type: ignore[return-value] kind, values, how, axis, min_count, **kwargs ) @@ -680,7 +689,9 @@ def _cython_operation( # e.g. 
if we are int64 and need to restore to datetime64/timedelta64 # "rank" is the only member of cython_cast_blocklist we get here dtype = maybe_cast_result_dtype(orig_values.dtype, how) - result = maybe_downcast_to_dtype(result, dtype) + # error: Argument 2 to "maybe_downcast_to_dtype" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "Union[str, dtype[Any]]" + result = maybe_downcast_to_dtype(result, dtype) # type: ignore[arg-type] return result diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index e3f9f6dbb0025..9543b11ad4de1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -191,7 +191,8 @@ str_t = str -_o_dtype = np.dtype(object) +# error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" +_o_dtype = np.dtype(object) # type: ignore[type-var] _Identity = NewType("_Identity", object) @@ -404,14 +405,23 @@ def __new__( # they are actually ints, e.g. '0' and 0.0 # should not be coerced # GH 11836 - data = _maybe_cast_with_dtype(data, dtype, copy) + + # error: Argument 1 to "_maybe_cast_with_dtype" has incompatible type + # "Union[ndarray, Index, Series]"; expected "ndarray" + data = _maybe_cast_with_dtype( + data, dtype, copy # type: ignore[arg-type] + ) dtype = data.dtype if data.dtype.kind in ["i", "u", "f"]: # maybe coerce to a sub-class arr = data else: - arr = com.asarray_tuplesafe(data, dtype=object) + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Type[object]"; expected "Union[str, dtype[Any], None]" + arr = com.asarray_tuplesafe( + data, dtype=object # type: ignore[arg-type] + ) if dtype is None: arr = _maybe_cast_data_without_dtype(arr) @@ -445,7 +455,10 @@ def __new__( data, names=name or kwargs.get("names") ) # other iterable of some kind - subarr = com.asarray_tuplesafe(data, dtype=object) + + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Type[object]"; expected "Union[str, dtype[Any], None]" + subarr = 
com.asarray_tuplesafe(data, dtype=object) # type: ignore[arg-type] return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) @classmethod @@ -2889,10 +2902,16 @@ def union(self, other, sort=None): # <T> | <T> -> T # <T> | <U> -> object if not (is_integer_dtype(self.dtype) and is_integer_dtype(other.dtype)): - dtype = "float64" + # error: Incompatible types in assignment (expression has type + # "str", variable has type "Union[dtype[Any], ExtensionDtype]") + dtype = "float64" # type: ignore[assignment] else: # one is int64 other is uint64 - dtype = object + + # error: Incompatible types in assignment (expression has type + # "Type[object]", variable has type "Union[dtype[Any], + # ExtensionDtype]") + dtype = object # type: ignore[assignment] left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) @@ -2941,7 +2960,11 @@ def _union(self, other: Index, sort): ): # Both are unique and monotonic, so can use outer join try: - return self._outer_indexer(lvals, rvals)[0] + # error: Argument 1 to "_outer_indexer" of "Index" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + # error: Argument 2 to "_outer_indexer" of "Index" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + return self._outer_indexer(lvals, rvals)[0] # type: ignore[arg-type] except (TypeError, IncompatibleFrequency): # incomparable objects value_list = list(lvals) @@ -2953,7 +2976,12 @@ def _union(self, other: Index, sort): elif not other.is_unique and not self.is_unique: # self and other both have duplicates - result = algos.union_with_duplicates(lvals, rvals) + + # error: Argument 1 to "union_with_duplicates" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + # error: Argument 2 to "union_with_duplicates" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + result = algos.union_with_duplicates(lvals, rvals) # type: ignore[arg-type] return 
_maybe_try_sort(result, sort) # Either other or self is not unique @@ -2965,10 +2993,16 @@ def _union(self, other: Index, sort): missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) if len(missing) > 0: - other_diff = algos.take_nd(rvals, missing, allow_fill=False) + # error: Value of type variable "ArrayLike" of "take_nd" cannot be + # "Union[ExtensionArray, ndarray]" + other_diff = algos.take_nd( + rvals, missing, allow_fill=False # type: ignore[type-var] + ) result = concat_compat((lvals, other_diff)) else: - result = lvals + # error: Incompatible types in assignment (expression has type + # "Union[ExtensionArray, ndarray]", variable has type "ndarray") + result = lvals # type: ignore[assignment] if not self.is_monotonic or not other.is_monotonic: result = _maybe_try_sort(result, sort) @@ -3058,7 +3092,9 @@ def _intersection(self, other: Index, sort=False): if self.is_monotonic and other.is_monotonic: try: - result = self._inner_indexer(lvals, rvals)[0] + # error: Argument 1 to "_inner_indexer" of "Index" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + result = self._inner_indexer(lvals, rvals)[0] # type: ignore[arg-type] except TypeError: pass else: @@ -3964,11 +4000,15 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) if return_indexers: if join_index is self: - lindexer = None + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + lindexer = None # type: ignore[assignment] else: lindexer = self.get_indexer(join_index) if join_index is other: - rindexer = None + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + rindexer = None # type: ignore[assignment] else: rindexer = other.get_indexer(join_index) return join_index, lindexer, rindexer @@ -4075,7 +4115,11 @@ def _join_non_unique(self, other, how="left", return_indexers=False): mask = left_idx == -1 np.putmask(join_index, 
mask, rvalues.take(right_idx)) - join_index = self._wrap_joined_index(join_index, other) + # error: Incompatible types in assignment (expression has type "Index", variable + # has type "ndarray") + join_index = self._wrap_joined_index( + join_index, other # type: ignore[assignment] + ) if return_indexers: return join_index, left_idx, right_idx @@ -4248,23 +4292,61 @@ def _join_monotonic(self, other, how="left", return_indexers=False): elif how == "right": join_index = other lidx = self._left_indexer_unique(ov, sv) - ridx = None + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + ridx = None # type: ignore[assignment] elif how == "inner": - join_index, lidx, ridx = self._inner_indexer(sv, ov) - join_index = self._wrap_joined_index(join_index, other) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, lidx, ridx = self._inner_indexer( # type:ignore[assignment] + sv, ov + ) + # error: Argument 1 to "_wrap_joined_index" of "Index" has incompatible + # type "Index"; expected "ndarray" + join_index = self._wrap_joined_index( + join_index, other # type: ignore[arg-type] + ) elif how == "outer": - join_index, lidx, ridx = self._outer_indexer(sv, ov) - join_index = self._wrap_joined_index(join_index, other) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, lidx, ridx = self._outer_indexer( # type:ignore[assignment] + sv, ov + ) + # error: Argument 1 to "_wrap_joined_index" of "Index" has incompatible + # type "Index"; expected "ndarray" + join_index = self._wrap_joined_index( + join_index, other # type: ignore[arg-type] + ) else: if how == "left": - join_index, lidx, ridx = self._left_indexer(sv, ov) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, lidx, ridx = self._left_indexer( # type: ignore[assignment] + 
sv, ov + ) elif how == "right": - join_index, ridx, lidx = self._left_indexer(ov, sv) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, ridx, lidx = self._left_indexer( # type: ignore[assignment] + ov, sv + ) elif how == "inner": - join_index, lidx, ridx = self._inner_indexer(sv, ov) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, lidx, ridx = self._inner_indexer( # type:ignore[assignment] + sv, ov + ) elif how == "outer": - join_index, lidx, ridx = self._outer_indexer(sv, ov) - join_index = self._wrap_joined_index(join_index, other) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Index") + join_index, lidx, ridx = self._outer_indexer( # type:ignore[assignment] + sv, ov + ) + # error: Argument 1 to "_wrap_joined_index" of "Index" has incompatible type + # "Index"; expected "ndarray" + join_index = self._wrap_joined_index( + join_index, other # type: ignore[arg-type] + ) if return_indexers: lidx = None if lidx is None else ensure_platform_int(lidx) @@ -4307,7 +4389,11 @@ def values(self) -> ArrayLike: Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ - return self._data + # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]", + # expected "ExtensionArray") + # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]", + # expected "ndarray") + return self._data # type: ignore[return-value] @cache_readonly @doc(IndexOpsMixin.array) @@ -4349,7 +4435,9 @@ def _get_engine_target(self) -> np.ndarray: """ Get the ndarray that we can pass to the IndexEngine constructor. 
""" - return self._values + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray]", expected "ndarray") + return self._values # type: ignore[return-value] @doc(IndexOpsMixin.memory_usage) def memory_usage(self, deep: bool = False) -> int: @@ -4542,7 +4630,11 @@ def __getitem__(self, key): result = getitem(key) if not is_scalar(result): - if np.ndim(result) > 1: + # error: Argument 1 to "ndim" has incompatible type "Union[ExtensionArray, + # Any]"; expected "Union[Union[int, float, complex, str, bytes, generic], + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + if np.ndim(result) > 1: # type: ignore[arg-type] deprecate_ndim_indexing(result) return result # NB: Using _constructor._simple_new would break if MultiIndex @@ -4622,7 +4714,9 @@ def putmask(self, mask, value): numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ - mask, noop = validate_putmask(self._values, mask) + # error: Value of type variable "ArrayLike" of "validate_putmask" cannot be + # "Union[ExtensionArray, ndarray]" + mask, noop = validate_putmask(self._values, mask) # type: ignore[type-var] if noop: return self.copy() @@ -4638,7 +4732,11 @@ def putmask(self, mask, value): return self.astype(dtype).putmask(mask, value) values = self._values.copy() - converted = setitem_datetimelike_compat(values, mask.sum(), converted) + # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + converted = setitem_datetimelike_compat( + values, mask.sum(), converted # type: ignore[arg-type] + ) np.putmask(values, mask, converted) return type(self)._simple_new(values, name=self.name) @@ -5502,7 +5600,9 @@ def isin(self, values, level=None): """ if level is not None: self._validate_index_level(level) - return algos.isin(self._values, values) + # error: Value of type variable "AnyArrayLike" of "isin" cannot be + # 
"Union[ExtensionArray, ndarray]" + return algos.isin(self._values, values) # type: ignore[type-var] def _get_string_slice(self, key: str_t): # this is for partial string indexing, @@ -5923,7 +6023,11 @@ def _cmp_method(self, other, op): else: with np.errstate(all="ignore"): - result = ops.comparison_op(self._values, other, op) + # error: Value of type variable "ArrayLike" of "comparison_op" cannot be + # "Union[ExtensionArray, ndarray]" + result = ops.comparison_op( + self._values, other, op # type: ignore[type-var] + ) return result @@ -5996,7 +6100,11 @@ def any(self, *args, **kwargs): """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") - return np.any(self.values) + # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected + # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, + # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], + # _SupportsArray]" + return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ @@ -6053,7 +6161,11 @@ def all(self, *args, **kwargs): """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") - return np.all(self.values) + # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected + # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, + # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], + # _SupportsArray]" + return np.all(self.values) # type: ignore[arg-type] @final def _maybe_disable_logical_methods(self, opname: str_t): @@ -6340,7 +6452,11 @@ def _maybe_cast_data_without_dtype(subarr): if inferred == "integer": try: - data = _try_convert_to_int_array(subarr, False, None) + # error: Argument 3 to "_try_convert_to_int_array" has incompatible type + # "None"; expected "dtype[Any]" + data = _try_convert_to_int_array( + subarr, False, None # type: ignore[arg-type] + ) return data except ValueError: pass @@ -6374,7 +6490,11 @@ def 
_maybe_cast_data_without_dtype(subarr): pass elif inferred.startswith("timedelta"): - data = TimedeltaArray._from_sequence(subarr, copy=False) + # error: Incompatible types in assignment (expression has type + # "TimedeltaArray", variable has type "ndarray") + data = TimedeltaArray._from_sequence( # type: ignore[assignment] + subarr, copy=False + ) return data elif inferred == "period": try: diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 869836a3da70c..a38ef55614638 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -193,12 +193,17 @@ def _can_hold_strings(self): def _engine_type(self): # self.codes can have dtype int8, int16, int32 or int64, so we need # to return the corresponding engine type (libindex.Int8Engine, etc.). + + # error: Invalid index type "Type[generic]" for "Dict[Type[signedinteger[Any]], + # Any]"; expected type "Type[signedinteger[Any]]" return { np.int8: libindex.Int8Engine, np.int16: libindex.Int16Engine, np.int32: libindex.Int32Engine, np.int64: libindex.Int64Engine, - }[self.codes.dtype.type] + }[ + self.codes.dtype.type # type: ignore[index] + ] _attributes = ["name"] @@ -484,7 +489,9 @@ def _get_indexer( if self.equals(target): return np.arange(len(self), dtype="intp") - return self._get_indexer_non_unique(target._values)[0] + # error: Value of type variable "ArrayLike" of "_get_indexer_non_unique" of + # "CategoricalIndex" cannot be "Union[ExtensionArray, ndarray]" + return self._get_indexer_non_unique(target._values)[0] # type: ignore[type-var] @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) def get_indexer_non_unique(self, target): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1dd5b40f7102f..793dd041fbf6f 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -136,7 +136,9 @@ def _is_all_dates(self) -> bool: # Abstract data attributes @property - def 
values(self) -> np.ndarray: + # error: Return type "ndarray" of "values" incompatible with return type "ArrayLike" + # in supertype "Index" + def values(self) -> np.ndarray: # type: ignore[override] # Note: PeriodArray overrides this to return an ndarray of objects. return self._data._ndarray @@ -528,8 +530,10 @@ def shift(self: _T, periods: int = 1, freq=None) -> _T: PeriodIndex.shift : Shift values of PeriodIndex. """ arr = self._data.view() - arr._freq = self.freq - result = arr._time_shift(periods, freq=freq) + # error: "ExtensionArray" has no attribute "_freq" + arr._freq = self.freq # type: ignore[attr-defined] + # error: "ExtensionArray" has no attribute "_time_shift" + result = arr._time_shift(periods, freq=freq) # type: ignore[attr-defined] return type(self)(result, name=self.name) # -------------------------------------------------------------------- @@ -772,7 +776,8 @@ def _fast_union(self: _T, other: _T, sort=None) -> _T: left, right = self, other left_start = left[0] loc = right.searchsorted(left_start, side="left") - right_chunk = right._values[:loc] + # error: Slice index must be an integer or None + right_chunk = right._values[:loc] # type: ignore[misc] dates = concat_compat((left._values, right_chunk)) # With sort being False, we can't infer that result.freq == self.freq # TODO: no tests rely on the _with_freq("infer"); needed? 
@@ -788,7 +793,8 @@ def _fast_union(self: _T, other: _T, sort=None) -> _T: # concatenate if left_end < right_end: loc = right.searchsorted(left_end, side="right") - right_chunk = right._values[loc:] + # error: Slice index must be an integer or None + right_chunk = right._values[loc:] # type: ignore[misc] dates = concat_compat([left._values, right_chunk]) # The can_fast_union check ensures that the result.freq # should match self.freq diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 9ea43d083f5b3..ed0856f3d30a3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -365,7 +365,10 @@ def _is_dates_only(self) -> bool: """ from pandas.io.formats.format import is_dates_only - return self.tz is None and is_dates_only(self._values) + # error: Argument 1 to "is_dates_only" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "Union[ndarray, + # DatetimeArray, Index, DatetimeIndex]" + return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type] def __reduce__(self): @@ -533,7 +536,9 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): # preserve the tz & copy values = self.copy(deep=True) else: - values = self._values.view("M8[ns]").copy() + # error: Incompatible types in assignment (expression has type + # "Union[ExtensionArray, ndarray]", variable has type "DatetimeIndex") + values = self._values.view("M8[ns]").copy() # type: ignore[assignment] return Series(values, index=index, name=name) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index a5899f83dd238..4c15e9df534ba 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -339,7 +339,9 @@ def astype(self, dtype, copy=True): @cache_readonly def _isnan(self) -> np.ndarray: - return self._data.isna() + # error: Incompatible return value type (got "ExtensionArray", expected + # "ndarray") + return self._data.isna() # type: 
ignore[return-value] @doc(Index.equals) def equals(self, other) -> bool: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index ad512b8393166..58c5b23d12a35 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1227,8 +1227,16 @@ def interval_range( else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): - breaks = date_range(start=start, end=end, periods=periods, freq=freq) + # error: Incompatible types in assignment (expression has type + # "DatetimeIndex", variable has type "ndarray") + breaks = date_range( # type: ignore[assignment] + start=start, end=end, periods=periods, freq=freq + ) else: - breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) + # error: Incompatible types in assignment (expression has type + # "TimedeltaIndex", variable has type "ndarray") + breaks = timedelta_range( # type: ignore[assignment] + start=start, end=end, periods=periods, freq=freq + ) return IntervalIndex.from_breaks(breaks, name=name, closed=closed) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index fc3e404998b43..3b538b948ae81 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -711,14 +711,18 @@ def _values(self) -> np.ndarray: vals, (ABCDatetimeIndex, ABCTimedeltaIndex) ): vals = vals.astype(object) - vals = np.array(vals, copy=False) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "Index") + vals = np.array(vals, copy=False) # type: ignore[assignment] values.append(vals) arr = lib.fast_zip(values) return arr @property - def values(self) -> np.ndarray: + # error: Return type "ndarray" of "values" incompatible with return type "ArrayLike" + # in supertype "Index" + def values(self) -> np.ndarray: # type: ignore[override] return self._values @property @@ -2218,7 +2222,11 @@ def drop(self, codes, level=None, errors="raise"): if not isinstance(codes, 
(np.ndarray, Index)): try: - codes = com.index_labels_to_array(codes, dtype=object) + # error: Argument "dtype" to "index_labels_to_array" has incompatible + # type "Type[object]"; expected "Union[str, dtype[Any], None]" + codes = com.index_labels_to_array( + codes, dtype=object # type: ignore[arg-type] + ) except ValueError: pass @@ -3162,10 +3170,14 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): indexer = codes.take(ensure_platform_int(indexer)) result = Series(Index(indexer).isin(r).nonzero()[0]) m = result.map(mapper) - m = np.asarray(m) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Series") + m = np.asarray(m) # type: ignore[assignment] else: - m = np.zeros(len(codes), dtype=bool) + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "Series") + m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment] m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True return m diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index a581516f23feb..b6f476d864011 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -253,7 +253,9 @@ def asi8(self) -> np.ndarray: FutureWarning, stacklevel=2, ) - return self._values.view(self._default_dtype) + # error: Incompatible return value type (got "Union[ExtensionArray, ndarray]", + # expected "ndarray") + return self._values.view(self._default_dtype) # type: ignore[return-value] class Int64Index(IntegerIndex): @@ -292,7 +294,10 @@ def _convert_arr_indexer(self, keyarr): ): dtype = np.uint64 - return com.asarray_tuplesafe(keyarr, dtype=dtype) + # error: Argument "dtype" to "asarray_tuplesafe" has incompatible type + # "Optional[Type[unsignedinteger[Any]]]"; expected "Union[str, dtype[Any], + # None]" + return com.asarray_tuplesafe(keyarr, dtype=dtype) # type: ignore[arg-type] _float64_descr_args = { @@ -328,7 +333,10 @@ def astype(self, 
dtype, copy=True): elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype): # TODO(jreback); this can change once we have an EA Index type # GH 13149 - arr = astype_nansafe(self._values, dtype=dtype) + + # error: Argument 1 to "astype_nansafe" has incompatible type + # "Union[ExtensionArray, ndarray]"; expected "ndarray" + arr = astype_nansafe(self._values, dtype=dtype) # type: ignore[arg-type] return Int64Index(arr, name=self.name) return super().astype(dtype, copy=copy) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0c5dbec2094e5..b15912e4c477b 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -275,7 +275,9 @@ def __new__( # Data @property - def values(self) -> np.ndarray: + # error: Return type "ndarray" of "values" incompatible with return type "ArrayLike" + # in supertype "Index" + def values(self) -> np.ndarray: # type: ignore[override] return np.asarray(self, dtype=object) def _maybe_convert_timedelta(self, other): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 56093c2a399c2..05bb32dad6cab 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -111,7 +111,12 @@ def __new__( name=None, ): - cls._validate_dtype(dtype) + # error: Argument 1 to "_validate_dtype" of "NumericIndex" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], + # Type[complex], Type[bool], Type[object], None]"; expected + # "Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]" + cls._validate_dtype(dtype) # type: ignore[arg-type] name = maybe_extract_name(name, start, cls) # RangeIndex @@ -155,7 +160,12 @@ def from_range( f"range, {repr(data)} was passed" ) - cls._validate_dtype(dtype) + # error: Argument 1 to "_validate_dtype" of "NumericIndex" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], 
Type[int], + # Type[complex], Type[bool], Type[object], None]"; expected + # "Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]" + cls._validate_dtype(dtype) # type: ignore[arg-type] return cls._simple_new(data, name=name) @classmethod @@ -901,7 +911,8 @@ def _arith_method(self, other, op): # apply if we have an override if step: with np.errstate(all="ignore"): - rstep = step(left.step, right) + # error: "bool" not callable + rstep = step(left.step, right) # type: ignore[operator] # we don't have a representable op # so return a base index diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index bbe71c4977e77..0ab4bc991f468 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -267,7 +267,11 @@ def reduce( if res is NaT and is_timedelta64_ns_dtype(arr.dtype): result_arrays.append(np.array(["NaT"], dtype="timedelta64[ns]")) else: - result_arrays.append(sanitize_array([res], None)) + # error: Argument 1 to "append" of "list" has incompatible type + # "ExtensionArray"; expected "ndarray" + result_arrays.append( + sanitize_array([res], None) # type: ignore[arg-type] + ) result_indices.append(i) index = Index._simple_new(np.array([None], dtype=object)) # placeholder @@ -278,7 +282,9 @@ def reduce( indexer = np.arange(self.shape[0]) columns = self.items - new_mgr = type(self)(result_arrays, [index, columns]) + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] return new_mgr, indexer def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: @@ -318,7 +324,9 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: else: columns = self.items - return type(self)(result_arrays, [index, columns]) + # error: 
Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: """ @@ -408,7 +416,9 @@ def apply( if len(result_arrays) == 0: return self.make_empty(new_axes) - return type(self)(result_arrays, new_axes) + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + return type(self)(result_arrays, new_axes) # type: ignore[arg-type] def apply_2d(self: T, f, ignore_failures: bool = False, **kwargs) -> T: """ @@ -469,9 +479,8 @@ def apply_with_block(self: T, f, align_keys=None, swap_axis=True, **kwargs) -> T elif arr.dtype.kind == "m" and not isinstance(arr, np.ndarray): # TimedeltaArray needs to be converted to ndarray for TimedeltaBlock - # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no - # attribute "_data" - arr = arr._data # type: ignore[union-attr] + # error: "ExtensionArray" has no attribute "_data" + arr = arr._data # type: ignore[attr-defined] if self.ndim == 2: if isinstance(arr, np.ndarray): @@ -500,9 +509,16 @@ def quantile( interpolation="linear", ) -> ArrayManager: - arrs = [ensure_block_shape(x, 2) for x in self.arrays] + # error: Value of type variable "ArrayLike" of "ensure_block_shape" cannot be + # "Union[ndarray, ExtensionArray]" + arrs = [ensure_block_shape(x, 2) for x in self.arrays] # type: ignore[type-var] assert axis == 1 - new_arrs = [quantile_compat(x, qs, interpolation, axis=axis) for x in arrs] + # error: Value of type variable "ArrayLike" of "quantile_compat" cannot be + # "object" + new_arrs = [ + quantile_compat(x, qs, interpolation, axis=axis) # type: ignore[type-var] + for x in arrs + ] for i, arr in enumerate(new_arrs): if arr.ndim == 2: assert arr.shape[0] == 1, arr.shape @@ -765,7 +781,9 @@ def as_array( result = 
np.empty(self.shape_proper, dtype=dtype) - for i, arr in enumerate(self.arrays): + # error: Incompatible types in assignment (expression has type "Union[ndarray, + # ExtensionArray]", variable has type "ndarray") + for i, arr in enumerate(self.arrays): # type: ignore[assignment] arr = arr.astype(dtype, copy=copy) result[:, i] = arr @@ -827,7 +845,11 @@ def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). """ - return self.arrays[i] + # error: Incompatible return value type (got "Union[ndarray, ExtensionArray]", + # expected "ExtensionArray") + # error: Incompatible return value type (got "Union[ndarray, ExtensionArray]", + # expected "ndarray") + return self.arrays[i] # type: ignore[return-value] def idelete(self, indexer): """ @@ -870,7 +892,9 @@ def iset(self, loc: Union[int, slice, np.ndarray], value): assert isinstance(value, (np.ndarray, ExtensionArray)) assert value.ndim == 1 assert len(value) == len(self._axes[0]) - self.arrays[loc] = value + # error: Invalid index type "Union[int, slice, ndarray]" for + # "List[Union[ndarray, ExtensionArray]]"; expected type "int" + self.arrays[loc] = value # type: ignore[index] return # multiple columns -> convert slice or array to integer indices @@ -883,7 +907,9 @@ def iset(self, loc: Union[int, slice, np.ndarray], value): else: assert isinstance(loc, np.ndarray) assert loc.dtype == "bool" - indices = np.nonzero(loc)[0] + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "range") + indices = np.nonzero(loc)[0] # type: ignore[assignment] assert value.ndim == 2 assert value.shape[0] == len(self._axes[0]) @@ -1002,7 +1028,9 @@ def _reindex_indexer( else: validate_indices(indexer, len(self._axes[0])) new_arrays = [ - take_1d( + # error: Value of type variable "ArrayLike" of "take_1d" cannot be + # "Union[ndarray, ExtensionArray]" [type-var] + take_1d( # type: ignore[type-var] arr, indexer, allow_fill=True, @@ 
-1047,7 +1075,11 @@ def _make_na_array(self, fill_value=None): fill_value = np.nan dtype, fill_value = infer_dtype_from_scalar(fill_value) - values = np.empty(self.shape_proper[0], dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type "Union[dtype[Any], + # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + values = np.empty(self.shape_proper[0], dtype=dtype) # type: ignore[arg-type] values.fill(fill_value) return values @@ -1057,7 +1089,9 @@ def _equal_values(self, other) -> bool: assuming shape and indexes have already been checked. """ for left, right in zip(self.arrays, other.arrays): - if not array_equals(left, right): + # error: Value of type variable "ArrayLike" of "array_equals" cannot be + # "Union[Any, ndarray, ExtensionArray]" + if not array_equals(left, right): # type: ignore[type-var] return False else: return True @@ -1084,7 +1118,9 @@ def unstack(self, unstacker, fill_value) -> ArrayManager: new_arrays = [] for arr in self.arrays: for i in range(unstacker.full_shape[1]): - new_arr = take_1d( + # error: Value of type variable "ArrayLike" of "take_1d" cannot be + # "Union[ndarray, ExtensionArray]" [type-var] + new_arr = take_1d( # type: ignore[type-var] arr, new_indexer2D[:, i], allow_fill=True, fill_value=fill_value ) new_arrays.append(new_arr) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f0d7d7e441527..1767b56962db1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -117,8 +117,10 @@ ) from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +# comparison is faster than is_object_dtype -_dtype_obj = np.dtype(object) # comparison is faster than is_object_dtype +# error: Value of type variable "_DTypeScalar" of "dtype" cannot be "object" +_dtype_obj = np.dtype(object) # type: ignore[type-var] class 
Block(PandasObject): @@ -277,7 +279,9 @@ def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ - return PandasArray(self.values) + # error: Argument 1 to "PandasArray" has incompatible type "Union[ndarray, + # ExtensionArray]"; expected "Union[ndarray, PandasArray]" + return PandasArray(self.values) # type: ignore[arg-type] def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: """ @@ -286,7 +290,9 @@ def get_values(self, dtype: Optional[DtypeObj] = None) -> np.ndarray: """ if dtype == _dtype_obj: return self.values.astype(_dtype_obj) - return self.values + # error: Incompatible return value type (got "Union[ndarray, ExtensionArray]", + # expected "ndarray") + return self.values # type: ignore[return-value] @final def get_block_values_for_json(self) -> np.ndarray: @@ -474,7 +480,9 @@ def fillna( inplace = validate_bool_kwarg(inplace, "inplace") mask = isna(self.values) - mask, noop = validate_putmask(self.values, mask) + # error: Value of type variable "ArrayLike" of "validate_putmask" cannot be + # "Union[ndarray, ExtensionArray]" + mask, noop = validate_putmask(self.values, mask) # type: ignore[type-var] if limit is not None: limit = libalgos.validate_limit(None, limit=limit) @@ -617,7 +625,9 @@ def downcast(self, dtypes=None) -> List[Block]: if dtypes is None: dtypes = "infer" - nv = maybe_downcast_to_dtype(values, dtypes) + # error: Value of type variable "ArrayLike" of "maybe_downcast_to_dtype" + # cannot be "Union[ndarray, ExtensionArray]" + nv = maybe_downcast_to_dtype(values, dtypes) # type: ignore[type-var] return [self.make_block(nv)] # ndim > 1 @@ -661,7 +671,11 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): if values.dtype.kind in ["m", "M"]: values = self.array_values() - new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) + # error: Value of type variable "ArrayLike" of "astype_array_safe" cannot be + # "Union[ndarray, ExtensionArray]" 
+ new_values = astype_array_safe( + values, dtype, copy=copy, errors=errors # type: ignore[type-var] + ) newb = self.make_block(new_values) if newb.shape != self.shape: @@ -758,7 +772,9 @@ def replace( values = self.values - mask = missing.mask_missing(values, to_replace) + # error: Value of type variable "ArrayLike" of "mask_missing" cannot be + # "Union[ndarray, ExtensionArray]" + mask = missing.mask_missing(values, to_replace) # type: ignore[type-var] if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. @@ -785,7 +801,9 @@ def replace( ) blk = self if inplace else self.copy() - putmask_inplace(blk.values, mask, value) + # error: Value of type variable "ArrayLike" of "putmask_inplace" cannot be + # "Union[ndarray, ExtensionArray]" + putmask_inplace(blk.values, mask, value) # type: ignore[type-var] blocks = blk.convert(numeric=False, copy=False) return blocks @@ -826,7 +844,9 @@ def _replace_regex( rx = re.compile(to_replace) new_values = self.values if inplace else self.values.copy() - replace_regex(new_values, rx, value, mask) + # error: Value of type variable "ArrayLike" of "replace_regex" cannot be + # "Union[ndarray, ExtensionArray]" + replace_regex(new_values, rx, value, mask) # type: ignore[type-var] block = self.make_block(new_values) return [block] @@ -863,14 +883,26 @@ def _replace_list( # in order to avoid repeating the same computations mask = ~isna(self.values) masks = [ - compare_or_regex_search(self.values, s[0], regex=regex, mask=mask) + # error: Value of type variable "ArrayLike" of "compare_or_regex_search" + # cannot be "Union[ndarray, ExtensionArray]" + compare_or_regex_search( # type: ignore[type-var] + self.values, s[0], regex=regex, mask=mask + ) for s in pairs ] else: # GH#38086 faster if we know we dont need to check for regex - masks = [missing.mask_missing(self.values, s[0]) for s in pairs] - masks = [extract_bool_array(x) for x in masks] + # error: Value of type 
variable "ArrayLike" of "mask_missing" cannot be + # "Union[ndarray, ExtensionArray]" + masks = [ + missing.mask_missing(self.values, s[0]) # type: ignore[type-var] + for s in pairs + ] + + # error: Value of type variable "ArrayLike" of "extract_bool_array" cannot be + # "Union[ndarray, ExtensionArray, bool]" + masks = [extract_bool_array(x) for x in masks] # type: ignore[type-var] rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(pairs): @@ -928,7 +960,9 @@ def _replace_coerce( nb = self.coerce_to_target_dtype(value) if nb is self and not inplace: nb = nb.copy() - putmask_inplace(nb.values, mask, value) + # error: Value of type variable "ArrayLike" of "putmask_inplace" cannot + # be "Union[ndarray, ExtensionArray]" + putmask_inplace(nb.values, mask, value) # type: ignore[type-var] return [nb] else: regex = should_use_regex(regex, to_replace) @@ -1001,7 +1035,9 @@ def setitem(self, indexer, value): # length checking check_setitem_lengths(indexer, value, values) - exact_match = is_exact_shape_match(values, arr_value) + # error: Value of type variable "ArrayLike" of "is_exact_shape_match" cannot be + # "Union[Any, ndarray, ExtensionArray]" + exact_match = is_exact_shape_match(values, arr_value) # type: ignore[type-var] if is_empty_indexer(indexer, arr_value): # GH#8669 empty indexers @@ -1040,7 +1076,11 @@ def setitem(self, indexer, value): values[indexer] = value.to_numpy(values.dtype).reshape(-1, 1) else: - value = setitem_datetimelike_compat(values, len(values[indexer]), value) + # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type + # "Union[ndarray, ExtensionArray]"; expected "ndarray" + value = setitem_datetimelike_compat( + values, len(values[indexer]), value # type: ignore[arg-type] + ) values[indexer] = value if transpose: @@ -1065,7 +1105,9 @@ def putmask(self, mask, new) -> List[Block]: List[Block] """ orig_mask = mask - mask, noop = validate_putmask(self.values.T, mask) + # error: Value of type variable 
"ArrayLike" of "validate_putmask" cannot be + # "Union[ndarray, ExtensionArray]" + mask, noop = validate_putmask(self.values.T, mask) # type: ignore[type-var] assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) # if we are passed a scalar None, convert it here @@ -1074,7 +1116,9 @@ def putmask(self, mask, new) -> List[Block]: if self._can_hold_element(new): - putmask_without_repeat(self.values.T, mask, new) + # error: Argument 1 to "putmask_without_repeat" has incompatible type + # "Union[ndarray, ExtensionArray]"; expected "ndarray" + putmask_without_repeat(self.values.T, mask, new) # type: ignore[arg-type] return [self] elif noop: @@ -1089,7 +1133,10 @@ def putmask(self, mask, new) -> List[Block]: elif self.ndim == 1 or self.shape[0] == 1: # no need to split columns - nv = putmask_smart(self.values.T, mask, new).T + + # error: Argument 1 to "putmask_smart" has incompatible type "Union[ndarray, + # ExtensionArray]"; expected "ndarray" + nv = putmask_smart(self.values.T, mask, new).T # type: ignore[arg-type] return [self.make_block(nv)] else: @@ -1285,7 +1332,9 @@ def take_nd( else: allow_fill = True - new_values = algos.take_nd( + # error: Value of type variable "ArrayLike" of "take_nd" cannot be + # "Union[ndarray, ExtensionArray]" + new_values = algos.take_nd( # type: ignore[type-var] values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) @@ -1309,7 +1358,12 @@ def shift(self, periods: int, axis: int = 0, fill_value: Any = None) -> List[Blo """ shift the block by periods, possibly upcast """ # convert integer to float if necessary. 
need to do a lot more than # that, handle boolean etc also - new_values, fill_value = maybe_upcast(self.values, fill_value) + + # error: Argument 1 to "maybe_upcast" has incompatible type "Union[ndarray, + # ExtensionArray]"; expected "ndarray" + new_values, fill_value = maybe_upcast( + self.values, fill_value # type: ignore[arg-type] + ) new_values = shift(new_values, periods, axis, fill_value) @@ -1344,7 +1398,9 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: if transpose: values = values.T - icond, noop = validate_putmask(values, ~cond) + # error: Value of type variable "ArrayLike" of "validate_putmask" cannot be + # "Union[ndarray, ExtensionArray]" + icond, noop = validate_putmask(values, ~cond) # type: ignore[type-var] if is_valid_na_for_dtype(other, self.dtype) and not self.is_object: other = self.fill_value @@ -1362,7 +1418,13 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: blocks = block.where(orig_other, cond, errors=errors, axis=axis) return self._maybe_downcast(blocks, "infer") - alt = setitem_datetimelike_compat(values, icond.sum(), other) + # error: Argument 1 to "setitem_datetimelike_compat" has incompatible type + # "Union[ndarray, ExtensionArray]"; expected "ndarray" + # error: Argument 2 to "setitem_datetimelike_compat" has incompatible type + # "number[Any]"; expected "int" + alt = setitem_datetimelike_compat( + values, icond.sum(), other # type: ignore[arg-type] + ) if alt is not other: result = values.copy() np.putmask(result, icond, alt) @@ -1449,7 +1511,11 @@ def quantile( assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this - result = quantile_compat(self.values, qs, interpolation, axis) + # error: Value of type variable "ArrayLike" of "quantile_compat" cannot be + # "Union[ndarray, ExtensionArray]" + result = quantile_compat( # type: ignore[type-var] + self.values, qs, interpolation, axis + ) return new_block(result, 
placement=self.mgr_locs, ndim=2) @@ -1489,7 +1555,9 @@ def iget(self, col): elif isinstance(col, slice): if col != slice(None): raise NotImplementedError(col) - return self.values[[loc]] + # error: Invalid index type "List[Any]" for "ExtensionArray"; expected + # type "Union[int, slice, ndarray]" + return self.values[[loc]] # type: ignore[index] return self.values[loc] else: if col != 0: @@ -1615,7 +1683,9 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs): values = self.values mask = isna(values) - values = np.asarray(values.astype(object)) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + values = np.asarray(values.astype(object)) # type: ignore[assignment] values[mask] = na_rep # TODO(EA2D): reshape not needed with 2D EAs @@ -1742,7 +1812,10 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: # The default `other` for Series / Frame is np.nan # we want to replace that with the correct NA value # for the type - other = self.dtype.na_value + + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "na_value" + other = self.dtype.na_value # type: ignore[union-attr] if is_sparse(self.values): # TODO(SparseArray.__setitem__): remove this if condition @@ -1832,7 +1905,9 @@ def _can_hold_element(self, element: Any) -> bool: if isinstance(element, (IntegerArray, FloatingArray)): if element._mask.any(): return False - return can_hold_element(self.dtype, element) + # error: Argument 1 to "can_hold_element" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" + return can_hold_element(self.dtype, element) # type: ignore[arg-type] @property def _can_hold_na(self): @@ -2317,5 +2392,7 @@ def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. 
- values = np.asarray(values).reshape(1, -1) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + values = np.asarray(values).reshape(1, -1) # type: ignore[assignment] return values diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 924d2a77e5da5..64777ef31ac6e 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -187,7 +187,9 @@ def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: Dict[int, np.ndarra blk = mgr.blocks[0] return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] - ax0_indexer = None + # error: Incompatible types in assignment (expression has type "None", variable + # has type "ndarray") + ax0_indexer = None # type: ignore[assignment] blknos = mgr.blknos blklocs = mgr.blklocs @@ -329,7 +331,9 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: if self.is_valid_na_for(empty_dtype): blk_dtype = getattr(self.block, "dtype", None) - if blk_dtype == np.dtype(object): + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be + # "object" + if blk_dtype == np.dtype(object): # type: ignore[type-var] # we want to avoid filling with np.nan if we are # using None; we already know that we are all # nulls @@ -340,11 +344,17 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: if is_datetime64tz_dtype(empty_dtype): # TODO(EA2D): special case unneeded with 2D EAs i8values = np.full(self.shape[1], fill_value.value) - return DatetimeArray(i8values, dtype=empty_dtype) + # error: Incompatible return value type (got "DatetimeArray", + # expected "ndarray") + return DatetimeArray( # type: ignore[return-value] + i8values, dtype=empty_dtype + ) elif is_extension_array_dtype(blk_dtype): pass elif is_extension_array_dtype(empty_dtype): - cls = empty_dtype.construct_array_type() + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" + # has no 
attribute "construct_array_type" + cls = empty_dtype.construct_array_type() # type: ignore[union-attr] missing_arr = cls._from_sequence([], dtype=empty_dtype) ncols, nrows = self.shape assert ncols == 1, ncols @@ -355,7 +365,15 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: else: # NB: we should never get here with empty_dtype integer or bool; # if we did, the missing_arr.fill would cast to gibberish - missing_arr = np.empty(self.shape, dtype=empty_dtype) + + # error: Argument "dtype" to "empty" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "Union[dtype[Any], + # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]]" + missing_arr = np.empty( + self.shape, dtype=empty_dtype # type: ignore[arg-type] + ) missing_arr.fill(fill_value) return missing_arr @@ -421,14 +439,21 @@ def _concatenate_join_units( elif any(isinstance(t, ExtensionArray) for t in to_concat): # concatting with at least one EA means we are concatting a single column # the non-EA values are 2D arrays with shape (1, n) - to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] + + # error: Invalid index type "Tuple[int, slice]" for "ExtensionArray"; expected + # type "Union[int, slice, ndarray]" + to_concat = [ + t if isinstance(t, ExtensionArray) else t[0, :] # type: ignore[index] + for t in to_concat + ] concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) concat_values = ensure_block_shape(concat_values, 2) else: concat_values = concat_compat(to_concat, axis=concat_axis) - return concat_values + # error: Incompatible return value type (got "ExtensionArray", expected "ndarray") + return concat_values # type: ignore[return-value] def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): @@ -436,7 +461,9 @@ def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): Find the NA value to go with this 
dtype. """ if is_extension_array_dtype(dtype): - return dtype.na_value + # error: Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has no + # attribute "na_value" + return dtype.na_value # type: ignore[union-attr] elif dtype.kind in ["m", "M"]: return dtype.type("NaT") elif dtype.kind in ["f", "c"]: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 0b712267ccf11..0ea8c3eb994a3 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -162,10 +162,17 @@ def rec_array_to_mgr( if isinstance(data, np.ma.MaskedArray): new_arrays = fill_masked_arrays(data, arr_columns) else: - new_arrays = arrays + # error: Incompatible types in assignment (expression has type + # "List[ExtensionArray]", variable has type "List[ndarray]") + new_arrays = arrays # type: ignore[assignment] # create the manager - arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) + + # error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]"; + # expected "List[ExtensionArray]" + arrays, arr_columns = reorder_arrays( + new_arrays, arr_columns, columns # type: ignore[arg-type] + ) if columns is None: columns = arr_columns @@ -357,12 +364,22 @@ def dict_to_mgr( if missing.any() and not is_integer_dtype(dtype): if dtype is None or ( not is_extension_array_dtype(dtype) - and np.issubdtype(dtype, np.flexible) + # error: Argument 1 to "issubdtype" has incompatible type + # "Union[dtype, ExtensionDtype]"; expected "Union[dtype, None, + # type, _SupportsDtype, str, Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, + # Any]]" + and np.issubdtype(dtype, np.flexible) # type: ignore[arg-type] ): # GH#1783 - nan_dtype = np.dtype(object) + + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be + # "object" + nan_dtype = np.dtype(object) # type: ignore[type-var] else: - nan_dtype = dtype + # error: Incompatible types in assignment 
(expression has type + # "Union[dtype, ExtensionDtype]", variable has type "dtype") + nan_dtype = dtype # type: ignore[assignment] val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() @@ -557,7 +574,9 @@ def extract_index(data) -> Index: else: index = ibase.default_index(lengths[0]) - return ensure_index(index) + # error: Value of type variable "AnyArrayLike" of "ensure_index" cannot be + # "Optional[Index]" + return ensure_index(index) # type: ignore[type-var] def reorder_arrays( @@ -660,7 +679,9 @@ def to_arrays( if not len(data): if isinstance(data, np.ndarray): - columns = data.dtype.names + # error: Incompatible types in assignment (expression has type + # "Optional[Tuple[str, ...]]", variable has type "Optional[Index]") + columns = data.dtype.names # type: ignore[assignment] if columns is not None: # i.e. numpy structured array arrays = [data[name] for name in columns] @@ -689,8 +710,17 @@ def to_arrays( data = [tuple(x) for x in data] content = _list_to_arrays(data) - content, columns = _finalize_columns_and_data(content, columns, dtype) - return content, columns + # error: Incompatible types in assignment (expression has type "List[ndarray]", + # variable has type "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, + # Any]]]") + content, columns = _finalize_columns_and_data( # type: ignore[assignment] + content, columns, dtype + ) + # error: Incompatible return value type (got "Tuple[ndarray, Index]", expected + # "Tuple[List[ExtensionArray], Index]") + # error: Incompatible return value type (got "Tuple[ndarray, Index]", expected + # "Tuple[List[ndarray], Index]") + return content, columns # type: ignore[return-value] def _list_to_arrays(data: List[Union[Tuple, List]]) -> np.ndarray: @@ -731,7 +761,11 @@ def _list_of_series_to_arrays( values = extract_array(s, extract_numpy=True) aligned_values.append(algorithms.take_nd(values, indexer)) - content = np.vstack(aligned_values) + # 
error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]"; + # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic], + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]]" + content = np.vstack(aligned_values) # type: ignore[arg-type] return content, columns @@ -781,17 +815,34 @@ def _finalize_columns_and_data( """ Ensure we have valid columns, cast object dtypes if possible. """ - content = list(content.T) + # error: Incompatible types in assignment (expression has type "List[Any]", variable + # has type "ndarray") + content = list(content.T) # type: ignore[assignment] try: - columns = _validate_or_indexify_columns(content, columns) + # error: Argument 1 to "_validate_or_indexify_columns" has incompatible type + # "ndarray"; expected "List[Any]" + columns = _validate_or_indexify_columns( + content, columns # type: ignore[arg-type] + ) except AssertionError as err: # GH#26429 do not raise user-facing AssertionError raise ValueError(err) from err if len(content) and content[0].dtype == np.object_: - content = _convert_object_array(content, dtype=dtype) - return content, columns + # error: Incompatible types in assignment (expression has type + # "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, Any]]]", + # variable has type "ndarray") + # error: Argument 1 to "_convert_object_array" has incompatible type "ndarray"; + # expected "List[Union[Union[str, int, float, bool], Union[Any, Any, Any, + # Any]]]" + content = _convert_object_array( # type: ignore[assignment] + content, dtype=dtype # type: ignore[arg-type] + ) + # error: Incompatible return value type (got "Tuple[ndarray, Union[Index, + # List[Union[str, int]]]]", expected "Tuple[List[ndarray], Union[Index, + # List[Union[str, int]]]]") + return content, columns # type: ignore[return-value] def _validate_or_indexify_columns( diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py 
index 2daa1ce8dc9a4..476bd836bf216 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -173,8 +173,12 @@ def __init__( # Populate known_consolidate, blknos, and blklocs lazily self._known_consolidated = False - self._blknos = None - self._blklocs = None + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + self._blknos = None # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "None", + # variable has type "ndarray") + self._blklocs = None # type: ignore[assignment] @classmethod def _simple_new(cls, blocks: Tuple[Block, ...], axes: List[Index]): @@ -316,7 +320,11 @@ def arrays(self) -> List[ArrayLike]: Not to be used in actual code, and return value is not the same as the ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). """ - return [blk.values for blk in self.blocks] + # error: List comprehension has incompatible type List[Union[ndarray, + # ExtensionArray]]; expected List[ExtensionArray] + # error: List comprehension has incompatible type List[Union[ndarray, + # ExtensionArray]]; expected List[ndarray] + return [blk.values for blk in self.blocks] # type: ignore[misc] def __getstate__(self): block_values = [b.values for b in self.blocks] @@ -889,13 +897,21 @@ def as_array( blk = self.blocks[0] if blk.is_extension: # Avoid implicit conversion of extension blocks to object - arr = blk.values.to_numpy(dtype=dtype, na_value=na_value).reshape( - blk.shape - ) + + # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no + # attribute "to_numpy" + arr = blk.values.to_numpy( # type: ignore[union-attr] + dtype=dtype, na_value=na_value + ).reshape(blk.shape) else: arr = np.asarray(blk.get_values()) if dtype: - arr = arr.astype(dtype, copy=False) + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has + # incompatible type "Union[ExtensionDtype, str, dtype[Any], + # Type[object]]"; expected 
"Union[dtype[Any], None, type, + # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]" + arr = arr.astype(dtype, copy=False) # type: ignore[arg-type] else: arr = self._interleave(dtype=dtype, na_value=na_value) # The underlying data was copied within _interleave @@ -928,7 +944,12 @@ def _interleave( elif is_dtype_equal(dtype, str): dtype = "object" - result = np.empty(self.shape, dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], None, type, _SupportsDType, str, Union[Tuple[Any, int], + # Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, + # Any]]]" + result = np.empty(self.shape, dtype=dtype) # type: ignore[arg-type] itemmask = np.zeros(self.shape[0]) @@ -936,9 +957,17 @@ def _interleave( rl = blk.mgr_locs if blk.is_extension: # Avoid implicit conversion of extension blocks to object - arr = blk.values.to_numpy(dtype=dtype, na_value=na_value) + + # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no + # attribute "to_numpy" + arr = blk.values.to_numpy( # type: ignore[union-attr] + dtype=dtype, na_value=na_value + ) else: - arr = blk.get_values(dtype) + # error: Argument 1 to "get_values" of "Block" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + arr = blk.get_values(dtype) # type: ignore[arg-type] result[rl.indexer] = arr itemmask[rl.indexer] = 1 @@ -989,7 +1018,12 @@ def fast_xs(self, loc: int) -> ArrayLike: # we'll eventually construct an ExtensionArray. 
result = np.empty(n, dtype=object) else: - result = np.empty(n, dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type + # "Union[dtype, ExtensionDtype, None]"; expected "Union[dtype, + # None, type, _SupportsDtype, str, Tuple[Any, int], Tuple[Any, + # Union[int, Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, + # Any]]" + result = np.empty(n, dtype=dtype) # type: ignore[arg-type] for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None @@ -1000,7 +1034,9 @@ def fast_xs(self, loc: int) -> ArrayLike: if isinstance(dtype, ExtensionDtype): result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) - return result + # error: Incompatible return value type (got "ndarray", expected + # "ExtensionArray") + return result # type: ignore[return-value] def consolidate(self) -> BlockManager: """ @@ -1123,7 +1159,11 @@ def value_getitem(placement): # We have 6 tests where loc is _not_ an int. # In this case, get_blkno_placements will yield only one tuple, # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) - loc = [loc] + + # error: Incompatible types in assignment (expression has type + # "List[Union[int, slice, ndarray]]", variable has type "Union[int, + # slice, ndarray]") + loc = [loc] # type: ignore[assignment] # Accessing public blknos ensures the public versions are initialized blknos = self.blknos[loc] @@ -1461,7 +1501,11 @@ def _make_na_block(self, placement, fill_value=None): block_shape[0] = len(placement) dtype, fill_value = infer_dtype_from_scalar(fill_value) - block_values = np.empty(block_shape, dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type "Union[dtype, + # ExtensionDtype]"; expected "Union[dtype, None, type, _SupportsDtype, str, + # Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], _DtypeDict, + # Tuple[Any, Any]]" + block_values = np.empty(block_shape, dtype=dtype) # type: ignore[arg-type] block_values.fill(fill_value) return 
new_block(block_values, placement=placement, ndim=block_values.ndim) @@ -1503,7 +1547,9 @@ def _equal_values(self: T, other: T) -> bool: return False left = self.blocks[0].values right = other.blocks[0].values - return array_equals(left, right) + # error: Value of type variable "ArrayLike" of "array_equals" cannot be + # "Union[ndarray, ExtensionArray]" + return array_equals(left, right) # type: ignore[type-var] return blockwise_all(self, other, array_equals) @@ -1884,7 +1930,12 @@ def _multi_blockify(tuples, dtype: Optional[Dtype] = None): new_blocks = [] for dtype, tup_block in grouper: - values, placement = _stack_arrays(list(tup_block), dtype) + # error: Argument 2 to "_stack_arrays" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], Type[int], + # Type[complex], Type[bool], Type[object], None]"; expected "dtype[Any]" + values, placement = _stack_arrays( + list(tup_block), dtype # type: ignore[arg-type] + ) block = new_block(values, placement=placement, ndim=2) new_blocks.append(block) @@ -1958,7 +2009,11 @@ def _merge_blocks( # TODO: optimization potential in case all mgrs contain slices and # combination of those slices is a slice, too. 
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) - new_values = np.vstack([b.values for b in blocks]) + # error: List comprehension has incompatible type List[Union[ndarray, + # ExtensionArray]]; expected List[Union[complex, generic, Sequence[Union[int, + # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], + # _SupportsArray]] + new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc] argsort = np.argsort(new_mgr_locs) new_values = new_values[argsort] diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index dbd309f0836a5..103092ba37b70 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -108,21 +108,35 @@ def _get_same_shape_values( # TODO(EA2D): with 2D EAs only this first clause would be needed if not (left_ea or right_ea): - lvals = lvals[rblk.mgr_locs.indexer, :] + # error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray, + # ExtensionArray]"; expected type "Union[int, slice, ndarray]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index] assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) elif left_ea and right_ea: assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) elif right_ea: # lvals are 2D, rvals are 1D - lvals = lvals[rblk.mgr_locs.indexer, :] + + # error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray, + # ExtensionArray]"; expected type "Union[int, slice, ndarray]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index] assert lvals.shape[0] == 1, lvals.shape - lvals = lvals[0, :] + # error: Invalid index type "Tuple[int, slice]" for "Union[Any, + # ExtensionArray]"; expected type "Union[int, slice, ndarray]" + lvals = lvals[0, :] # type: ignore[index] else: # lvals are 1D, rvals are 2D assert rvals.shape[0] == 1, rvals.shape - rvals = rvals[0, :] - - return lvals, rvals + # error: Invalid index type "Tuple[int, slice]" for "Union[ndarray, + # ExtensionArray]"; expected type "Union[int, slice, 
ndarray]" + rvals = rvals[0, :] # type: ignore[index] + + # error: Incompatible return value type (got "Tuple[Union[ndarray, ExtensionArray], + # Union[ndarray, ExtensionArray]]", expected "Tuple[ExtensionArray, + # ExtensionArray]") + # error: Incompatible return value type (got "Tuple[Union[ndarray, ExtensionArray], + # Union[ndarray, ExtensionArray]]", expected "Tuple[ndarray, ndarray]") + return lvals, rvals # type: ignore[return-value] def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool: diff --git a/pandas/core/missing.py b/pandas/core/missing.py index dc42a175409c2..48b2084319292 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -75,7 +75,11 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> np.ndarray: # known to be holdable by arr. # When called from Series._single_replace, values_to_mask is tuple or list dtype, values_to_mask = infer_dtype_from(values_to_mask) - values_to_mask = np.array(values_to_mask, dtype=dtype) + # error: Argument "dtype" to "array" has incompatible type "Union[dtype[Any], + # ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str, + # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any], + # _DTypeDict, Tuple[Any, Any]]]" + values_to_mask = np.array(values_to_mask, dtype=dtype) # type: ignore[arg-type] na_mask = isna(values_to_mask) nonna = values_to_mask[~na_mask] @@ -305,7 +309,12 @@ def interpolate_1d( if method in NP_METHODS: # np.interp requires sorted X values, #21037 - indexer = np.argsort(inds[valid]) + + # error: Argument 1 to "argsort" has incompatible type "Union[ExtensionArray, + # Any]"; expected "Union[Union[int, float, complex, str, bytes, generic], + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], _SupportsArray]" + indexer = np.argsort(inds[valid]) # type: ignore[arg-type] result[invalid] = np.interp( inds[invalid], inds[valid][indexer], yvalues[valid][indexer] ) @@ -708,7 +717,9 @@ def 
_pad_1d( ) -> tuple[np.ndarray, np.ndarray]: mask = _fillna_prep(values, mask) algos.pad_inplace(values, mask, limit=limit) - return values, mask + # error: Incompatible return value type (got "Tuple[ndarray, Optional[ndarray]]", + # expected "Tuple[ndarray, ndarray]") + return values, mask # type: ignore[return-value] @_datetimelike_compat @@ -719,7 +730,9 @@ def _backfill_1d( ) -> tuple[np.ndarray, np.ndarray]: mask = _fillna_prep(values, mask) algos.backfill_inplace(values, mask, limit=limit) - return values, mask + # error: Incompatible return value type (got "Tuple[ndarray, Optional[ndarray]]", + # expected "Tuple[ndarray, ndarray]") + return values, mask # type: ignore[return-value] @_datetimelike_compat @@ -839,4 +852,7 @@ def _rolling_window(a: np.ndarray, window: int): # https://stackoverflow.com/a/6811241 shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) - return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides) + # error: Module has no attribute "stride_tricks" + return np.lib.stride_tricks.as_strided( # type: ignore[attr-defined] + a, shape=shape, strides=strides + ) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 2592492f1c14c..f17569d114389 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -411,7 +411,11 @@ def new_func( if datetimelike: result = _wrap_results(result, orig_values.dtype, fill_value=iNaT) if not skipna: - result = _mask_datetimelike_result(result, axis, mask, orig_values) + # error: Argument 3 to "_mask_datetimelike_result" has incompatible type + # "Optional[ndarray]"; expected "ndarray" + result = _mask_datetimelike_result( + result, axis, mask, orig_values # type: ignore[arg-type] + ) return result @@ -486,7 +490,9 @@ def nanany( False """ values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask) - return values.any(axis) + # error: Incompatible return value type (got "Union[bool_, ndarray]", expected + # "bool") + 
return values.any(axis) # type: ignore[return-value] def nanall( @@ -524,7 +530,9 @@ def nanall( False """ values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask) - return values.all(axis) + # error: Incompatible return value type (got "Union[bool_, ndarray]", expected + # "bool") + return values.all(axis) # type: ignore[return-value] @disallow("M8") @@ -567,12 +575,22 @@ def nansum( if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): - dtype_sum = np.float64 + # error: Incompatible types in assignment (expression has type + # "Type[float64]", variable has type "dtype") + dtype_sum = np.float64 # type: ignore[assignment] the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count) + # error: Incompatible types in assignment (expression has type "float", variable has + # type "Union[number, ndarray]") + # error: Argument 1 to "_maybe_null_out" has incompatible type "Union[number, + # ndarray]"; expected "ndarray" + the_sum = _maybe_null_out( # type: ignore[assignment] + the_sum, axis, mask, values.shape, min_count=min_count # type: ignore[arg-type] + ) - return the_sum + # error: Incompatible return value type (got "Union[number, ndarray]", expected + # "float") + return the_sum # type: ignore[return-value] def _mask_datetimelike_result( @@ -634,12 +652,18 @@ def nanmean( # not using needs_i8_conversion because that includes period if dtype.kind in ["m", "M"]: - dtype_sum = np.float64 + # error: Incompatible types in assignment (expression has type "Type[float64]", + # variable has type "dtype[Any]") + dtype_sum = np.float64 # type: ignore[assignment] elif is_integer_dtype(dtype): - dtype_sum = np.float64 + # error: Incompatible types in assignment (expression has type "Type[float64]", + # variable has type "dtype[Any]") + dtype_sum = np.float64 # type: ignore[assignment] elif is_float_dtype(dtype): dtype_sum = dtype - dtype_count = dtype + # error: 
Incompatible types in assignment (expression has type "dtype[Any]", + # variable has type "Type[float64]") + dtype_count = dtype # type: ignore[assignment] count = _get_counts(values.shape, mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) @@ -791,7 +815,9 @@ def _get_counts_nanvar( """ dtype = get_dtype(dtype) count = _get_counts(value_counts, mask, axis, dtype=dtype) - d = count - dtype.type(ddof) + # error: Unsupported operand types for - ("int" and "generic") + # error: Unsupported operand types for - ("float" and "generic") + d = count - dtype.type(ddof) # type: ignore[operator] # always return NaN, never inf if is_scalar(count): @@ -799,11 +825,16 @@ def _get_counts_nanvar( count = np.nan d = np.nan else: - mask2: np.ndarray = count <= ddof + # error: Incompatible types in assignment (expression has type + # "Union[bool, Any]", variable has type "ndarray") + mask2: np.ndarray = count <= ddof # type: ignore[assignment] if mask2.any(): np.putmask(d, mask2, np.nan) np.putmask(count, mask2, np.nan) - return count, d + # error: Incompatible return value type (got "Tuple[Union[int, float, + # ndarray], Any]", expected "Tuple[Union[int, ndarray], Union[int, + # ndarray]]") + return count, d # type: ignore[return-value] @bottleneck_switch(ddof=1) @@ -958,7 +989,11 @@ def nansem( if not is_float_dtype(values.dtype): values = values.astype("f8") - count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) + # error: Argument 1 to "_get_counts_nanvar" has incompatible type + # "Tuple[int, ...]"; expected "Tuple[int]" + count, _ = _get_counts_nanvar( + values.shape, mask, axis, ddof, values.dtype # type: ignore[arg-type] + ) var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof) return np.sqrt(var) / np.sqrt(count) @@ -1038,7 +1073,8 @@ def nanargmax( array([2, 2, 1, 1], dtype=int64) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask) - result = values.argmax(axis) + # 
error: Need type annotation for 'result' + result = values.argmax(axis) # type: ignore[var-annotated] result = _maybe_arg_null_out(result, axis, mask, skipna) return result @@ -1083,7 +1119,8 @@ def nanargmin( array([0, 0, 1, 1], dtype=int64) """ values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask) - result = values.argmin(axis) + # error: Need type annotation for 'result' + result = values.argmin(axis) # type: ignore[var-annotated] result = _maybe_arg_null_out(result, axis, mask, skipna) return result @@ -1304,7 +1341,13 @@ def nanprod( values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask, values.shape, min_count=min_count) + # error: Argument 1 to "_maybe_null_out" has incompatible type "Union[number, + # ndarray]"; expected "ndarray" + # error: Incompatible return value type (got "Union[ndarray, float]", expected + # "float") + return _maybe_null_out( # type: ignore[return-value] + result, axis, mask, values.shape, min_count=min_count # type: ignore[arg-type] + ) def _maybe_arg_null_out( @@ -1317,10 +1360,14 @@ def _maybe_arg_null_out( if axis is None or not getattr(result, "ndim", False): if skipna: if mask.all(): - result = -1 + # error: Incompatible types in assignment (expression has type + # "int", variable has type "ndarray") + result = -1 # type: ignore[assignment] else: if mask.any(): - result = -1 + # error: Incompatible types in assignment (expression has type + # "int", variable has type "ndarray") + result = -1 # type: ignore[assignment] else: if skipna: na_mask = mask.all(axis) @@ -1361,7 +1408,9 @@ def _get_counts( n = mask.size - mask.sum() else: n = np.prod(values_shape) - return dtype.type(n) + # error: Incompatible return value type (got "Union[Any, generic]", + # expected "Union[int, float, ndarray]") + return dtype.type(n) # type: ignore[return-value] if mask is not None: count = mask.shape[axis] - mask.sum(axis) @@ -1369,11 +1418,23 @@ def _get_counts( 
count = values_shape[axis] if is_scalar(count): - return dtype.type(count) + # error: Incompatible return value type (got "Union[Any, generic]", + # expected "Union[int, float, ndarray]") + return dtype.type(count) # type: ignore[return-value] try: - return count.astype(dtype) + # error: Incompatible return value type (got "Union[ndarray, generic]", expected + # "Union[int, float, ndarray]") + # error: Argument 1 to "astype" of "_ArrayOrScalarCommon" has incompatible type + # "Union[ExtensionDtype, dtype]"; expected "Union[dtype, None, type, + # _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], + # List[Any], _DtypeDict, Tuple[Any, Any]]" + return count.astype(dtype) # type: ignore[return-value,arg-type] except AttributeError: - return np.array(count, dtype=dtype) + # error: Argument "dtype" to "array" has incompatible type + # "Union[ExtensionDtype, dtype]"; expected "Union[dtype, None, type, + # _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]" + return np.array(count, dtype=dtype) # type: ignore[arg-type] def _maybe_null_out( @@ -1403,7 +1464,9 @@ def _maybe_null_out( result[null_mask] = None elif result is not NaT: if check_below_min_count(shape, mask, min_count): - result = np.nan + # error: Incompatible types in assignment (expression has type + # "float", variable has type "ndarray") + result = np.nan # type: ignore[assignment] return result diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 10807dffb026b..9153eb25032e7 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -88,7 +88,11 @@ def _masked_arith_op(x: np.ndarray, y, op): assert isinstance(x, np.ndarray), type(x) if isinstance(y, np.ndarray): dtype = find_common_type([x.dtype, y.dtype]) - result = np.empty(x.size, dtype=dtype) + # error: Argument "dtype" to "empty" has incompatible type + # "Union[dtype, ExtensionDtype]"; expected "Union[dtype, 
None, type, + # _SupportsDtype, str, Tuple[Any, int], Tuple[Any, Union[int, + # Sequence[int]]], List[Any], _DtypeDict, Tuple[Any, Any]]" + result = np.empty(x.size, dtype=dtype) # type: ignore[arg-type] if len(x) != len(y): raise ValueError(x.shape, y.shape) diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py index 501bc0159e641..968833cd1ae44 100644 --- a/pandas/core/ops/mask_ops.py +++ b/pandas/core/ops/mask_ops.py @@ -109,7 +109,9 @@ def kleene_xor( if right is libmissing.NA: result = np.zeros_like(left) else: - result = left ^ right + # error: Incompatible types in assignment (expression has type + # "Union[bool, Any]", variable has type "ndarray") + result = left ^ right # type: ignore[assignment] if right_mask is None: if right is libmissing.NA: diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 80a44e8fda39b..09249eba9c3f5 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -143,10 +143,17 @@ def melt( mcolumns = id_vars + var_name + [value_name] - mdata[value_name] = frame._values.ravel("F") + # error: Incompatible types in assignment (expression has type "ndarray", + # target has type "Series") + mdata[value_name] = frame._values.ravel("F") # type: ignore[assignment] for i, col in enumerate(var_name): # asanyarray will keep the columns as an Index - mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) + + # error: Incompatible types in assignment (expression has type "ndarray", target + # has type "Series") + mdata[col] = np.asanyarray( # type: ignore[assignment] + frame.columns._get_level_values(i) + ).repeat(N) result = frame._constructor(mdata, columns=mcolumns) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 9291dcf552786..a048217d6b1f0 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2064,8 +2064,13 @@ def _factorize_keys( if is_datetime64tz_dtype(lk.dtype) and is_datetime64tz_dtype(rk.dtype): # 
Extract the ndarray (UTC-localized) values # Note: we dont need the dtypes to match, as these can still be compared - lk = cast("DatetimeArray", lk)._ndarray - rk = cast("DatetimeArray", rk)._ndarray + + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + lk = cast("DatetimeArray", lk)._ndarray # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + rk = cast("DatetimeArray", rk)._ndarray # type: ignore[assignment] elif ( is_categorical_dtype(lk.dtype) @@ -2075,14 +2080,27 @@ def _factorize_keys( assert isinstance(lk, Categorical) assert isinstance(rk, Categorical) # Cast rk to encoding so we can compare codes with lk - rk = lk._encode_with_my_categories(rk) - lk = ensure_int64(lk.codes) - rk = ensure_int64(rk.codes) + # error: <nothing> has no attribute "_encode_with_my_categories" + rk = lk._encode_with_my_categories(rk) # type: ignore[attr-defined] + + # error: <nothing> has no attribute "codes" + lk = ensure_int64(lk.codes) # type: ignore[attr-defined] + # error: "ndarray" has no attribute "codes" + rk = ensure_int64(rk.codes) # type: ignore[attr-defined] elif is_extension_array_dtype(lk.dtype) and is_dtype_equal(lk.dtype, rk.dtype): - lk, _ = lk._values_for_factorize() - rk, _ = rk._values_for_factorize() + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute + # "_values_for_factorize" + lk, _ = lk._values_for_factorize() # type: ignore[union-attr,assignment] + + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "ExtensionArray") + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute + # "_values_for_factorize" + rk, _ = rk._values_for_factorize() # type: ignore[union-attr,assignment] if is_integer_dtype(lk.dtype) and 
is_integer_dtype(rk.dtype): # GH#23917 TODO: needs tests for case where lk is integer-dtype diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index d0026d7acbe65..0c0b37791f883 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -486,7 +486,11 @@ def pivot( cols = [] append = index is None - indexed = data.set_index(cols + columns, append=append) + # error: Unsupported operand types for + ("List[Any]" and "ExtensionArray") + # error: Unsupported left operand type for + ("ExtensionArray") + indexed = data.set_index( + cols + columns, append=append # type: ignore[operator] + ) else: if index is None: index = [Series(data.index, name=data.index.name)] diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index f0a2ef0cb1869..13119b9997002 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -169,7 +169,9 @@ def _make_selectors(self): self.full_shape = ngroups, stride selector = self.sorted_labels[-1] + stride * comp_index + self.lift - mask = np.zeros(np.prod(self.full_shape), dtype=bool) + # error: Argument 1 to "zeros" has incompatible type "number"; expected + # "Union[int, Sequence[int]]" + mask = np.zeros(np.prod(self.full_shape), dtype=bool) # type: ignore[arg-type] mask.put(selector, True) if mask.sum() < len(self.index): @@ -952,7 +954,9 @@ def _get_dummies_1d( if dtype is None: dtype = np.uint8 - dtype = np.dtype(dtype) + # error: Argument 1 to "dtype" has incompatible type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]"; expected "Type[Any]" + dtype = np.dtype(dtype) # type: ignore[arg-type] if is_object_dtype(dtype): raise ValueError("dtype=object is not a valid dtype for get_dummies") diff --git a/pandas/core/series.py b/pandas/core/series.py index 468c3baca92c3..b92ada9537bd4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -353,7 +353,9 @@ def __init__( copy = False elif isinstance(data, np.ndarray): - if len(data.dtype): + 
# error: Argument 1 to "len" has incompatible type "dtype"; expected + # "Sized" + if len(data.dtype): # type: ignore[arg-type] # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( @@ -402,7 +404,12 @@ def __init__( elif copy: data = data.copy() else: - data = sanitize_array(data, index, dtype, copy) + # error: Argument 3 to "sanitize_array" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object], None]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + data = sanitize_array( + data, index, dtype, copy # type: ignore[arg-type] + ) manager = get_option("mode.data_manager") if manager == "block": @@ -453,7 +460,10 @@ def _init_dict(self, data, index=None, dtype: Optional[Dtype] = None): # Input is now list-like, so rely on "standard" construction: # TODO: passing np.float64 to not break anything yet. See GH-17261 - s = create_series_with_explicit_dtype( + + # error: Value of type variable "ArrayLike" of + # "create_series_with_explicit_dtype" cannot be "Tuple[Any, ...]" + s = create_series_with_explicit_dtype( # type: ignore[type-var] values, index=keys, dtype=dtype, dtype_if_empty=np.float64 ) @@ -1053,7 +1063,9 @@ def __setitem__(self, key, value): def _set_with_engine(self, key, value): # fails with AttributeError for IntervalIndex loc = self.index._engine.get_loc(key) - validate_numeric_casting(self.dtype, value) + # error: Argument 1 to "validate_numeric_casting" has incompatible type + # "Union[dtype, ExtensionDtype]"; expected "dtype" + validate_numeric_casting(self.dtype, value) # type: ignore[arg-type] self._values[loc] = value def _set_with(self, key, value): @@ -2005,7 +2017,9 @@ def drop_duplicates(self, keep="first", inplace=False) -> Optional[Series]: else: return result - def duplicated(self, keep="first") -> Series: + # error: Return type "Series" of "duplicated" incompatible with return type + # "ndarray" in supertype "IndexOpsMixin" + def duplicated(self, keep="first") -> 
Series: # type: ignore[override] """ Indicate duplicate Series values. @@ -2988,7 +3002,12 @@ def combine(self, other, func, fill_value=None) -> Series: # TODO: can we do this for only SparseDtype? # The function can return something of any type, so check # if the type is compatible with the calling EA. - new_values = maybe_cast_to_extension_array(type(self._values), new_values) + + # error: Value of type variable "ArrayLike" of + # "maybe_cast_to_extension_array" cannot be "List[Any]" + new_values = maybe_cast_to_extension_array( + type(self._values), new_values # type: ignore[type-var] + ) return self._constructor(new_values, index=new_index, name=new_name) def combine_first(self, other) -> Series: diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 973fed2c1436f..ba81866602361 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -43,6 +43,7 @@ _INT64_MAX = np.iinfo(np.int64).max +# error: Function "numpy.array" is not valid as a type def get_indexer_indexer( target: Index, level: Union[str, int, List[str], List[int]], @@ -51,7 +52,7 @@ def get_indexer_indexer( na_position: str, sort_remaining: bool, key: IndexKeyFunc, -) -> Optional[np.array]: +) -> Optional[np.array]: # type: ignore[valid-type] """ Helper method that return the indexer according to input parameters for the sort_index method of DataFrame and Series. 
@@ -584,11 +585,16 @@ def get_group_index_sorter( df.groupby(key)[col].transform('first') """ if ngroups is None: - ngroups = 1 + group_index.max() + # error: Incompatible types in assignment (expression has type "number[Any]", + # variable has type "Optional[int]") + ngroups = 1 + group_index.max() # type: ignore[assignment] count = len(group_index) alpha = 0.0 # taking complexities literally; there may be beta = 1.0 # some room for fine-tuning these parameters - do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count))) + # error: Unsupported operand types for * ("float" and "None") + do_groupsort = count > 0 and ( + (alpha + beta * ngroups) < (count * np.log(count)) # type: ignore[operator] + ) if do_groupsort: sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) return ensure_platform_int(sorter) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 32a99c0a020b2..3508624e51a9d 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -604,15 +604,27 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): if isinstance(self._orig, ABCIndex): # add dtype for case that result is all-NA - result = Index(result, dtype=object, name=self._orig.name) + + # error: Incompatible types in assignment (expression has type + # "Index", variable has type "ndarray") + result = Index( # type: ignore[assignment] + result, dtype=object, name=self._orig.name + ) else: # Series if is_categorical_dtype(self._orig.dtype): # We need to infer the new categories. 
dtype = None else: dtype = self._orig.dtype - result = Series(result, dtype=dtype, index=data.index, name=self._orig.name) - result = result.__finalize__(self._orig, method="str_cat") + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "ndarray") + result = Series( # type: ignore[assignment] + result, dtype=dtype, index=data.index, name=self._orig.name + ) + # error: "ndarray" has no attribute "__finalize__" + result = result.__finalize__( # type: ignore[attr-defined] + self._orig, method="str_cat" + ) return result _shared_docs[ @@ -3030,10 +3042,16 @@ def _str_extract_noexpand(arr, pat, flags=0): names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) columns = [names.get(1 + i, i) for i in range(regex.groups)] if arr.size == 0: - result = DataFrame(columns=columns, dtype=object) + # error: Incompatible types in assignment (expression has type + # "DataFrame", variable has type "ndarray") + result = DataFrame( # type: ignore[assignment] + columns=columns, dtype=object + ) else: dtype = _result_dtype(arr) - result = DataFrame( + # error: Incompatible types in assignment (expression has type + # "DataFrame", variable has type "ndarray") + result = DataFrame( # type:ignore[assignment] [groups_or_na(val) for val in arr], columns=columns, index=arr.index, diff --git a/pandas/core/strings/object_array.py b/pandas/core/strings/object_array.py index 0a4543057c386..edf32bade0657 100644 --- a/pandas/core/strings/object_array.py +++ b/pandas/core/strings/object_array.py @@ -63,10 +63,14 @@ def _str_map(self, f, na_value=None, dtype: Optional[Dtype] = None): na_value = self._str_na_value if not len(arr): - return np.ndarray(0, dtype=dtype) + # error: Argument 1 to "ndarray" has incompatible type "int"; + # expected "Sequence[int]" + return np.ndarray(0, dtype=dtype) # type: ignore[arg-type] if not isinstance(arr, np.ndarray): - arr = np.asarray(arr, dtype=object) + # error: Incompatible types in assignment 
(expression has type "ndarray", + # variable has type "ObjectStringArrayMixin") + arr = np.asarray(arr, dtype=object) # type: ignore[assignment] mask = isna(arr) convert = not np.all(mask) try: diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index d58b5e5ffa83d..f7bb3083b91a9 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -245,7 +245,9 @@ def _convert_and_box_cache( from pandas import Series result = Series(arg).map(cache_array) - return _box_as_indexlike(result, utc=None, name=name) + # error: Value of type variable "ArrayLike" of "_box_as_indexlike" cannot + # be "Series" + return _box_as_indexlike(result, utc=None, name=name) # type: ignore[type-var] def _return_parsed_timezone_results(result: np.ndarray, timezones, tz, name) -> Index: @@ -362,7 +364,9 @@ def _convert_listlike_datetimes( result = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg)) return DatetimeIndex(result, name=name) elif errors == "ignore": - result = Index(arg, name=name) + # error: Incompatible types in assignment (expression has type + # "Index", variable has type "ExtensionArray") + result = Index(arg, name=name) # type: ignore[assignment] return result raise @@ -382,10 +386,14 @@ def _convert_listlike_datetimes( require_iso8601 = not infer_datetime_format format = None - result = None + # error: Incompatible types in assignment (expression has type "None", variable has + # type "ExtensionArray") + result = None # type: ignore[assignment] if format is not None: - result = _to_datetime_with_format( + # error: Incompatible types in assignment (expression has type + # "Optional[Index]", variable has type "ndarray") + result = _to_datetime_with_format( # type: ignore[assignment] arg, orig_arg, name, tz, format, exact, errors, infer_datetime_format ) if result is not None: @@ -494,7 +502,9 @@ def _to_datetime_with_format( # fallback if result is None: - result = _array_strptime_with_fallback( + # error: Incompatible 
types in assignment (expression has type + # "Optional[Index]", variable has type "Optional[ndarray]") + result = _array_strptime_with_fallback( # type: ignore[assignment] arg, name, tz, fmt, exact, errors, infer_datetime_format ) if result is not None: @@ -510,7 +520,9 @@ def _to_datetime_with_format( except (ValueError, TypeError): raise e - return result + # error: Incompatible return value type (got "Optional[ndarray]", expected + # "Optional[Index]") + return result # type: ignore[return-value] def _to_datetime_with_unit(arg, unit, name, tz, errors: Optional[str]) -> Index: @@ -529,12 +541,18 @@ def _to_datetime_with_unit(arg, unit, name, tz, errors: Optional[str]) -> Index: if errors == "ignore": # Index constructor _may_ infer to DatetimeIndex - result = Index(result, name=name) + + # error: Incompatible types in assignment (expression has type "Index", variable + # has type "ExtensionArray") + result = Index(result, name=name) # type: ignore[assignment] else: - result = DatetimeIndex(result, name=name) + # error: Incompatible types in assignment (expression has type "DatetimeIndex", + # variable has type "ExtensionArray") + result = DatetimeIndex(result, name=name) # type: ignore[assignment] if not isinstance(result, DatetimeIndex): - return result + # error: Incompatible return value type (got "ExtensionArray", expected "Index") + return result # type: ignore[return-value] # GH#23758: We may still need to localize the result with tz # GH#25546: Apply tz_parsed first (from arg), then tz (from caller) @@ -1063,7 +1081,9 @@ def calc_with_mask(carg, mask): # string with NaN-like try: - mask = ~algorithms.isin(arg, list(nat_strings)) + # error: Value of type variable "AnyArrayLike" of "isin" cannot be + # "Iterable[Any]" + mask = ~algorithms.isin(arg, list(nat_strings)) # type: ignore[type-var] return calc_with_mask(arg, mask) except (ValueError, OverflowError, TypeError): pass diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 
1032edcb22b46..31ab78e59a556 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -168,7 +168,9 @@ def to_numeric(arg, errors="raise", downcast=None): mask = values._mask values = values._data[~mask] else: - mask = None + # error: Incompatible types in assignment (expression has type "None", variable + # has type "ndarray") + mask = None # type: ignore[assignment] values_dtype = getattr(values, "dtype", None) if is_numeric_dtype(values_dtype): diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index a335146265523..a8378e91f9375 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -181,5 +181,7 @@ def _convert_listlike(arg, unit=None, errors="raise", name=None): from pandas import TimedeltaIndex - value = TimedeltaIndex(value, unit="ns", name=name) + # error: Incompatible types in assignment (expression has type "TimedeltaIndex", + # variable has type "ndarray") + value = TimedeltaIndex(value, unit="ns", name=name) # type: ignore[assignment] return value diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 9d488bb13b0f1..7d314d6a6fa1a 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -116,10 +116,14 @@ def hash_pandas_object( return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False) elif isinstance(obj, ABCIndex): - h = hash_array(obj._values, encoding, hash_key, categorize).astype( - "uint64", copy=False - ) - h = Series(h, index=obj, dtype="uint64", copy=False) + # error: Value of type variable "ArrayLike" of "hash_array" cannot be + # "Union[ExtensionArray, ndarray]" + h = hash_array( # type: ignore[type-var] + obj._values, encoding, hash_key, categorize + ).astype("uint64", copy=False) + # error: Incompatible types in assignment (expression has type "Series", + # variable has type "ndarray") + h = Series(h, index=obj, dtype="uint64", copy=False) # type: ignore[assignment] elif isinstance(obj, 
ABCSeries): h = hash_array(obj._values, encoding, hash_key, categorize).astype( @@ -139,7 +143,11 @@ def hash_pandas_object( arrays = itertools.chain([h], index_iter) h = combine_hash_arrays(arrays, 2) - h = Series(h, index=obj.index, dtype="uint64", copy=False) + # error: Incompatible types in assignment (expression has type "Series", + # variable has type "ndarray") + h = Series( # type: ignore[assignment] + h, index=obj.index, dtype="uint64", copy=False + ) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series._values) for _, series in obj.items()) @@ -162,10 +170,15 @@ def hash_pandas_object( hashes = (x for x in _hashes) h = combine_hash_arrays(hashes, num_items) - h = Series(h, index=obj.index, dtype="uint64", copy=False) + # error: Incompatible types in assignment (expression has type "Series", + # variable has type "ndarray") + h = Series( # type: ignore[assignment] + h, index=obj.index, dtype="uint64", copy=False + ) else: raise TypeError(f"Unexpected type for hashing {type(obj)}") - return h + # error: Incompatible return value type (got "ndarray", expected "Series") + return h # type: ignore[return-value] def hash_tuples( @@ -284,12 +297,21 @@ def hash_array( # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). 
if is_categorical_dtype(dtype): - vals = cast("Categorical", vals) - return _hash_categorical(vals, encoding, hash_key) + # error: Incompatible types in assignment (expression has type "Categorical", + # variable has type "ndarray") + vals = cast("Categorical", vals) # type: ignore[assignment] + # error: Argument 1 to "_hash_categorical" has incompatible type "ndarray"; + # expected "Categorical" + return _hash_categorical(vals, encoding, hash_key) # type: ignore[arg-type] elif is_extension_array_dtype(dtype): - vals, _ = vals._values_for_factorize() - - return _hash_ndarray(vals, encoding, hash_key, categorize) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: "ndarray" has no attribute "_values_for_factorize" + vals, _ = vals._values_for_factorize() # type: ignore[assignment,attr-defined] + + # error: Argument 1 to "_hash_ndarray" has incompatible type "ExtensionArray"; + # expected "ndarray" + return _hash_ndarray(vals, encoding, hash_key, categorize) # type: ignore[arg-type] def _hash_ndarray( diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 4537e525c5086..4c9222e0805f1 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -261,7 +261,9 @@ def __init__( self.times = self._selected_obj[self.times] if not is_datetime64_ns_dtype(self.times): raise ValueError("times must be datetime64[ns] dtype.") - if len(self.times) != len(obj): + # error: Argument 1 to "len" has incompatible type "Union[str, ndarray, + # FrameOrSeries, None]"; expected "Sized" + if len(self.times) != len(obj): # type: ignore[arg-type] raise ValueError("times must be the same length as the object.") if not isinstance(self.halflife, (str, datetime.timedelta)): raise ValueError( @@ -269,7 +271,13 @@ def __init__( ) if isna(self.times).any(): raise ValueError("Cannot convert NaT values to integer") - _times = np.asarray(self.times.view(np.int64), dtype=np.float64) + # error: Item 
"str" of "Union[str, ndarray, FrameOrSeries, None]" has no + # attribute "view" + # error: Item "None" of "Union[str, ndarray, FrameOrSeries, None]" has no + # attribute "view" + _times = np.asarray( + self.times.view(np.int64), dtype=np.float64 # type: ignore[union-attr] + ) _halflife = float(Timedelta(self.halflife).value) self._deltas = np.diff(_times) / _halflife # Halflife is no longer applicable when calculating COM @@ -289,7 +297,13 @@ def __init__( # Without times, points are equally spaced self._deltas = np.ones(max(len(self.obj) - 1, 0), dtype=np.float64) self._com = get_center_of_mass( - self.com, self.span, self.halflife, self.alpha + # error: Argument 3 to "get_center_of_mass" has incompatible type + # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; + # expected "Optional[float]" + self.com, + self.span, + self.halflife, # type: ignore[arg-type] + self.alpha, ) def _get_window_indexer(self) -> BaseIndexer: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 503849bf673d5..17d05e81b82bb 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -316,11 +316,17 @@ def _prep_values(self, values: ArrayLike) -> np.ndarray: raise TypeError(f"cannot handle this type -> {values.dtype}") from err # Convert inf to nan for C funcs - inf = np.isinf(values) + + # error: Argument 1 to "__call__" of "ufunc" has incompatible type + # "Optional[ndarray]"; expected "Union[bool, int, float, complex, + # _SupportsArray, Sequence[Any]]" + inf = np.isinf(values) # type: ignore[arg-type] if inf.any(): values = np.where(inf, np.nan, values) - return values + # error: Incompatible return value type (got "Optional[ndarray]", + # expected "ndarray") + return values # type: ignore[return-value] def _insert_on_column(self, result: DataFrame, obj: DataFrame): # if we have an 'on' column we want to put it back into @@ -418,7 +424,11 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike: return getattr(res_values, "T", 
res_values) def hfunc2d(values: ArrayLike) -> ArrayLike: - values = self._prep_values(values) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + # error: Argument 1 to "_prep_values" of "BaseWindow" has incompatible type + # "ExtensionArray"; expected "Optional[ndarray]" + values = self._prep_values(values) # type: ignore[assignment,arg-type] return homogeneous_func(values) if isinstance(mgr, ArrayManager) and self.axis == 1: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ab8e19d9f8a6f..f54481f527d93 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1565,7 +1565,10 @@ def _format_strings(self) -> List[str]: if is_categorical_dtype(values.dtype): # Categorical is special for now, so that we can preserve tzinfo - array = values._internal_get_values() + + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "_internal_get_values" + array = values._internal_get_values() # type: ignore[union-attr] else: array = np.asarray(values) @@ -1632,10 +1635,25 @@ def format_percentiles( raise ValueError("percentiles should all be in the interval [0,1]") percentiles = 100 * percentiles - int_idx = np.isclose(percentiles.astype(int), percentiles) + + # error: Item "List[Union[int, float]]" of "Union[ndarray, List[Union[int, float]], + # List[float], List[Union[str, float]]]" has no attribute "astype" + # error: Item "List[float]" of "Union[ndarray, List[Union[int, float]], List[float], + # List[Union[str, float]]]" has no attribute "astype" + # error: Item "List[Union[str, float]]" of "Union[ndarray, List[Union[int, float]], + # List[float], List[Union[str, float]]]" has no attribute "astype" + int_idx = np.isclose( + percentiles.astype(int), percentiles # type: ignore[union-attr] + ) if np.all(int_idx): - out = percentiles.astype(int).astype(str) + # error: Item "List[Union[int, float]]" of "Union[ndarray, List[Union[int, + # 
float]], List[float], List[Union[str, float]]]" has no attribute "astype" + # error: Item "List[float]" of "Union[ndarray, List[Union[int, float]], + # List[float], List[Union[str, float]]]" has no attribute "astype" + # error: Item "List[Union[str, float]]" of "Union[ndarray, List[Union[int, + # float]], List[float], List[Union[str, float]]]" has no attribute "astype" + out = percentiles.astype(int).astype(str) # type: ignore[union-attr] return [i + "%" for i in out] unique_pcts = np.unique(percentiles) @@ -1648,8 +1666,19 @@ def format_percentiles( ).astype(int) prec = max(1, prec) out = np.empty_like(percentiles, dtype=object) - out[int_idx] = percentiles[int_idx].astype(int).astype(str) - out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) + # error: No overload variant of "__getitem__" of "list" matches argument type + # "Union[bool_, ndarray]" + out[int_idx] = ( + percentiles[int_idx].astype(int).astype(str) # type: ignore[call-overload] + ) + + # error: Item "float" of "Union[Any, float, str]" has no attribute "round" + # error: Item "str" of "Union[Any, float, str]" has no attribute "round" + # error: Invalid index type "Union[bool_, Any]" for "Union[ndarray, List[Union[int, + # float]], List[float], List[Union[str, float]]]"; expected type "int" + out[~int_idx] = ( + percentiles[~int_idx].round(prec).astype(str) # type: ignore[union-attr,index] + ) return [i + "%" for i in out] @@ -1772,7 +1801,11 @@ def get_format_timedelta64( one_day_nanos = 86400 * 10 ** 9 even_days = ( - np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 + # error: Unsupported operand types for % ("ExtensionArray" and "int") + np.logical_and( + consider_values, values_int % one_day_nanos != 0 # type: ignore[operator] + ).sum() + == 0 ) if even_days: diff --git a/pandas/io/formats/string.py b/pandas/io/formats/string.py index 622001f280885..84333cfc441b2 100644 --- a/pandas/io/formats/string.py +++ b/pandas/io/formats/string.py @@ -117,7 +117,13 @@ def 
_join_multiline(self, strcols_input: Iterable[List[str]]) -> str: if self.fmt.index: idx = strcols.pop(0) - lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width + # error: Argument 1 to "__call__" of "_NumberOp" has incompatible type + # "None"; expected "Union[int, float, complex, number, bool_]" + # error: Incompatible types in assignment (expression has type "number", + # variable has type "Optional[int]") + lwidth -= ( # type: ignore[assignment,arg-type] + np.array([self.adj.len(x) for x in idx]).max() + adjoin_width + ) col_widths = [ np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 @@ -125,7 +131,9 @@ def _join_multiline(self, strcols_input: Iterable[List[str]]) -> str: ] assert lwidth is not None - col_bins = _binify(col_widths, lwidth) + # error: Argument 1 to "_binify" has incompatible type "List[object]"; expected + # "List[int]" + col_bins = _binify(col_widths, lwidth) # type: ignore[arg-type] nbins = len(col_bins) if self.fmt.is_truncated_vertically: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index ec09a4cc4cd89..cc5f3164385cb 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1708,7 +1708,11 @@ def f(data: DataFrame, props: str) -> np.ndarray: if props is None: props = f"background-color: {null_color};" - return self.apply(f, axis=None, subset=subset, props=props) + # error: Argument 1 to "apply" of "Styler" has incompatible type + # "Callable[[DataFrame, str], ndarray]"; expected "Callable[..., Styler]" + return self.apply( + f, axis=None, subset=subset, props=props # type: ignore[arg-type] + ) def highlight_max( self, @@ -1751,7 +1755,11 @@ def f(data: FrameOrSeries, props: str) -> np.ndarray: if props is None: props = f"background-color: {color};" - return self.apply(f, axis=axis, subset=subset, props=props) + # error: Argument 1 to "apply" of "Styler" has incompatible type + # "Callable[[FrameOrSeries, str], ndarray]"; expected "Callable[..., Styler]" + 
return self.apply( + f, axis=axis, subset=subset, props=props # type: ignore[arg-type] + ) def highlight_min( self, @@ -1794,7 +1802,11 @@ def f(data: FrameOrSeries, props: str) -> np.ndarray: if props is None: props = f"background-color: {color};" - return self.apply(f, axis=axis, subset=subset, props=props) + # error: Argument 1 to "apply" of "Styler" has incompatible type + # "Callable[[FrameOrSeries, str], ndarray]"; expected "Callable[..., Styler]" + return self.apply( + f, axis=axis, subset=subset, props=props # type: ignore[arg-type] + ) @classmethod def from_custom_template(cls, searchpath, name): diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index aa654e971641f..7c83beca1ae71 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -570,7 +570,12 @@ def read_json( raise ValueError("cannot pass both convert_axes and orient='table'") if dtype is None and orient != "table": - dtype = True + # error: Incompatible types in assignment (expression has type "bool", variable + # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object], Dict[Optional[Hashable], + # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]], None]") + dtype = True # type: ignore[assignment] if convert_axes is None and orient != "table": convert_axes = True @@ -914,7 +919,12 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): return data, False return data.fillna(np.nan), True - elif self.dtype is True: + # error: Non-overlapping identity check (left operand type: + # "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]", right operand type: "Literal[True]") + elif self.dtype is True: # type: ignore[comparison-overlap] pass else: # 
dtype to force @@ -923,7 +933,10 @@ def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): ) if dtype is not None: try: - dtype = np.dtype(dtype) + # error: Argument 1 to "dtype" has incompatible type + # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; + # expected "Type[Any]" + dtype = np.dtype(dtype) # type: ignore[arg-type] return data.astype(dtype), True except (TypeError, ValueError): return data, False diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index c05efe9e73c5a..4539ceabbb92f 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -658,7 +658,9 @@ def _infer_types(self, values, na_values, try_num_bool=True): na_count = 0 if issubclass(values.dtype.type, (np.number, np.bool_)): mask = algorithms.isin(values, list(na_values)) - na_count = mask.sum() + # error: Incompatible types in assignment (expression has type + # "number[Any]", variable has type "int") + na_count = mask.sum() # type: ignore[assignment] if na_count > 0: if is_integer_dtype(values): values = values.astype(np.float64) @@ -716,7 +718,10 @@ def _cast_types(self, values, cast_type, column): # TODO: this is for consistency with # c-parser which parses all categories # as strings - values = astype_nansafe(values, str) + + # error: Argument 2 to "astype_nansafe" has incompatible type + # "Type[str]"; expected "Union[dtype[Any], ExtensionDtype]" + values = astype_nansafe(values, str) # type: ignore[arg-type] cats = Index(values).unique().dropna() values = Categorical._from_inferred_categories( @@ -909,7 +914,20 @@ def _get_empty_meta( if not is_dict_like(dtype): # if dtype == None, default will be object. 
default_dtype = dtype or object - dtype = defaultdict(lambda: default_dtype) + # error: Argument 1 to "defaultdict" has incompatible type "Callable[[], + # Union[ExtensionDtype, str, dtype[Any], Type[object], Dict[Hashable, + # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], + # Type[int], Type[complex], Type[bool], Type[object]]]]]"; expected + # "Optional[Callable[[], Union[ExtensionDtype, str, dtype[Any], + # Type[object]]]]" + # error: Incompatible return value type (got "Union[ExtensionDtype, str, + # dtype[Any], Type[object], Dict[Hashable, Union[ExtensionDtype, Union[str, + # dtype[Any]], Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]", expected "Union[ExtensionDtype, str, dtype[Any], + # Type[object]]") + dtype = defaultdict( + lambda: default_dtype # type: ignore[arg-type, return-value] + ) else: dtype = cast(dict, dtype) dtype = defaultdict( diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index ceb4900b887f1..24bd2da6cc12e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2092,7 +2092,9 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): kwargs["freq"] = None new_pd_index = factory(values, **kwargs) - new_pd_index = _set_tz(new_pd_index, self.tz) + # error: Incompatible types in assignment (expression has type + # "Union[ndarray, DatetimeIndex]", variable has type "Index") + new_pd_index = _set_tz(new_pd_index, self.tz) # type: ignore[assignment] return new_pd_index, new_pd_index def take_data(self): @@ -2254,7 +2256,9 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): """ assert isinstance(values, np.ndarray), type(values) - values = Int64Index(np.arange(len(values))) + # error: Incompatible types in assignment (expression has type + # "Int64Index", variable has type "ndarray") + values = Int64Index(np.arange(len(values))) # type: ignore[assignment] return values, values def set_attr(self): @@ -3087,10 +3091,17 @@ 
def write_array(self, key: str, obj: FrameOrSeries, items: Optional[Index] = Non elif is_datetime64tz_dtype(value.dtype): # store as UTC # with a zone - self._handle.create_array(self.group, key, value.asi8) + + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "asi8" + self._handle.create_array( + self.group, key, value.asi8 # type: ignore[union-attr] + ) node = getattr(self.group, key) - node._v_attrs.tz = _get_tz(value.tz) + # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no + # attribute "tz" + node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr] node._v_attrs.value_type = "datetime64" elif is_timedelta64_dtype(value.dtype): self._handle.create_array(self.group, key, value.view("i8")) @@ -3376,7 +3387,10 @@ def validate_multiindex( @property def nrows_expected(self) -> int: """ based on our axes, compute the expected nrows """ - return np.prod([i.cvalues.shape[0] for i in self.index_axes]) + # error: Incompatible return value type (got "number", expected "int") + return np.prod( # type: ignore[return-value] + [i.cvalues.shape[0] for i in self.index_axes] + ) @property def is_exists(self) -> bool: @@ -3462,8 +3476,12 @@ def write_metadata(self, key: str, values: np.ndarray): key : str values : ndarray """ - values = Series(values) - self.parent.put( + # error: Incompatible types in assignment (expression has type + # "Series", variable has type "ndarray") + values = Series(values) # type: ignore[assignment] + # error: Value of type variable "FrameOrSeries" of "put" of "HDFStore" + # cannot be "ndarray" + self.parent.put( # type: ignore[type-var] self._get_metadata_path(key), values, format="table", @@ -4818,14 +4836,18 @@ def _set_tz( elif coerce: values = np.asarray(values, dtype="M8[ns]") - return values + # error: Incompatible return value type (got "Union[ndarray, Index]", + # expected "Union[ndarray, DatetimeIndex]") + return values # type: ignore[return-value] def _convert_index(name: str, 
index: Index, encoding: str, errors: str) -> IndexCol: assert isinstance(name, str) index_name = index.name - converted, dtype_name = _get_data_and_dtype_name(index) + # error: Value of type variable "ArrayLike" of "_get_data_and_dtype_name" + # cannot be "Index" + converted, dtype_name = _get_data_and_dtype_name(index) # type: ignore[type-var] kind = _dtype_to_kind(dtype_name) atom = DataIndexableCol._get_atom(converted) @@ -4966,7 +4988,12 @@ def _maybe_convert_for_string_atom( ) # itemsize is the maximum length of a string (along any dimension) - data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape) + + # error: Argument 1 to "_convert_string_array" has incompatible type "Union[ndarray, + # ExtensionArray]"; expected "ndarray" + data_converted = _convert_string_array( + data, encoding, errors # type: ignore[arg-type] + ).reshape(data.shape) itemsize = data_converted.itemsize # specified min_itemsize? @@ -5142,20 +5169,26 @@ def _get_data_and_dtype_name(data: ArrayLike): Convert the passed data into a storable form and a dtype string. """ if isinstance(data, Categorical): - data = data.codes + # error: Incompatible types in assignment (expression has type + # "ndarray", variable has type "ExtensionArray") + data = data.codes # type: ignore[assignment] # For datetime64tz we need to drop the TZ in tests TODO: why? dtype_name = data.dtype.name.split("[")[0] if data.dtype.kind in ["m", "M"]: - data = np.asarray(data.view("i8")) + # error: Incompatible types in assignment (expression has type "ndarray", + # variable has type "ExtensionArray") + data = np.asarray(data.view("i8")) # type: ignore[assignment] # TODO: we used to reshape for the dt64tz case, but no longer # doing that doesn't seem to break anything. why? 
elif isinstance(data, PeriodIndex): data = data.asi8 - data = np.asarray(data) + # error: Incompatible types in assignment (expression has type "ndarray", variable + # has type "ExtensionArray") + data = np.asarray(data) # type: ignore[assignment] return data, dtype_name diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c028e1f5c5dbe..fb08abb6fea45 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -898,7 +898,9 @@ def insert_data(self): mask = isna(d) d[mask] = None - data_list[i] = d + # error: No overload variant of "__setitem__" of "list" matches + # argument types "int", "ndarray" + data_list[i] = d # type: ignore[call-overload] return column_names, data_list @@ -1545,7 +1547,13 @@ def to_sql( """ if dtype: if not is_dict_like(dtype): - dtype = {col_name: dtype for col_name in frame} + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] else: dtype = cast(dict, dtype) @@ -2022,7 +2030,13 @@ def to_sql( """ if dtype: if not is_dict_like(dtype): - dtype = {col_name: dtype for col_name in frame} + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Optional[Hashable], Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] else: dtype = cast(dict, dtype) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ebc0395aec0b2..c01a369bf0054 100644 --- 
a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1233,7 +1233,9 @@ def g(typ: int) -> Union[str, np.dtype]: if typ <= 2045: return str(typ) try: - return self.DTYPE_MAP_XML[typ] + # error: Incompatible return value type (got "Type[number]", expected + # "Union[str, dtype]") + return self.DTYPE_MAP_XML[typ] # type: ignore[return-value] except KeyError as err: raise ValueError(f"cannot convert stata dtype [{typ}]") from err @@ -1666,7 +1668,12 @@ def read( if self.dtyplist[i] is not None: col = data.columns[i] dtype = data[col].dtype - if dtype != np.dtype(object) and dtype != self.dtyplist[i]: + # error: Value of type variable "_DTypeScalar" of "dtype" cannot be + # "object" + if ( + dtype != np.dtype(object) # type: ignore[type-var] + and dtype != self.dtyplist[i] + ): requires_type_conversion = True data_formatted.append( (col, Series(data[col], ix, self.dtyplist[i])) diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py index 4bc3f3c38f506..6d5aeaa713687 100644 --- a/pandas/tests/io/parser/common/test_chunksize.py +++ b/pandas/tests/io/parser/common/test_chunksize.py @@ -143,7 +143,10 @@ def test_read_chunksize_jagged_names(all_parsers): parser = all_parsers data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)]) - expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10]) + # error: List item 0 has incompatible type "float"; expected "int" + expected = DataFrame( + [[0] + [np.nan] * 9] * 7 + [[0] * 10] # type: ignore[list-item] + ) with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader: result = concat(reader) tm.assert_frame_equal(result, expected) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 048d138608ef9..c5b875b8f027e 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -385,7 +385,8 @@ def _is_business_daily(self) -> bool: shifts = np.diff(self.index.asi8) shifts = np.floor_divide(shifts, _ONE_DAY) weekdays = 
np.mod(first_weekday + np.cumsum(shifts), 7) - return np.all( + # error: Incompatible return value type (got "bool_", expected "bool") + return np.all( # type: ignore[return-value] ((weekdays == 0) & (shifts == 3)) | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1)) ) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index c39647522aaf1..752ed43849d2b 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -212,7 +212,9 @@ def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None): if reason is None: reason = f"NumPy {ver_str} or greater required" return pytest.mark.skipif( - np.__version__ < LooseVersion(ver_str), *args, reason=reason + np.__version__ < LooseVersion(ver_str), + *args, + reason=reason, ) diff --git a/requirements-dev.txt b/requirements-dev.txt index 37adbbb8e671f..f60e3bf0daea7 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. # See that file for comments about the need/usage of each dependency. 
-numpy>=1.16.5, <1.20 +numpy>=1.16.5 python-dateutil>=2.7.3 pytz asv diff --git a/setup.cfg b/setup.cfg index fdc0fbdbd6b57..a0b6a0cdfc260 100644 --- a/setup.cfg +++ b/setup.cfg @@ -192,3 +192,42 @@ check_untyped_defs = False [mypy-pandas.io.clipboard] check_untyped_defs = False + +[mypy-pandas.io.formats.string] +ignore_errors = True + +[mypy-pandas.tests.apply.test_series_apply] +ignore_errors = True + +[mypy-pandas.tests.arithmetic.conftest] +ignore_errors = True + +[mypy-pandas.tests.arrays.sparse.test_combine_concat] +ignore_errors = True + +[mypy-pandas.tests.dtypes.test_common] +ignore_errors = True + +[mypy-pandas.tests.frame.methods.test_to_records] +ignore_errors = True + +[mypy-pandas.tests.groupby.test_rank] +ignore_errors = True + +[mypy-pandas.tests.groupby.transform.test_transform] +ignore_errors = True + +[mypy-pandas.tests.indexes.interval.test_interval] +ignore_errors = True + +[mypy-pandas.tests.indexing.test_categorical] +ignore_errors = True + +[mypy-pandas.tests.io.excel.test_writers] +ignore_errors = True + +[mypy-pandas.tests.reductions.test_reductions] +ignore_errors = True + +[mypy-pandas.tests.test_expressions] +ignore_errors = True
xref #39513
https://api.github.com/repos/pandas-dev/pandas/pulls/36092
2020-09-03T15:54:38Z
2021-03-10T20:42:50Z
2021-03-10T20:42:49Z
2021-03-10T21:32:56Z
PERF: use from __future__ import annotations more
diff --git a/pandas/_config/config.py b/pandas/_config/config.py index 0b802f2cc9e69..940f007c785e2 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -47,6 +47,7 @@ which can save developers some typing, see the docstring. """ +from __future__ import annotations from collections import namedtuple from contextlib import ContextDecorator, contextmanager diff --git a/pandas/_config/dates.py b/pandas/_config/dates.py index 5bf2b49ce5904..b37831f96eb73 100644 --- a/pandas/_config/dates.py +++ b/pandas/_config/dates.py @@ -1,6 +1,8 @@ """ config for datetime formatting """ +from __future__ import annotations + from pandas._config import config as cf pc_date_dayfirst_doc = """ diff --git a/pandas/_config/display.py b/pandas/_config/display.py index ef319f4447565..afba0a72f3645 100644 --- a/pandas/_config/display.py +++ b/pandas/_config/display.py @@ -2,6 +2,8 @@ Unopinionated display configuration. """ +from __future__ import annotations + import locale import sys diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py index 3933c8f3d519c..2162d6fb3875e 100644 --- a/pandas/_config/localization.py +++ b/pandas/_config/localization.py @@ -3,6 +3,8 @@ Name `localization` is chosen to avoid overlap with builtin `locale` module. 
""" +from __future__ import annotations + from contextlib import contextmanager import locale import re diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 689c7c889ef66..a8859d5342eaa 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import distutils.version import importlib import types diff --git a/pandas/compat/chainmap.py b/pandas/compat/chainmap.py index a84dbb4a661e4..93355557bd98f 100644 --- a/pandas/compat/chainmap.py +++ b/pandas/compat/chainmap.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import ChainMap, MutableMapping, TypeVar, cast _KT = TypeVar("_KT") diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index ef9f36705a7ee..399154990aea3 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -1,6 +1,7 @@ """ Support pre-0.12 series pickle compatibility. """ +from __future__ import annotations import contextlib import copy diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 2caf1f75f3da1..aa3e41c9e8423 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -4,6 +4,8 @@ that can be mixed into or pinned onto other pandas classes. 
""" +from __future__ import annotations + from typing import FrozenSet, Set import warnings diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 7ca68d8289bd5..29d13b5acff7a 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -2,6 +2,7 @@ aggregation.py contains utility functions to handle multiple named and lambda kwarg aggregations in groupby and DataFrame/Series aggregation """ +from __future__ import annotations from collections import defaultdict from functools import partial diff --git a/pandas/core/api.py b/pandas/core/api.py index 348e9206d6e19..08e75ce51d2aa 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -1,5 +1,7 @@ # flake8: noqa +from __future__ import annotations + from pandas._libs import NaT, Period, Timedelta, Timestamp from pandas._libs.missing import NA diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 99a9e1377563c..532e513abadbf 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import abc import inspect from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type, Union diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py index 1b9ed014f27b7..0f81434a62ee1 100644 --- a/pandas/core/array_algos/masked_reductions.py +++ b/pandas/core/array_algos/masked_reductions.py @@ -3,6 +3,8 @@ for missing values. """ +from __future__ import annotations + from typing import Callable import numpy as np diff --git a/pandas/core/array_algos/transforms.py b/pandas/core/array_algos/transforms.py index 371425f325d76..54a1a9fb5219a 100644 --- a/pandas/core/array_algos/transforms.py +++ b/pandas/core/array_algos/transforms.py @@ -2,6 +2,8 @@ transforms.py is for shape-preserving functions. 
""" +from __future__ import annotations + import numpy as np from pandas.core.dtypes.common import ensure_platform_int diff --git a/pandas/core/arrays/_arrow_utils.py b/pandas/core/arrays/_arrow_utils.py index 4a33e0e841f7f..564e51cf89ba3 100644 --- a/pandas/core/arrays/_arrow_utils.py +++ b/pandas/core/arrays/_arrow_utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from distutils.version import LooseVersion import json diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 2976747d66dfa..6f49f3f74c9e6 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Any, Sequence, Tuple, TypeVar import numpy as np diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py index 14b442bf71080..95bbbb45f5cc6 100644 --- a/pandas/core/arrays/_ranges.py +++ b/pandas/core/arrays/_ranges.py @@ -3,6 +3,8 @@ (and possibly TimedeltaArray/PeriodArray) """ +from __future__ import annotations + from typing import Union import numpy as np diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 8193d65b3b30c..fe8bcd1515be5 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -6,6 +6,8 @@ This is an experimental API and subject to breaking changes without warning. 
""" +from __future__ import annotations + import operator from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, cast diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index bd4bdc5ecb46f..0ddb45b2fc14f 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numbers from typing import TYPE_CHECKING, List, Tuple, Type, Union import warnings diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 27b1afdb438cb..3040578cace4e 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from csv import QUOTE_NONNUMERIC from functools import partial import operator diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 1b5e1d81f00d6..1534ca439d922 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, timedelta import operator from typing import Any, Callable, Optional, Sequence, Tuple, Type, TypeVar, Union, cast diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8b2bb7832b5d0..d7d1020b6faeb 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, time, timedelta, tzinfo from typing import Optional, Union import warnings diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d83ff91a1315f..ace538bfbe502 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numbers from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union import warnings diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py 
index d76e0fd628a48..0fbe56350cce7 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from operator import le, lt import textwrap diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 1237dea5c1a64..ecb6551657cb8 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import TYPE_CHECKING, Optional, Tuple, Type, TypeVar import numpy as np diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 23a4a70734c81..0b3b483e3a7ab 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numbers from typing import Optional, Tuple, Type, Union diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index cc39ffb5d1203..46924320df65f 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import timedelta import operator from typing import Any, Callable, List, Optional, Sequence, Type, Union diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py index da8d695c59b9e..7feaa3cb5057c 100644 --- a/pandas/core/arrays/sparse/accessor.py +++ b/pandas/core/arrays/sparse/accessor.py @@ -1,5 +1,7 @@ """Sparse accessor""" +from __future__ import annotations + import numpy as np from pandas.compat._optional import import_optional_dependency diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 1531f7b292365..60b8e42770ba6 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1,6 +1,8 @@ """ SparseArray data structure """ +from __future__ import annotations + from collections import abc import numbers import operator diff --git a/pandas/core/arrays/sparse/dtype.py 
b/pandas/core/arrays/sparse/dtype.py index ccf2825162f51..5d10ebabebb9d 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -1,5 +1,7 @@ """Sparse Dtype""" +from __future__ import annotations + import re from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type import warnings diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index eafd782dc9b9c..5a8152189d242 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -3,6 +3,8 @@ Currently only includes to_coo helpers. """ +from __future__ import annotations + from pandas.core.indexes.api import Index, MultiIndex from pandas.core.series import Series diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 381968f9724b6..27093516a65f4 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import operator from typing import TYPE_CHECKING, Type, Union diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 3e21d01355dda..4ca5b60f92ed8 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import timedelta from typing import List diff --git a/pandas/core/base.py b/pandas/core/base.py index 1926803d8f04b..45f70c0b2bd6f 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -2,6 +2,8 @@ Base and utility classes for pandas objects. """ +from __future__ import annotations + import builtins import textwrap from typing import Any, Dict, FrozenSet, List, Optional, Union diff --git a/pandas/core/common.py b/pandas/core/common.py index 6fd4700ab7f3f..ee3948f5048f8 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -4,6 +4,8 @@ Note: pandas.core.common is *not* part of the public API. 
""" +from __future__ import annotations + from collections import abc, defaultdict import contextlib from datetime import datetime, timedelta diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 82867cf9dcd29..5ae0b2891793b 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -2,6 +2,8 @@ Core eval alignment algorithms. """ +from __future__ import annotations + from functools import partial, wraps from typing import Dict, Optional, Sequence, Tuple, Type, Union import warnings diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index 327ec21c3c11c..02974322d3e7f 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from functools import reduce import numpy as np diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 9c5388faae1bd..c993a27974e8b 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -2,6 +2,8 @@ Engine classes for :func:`~pandas.eval` """ +from __future__ import annotations + import abc from typing import Dict, Type diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index b74f99fca21c7..0ff0a2a7aa1ab 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -2,6 +2,8 @@ Top level ``eval`` module. """ +from __future__ import annotations + import tokenize from typing import Optional import warnings diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index df71b4fe415f8..81bff4293adad 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -2,6 +2,8 @@ :func:`~pandas.eval` parsers. 
""" +from __future__ import annotations + import ast from functools import partial, reduce from keyword import iskeyword diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index a9c0cb0571446..1f06b3ecf3d64 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -5,6 +5,8 @@ Offer fast expression evaluation through numexpr """ +from __future__ import annotations + import operator from typing import List, Set import warnings diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index e55df1e1d8155..70c09bf81c192 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -2,6 +2,8 @@ Operator classes for eval. """ +from __future__ import annotations + from datetime import datetime from distutils.version import LooseVersion from functools import partial diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py index 86e125b6b909b..5b6f184d0a4bf 100644 --- a/pandas/core/computation/parsing.py +++ b/pandas/core/computation/parsing.py @@ -2,6 +2,8 @@ :func:`~pandas.eval` source string parsing functions """ +from __future__ import annotations + from io import StringIO from keyword import iskeyword import token diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index f1b11a6869c2b..318c6160f5d47 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -1,5 +1,7 @@ """ manage PyTables query interface via Expressions """ +from __future__ import annotations + import ast from functools import partial from typing import Any, Dict, Optional, Tuple diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 83bf92ad737e4..09ec05acc4c21 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -2,6 +2,8 @@ Module for scope operations """ +from __future__ import annotations + import datetime 
import inspect from io import StringIO diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index 07c73876954d0..3c30a0e5388fa 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -2,6 +2,8 @@ Extend pandas with custom array types. """ +from __future__ import annotations + from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union import numpy as np diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e6b4cb598989b..54928ea2fa557 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -2,6 +2,8 @@ Routines for casting. """ +from __future__ import annotations + from datetime import date, datetime, timedelta from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1e70ff90fcd44..27191e3362b52 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -2,6 +2,8 @@ Common type operations. """ +from __future__ import annotations + from typing import Any, Callable, Union import warnings diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9902016475b22..a22e094841883 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -1,6 +1,8 @@ """ Utility functions related to concat. """ +from __future__ import annotations + from typing import cast import numpy as np diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8dc500dddeafa..3386d98362afd 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -1,6 +1,7 @@ """ Define extension dtypes. 
""" +from __future__ import annotations import re from typing import ( diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index d1607b5ede6c3..141c21f13fece 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -1,5 +1,7 @@ """ basic inference routines """ +from __future__ import annotations + from collections import abc from numbers import Number import re diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index f59bb31af2828..02bed966add0b 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -1,6 +1,8 @@ """ missing types & inference """ +from __future__ import annotations + from functools import partial import numpy as np diff --git a/pandas/core/flags.py b/pandas/core/flags.py index 15966d8ddce2a..949b5ea8576d5 100644 --- a/pandas/core/flags.py +++ b/pandas/core/flags.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import weakref diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 999873e7b81e4..7634289ce83dc 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -3,6 +3,8 @@ hold the allowlist of methods that are exposed on the SeriesGroupBy and the DataFrameGroupBy objects. """ +from __future__ import annotations + import collections from typing import List diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index 4d5acf527a867..edcbe362fe9ef 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional, Tuple import numpy as np diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 537feace59fcb..b86168e2e946e 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -5,6 +5,8 @@ These are user facing as the result of the ``df.groupby(...)`` operations, which here returns a DataFrameGroupBy object. 
""" +from __future__ import annotations + from collections import abc, namedtuple import copy from functools import partial diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 651af2d314251..5dd02a6a781a5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -6,6 +6,7 @@ class providing the base-class of operations. (defined in pandas.core.groupby.generic) expose these user-facing objects to provide specific functionality. """ +from __future__ import annotations from contextlib import contextmanager import datetime diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 6678edc3821c8..907405303d2c3 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -2,6 +2,8 @@ Provide user facing operators for doing the split part of the split-apply-combine paradigm. """ +from __future__ import annotations + from typing import Dict, Hashable, List, Optional, Tuple import warnings diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index aebe60f797fcd..39677e25ea88d 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -1,4 +1,6 @@ """Common utilities for Numba operations with groupby ops""" +from __future__ import annotations + import inspect from typing import Any, Callable, Dict, Optional, Tuple diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4dd5b7f30e7f0..65f43d5171b33 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -6,6 +6,8 @@ are contained *in* the SeriesGroupBy and DataFrameGroupBy objects. 
""" +from __future__ import annotations + import collections from typing import List, Optional, Sequence, Tuple, Type diff --git a/pandas/core/index.py b/pandas/core/index.py index a315b9619b0e7..61f5c9430bbf5 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import warnings from pandas.core.indexes.api import ( # noqa:F401 diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index d9aa02db3e42a..03342e1c7a7cc 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -1,6 +1,8 @@ """ Low-dependency indexing utilities. """ +from __future__ import annotations + import warnings import numpy as np diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 881d5ce1fbaab..d06ec289d645d 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -1,6 +1,8 @@ """ datetimelike delegation """ +from __future__ import annotations + from typing import TYPE_CHECKING import warnings diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index d352b001f5d2a..3917ac9f82ed7 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import textwrap from typing import List, Set diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 65b5dfb6df911..03c8780163bf8 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from copy import copy as copy_func from datetime import datetime import operator diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index cbb30763797d1..cfcb3bb69a233 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Any, List import warnings diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py 
index e7e93068d9175..15e59c21f9392 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -1,6 +1,8 @@ """ Base and utility classes for tseries type pandas objects. """ +from __future__ import annotations + from datetime import datetime, tzinfo from typing import Any, List, Optional, TypeVar, Union, cast diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6dcb9250812d0..c3eede32a5744 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import date, datetime, time, timedelta, tzinfo import operator from typing import Optional diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index c9367b7e2ee1d..79e46e7582823 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -1,6 +1,8 @@ """ Shared methods for Index subclasses backed by ExtensionArray. """ +from __future__ import annotations + from typing import List import numpy as np diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 8c4437f2cdeb9..fc4da5705c5d7 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -6,6 +6,7 @@ - .names (FrozenList) """ +from __future__ import annotations from typing import Any diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 08f9bd51de77b..3bb01fdbc5f38 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,4 +1,6 @@ """ define the IntervalIndex """ +from __future__ import annotations + from operator import le, lt import textwrap from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union, cast diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f66b009e6d505..c23263da26156 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,3 +1,5 @@ +from __future__ import 
annotations + from sys import getsizeof from typing import ( TYPE_CHECKING, diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index cd3f1f51a86d2..7155048f42c2e 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Any import numpy as np diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index cdb502199c6f1..11973ec738f94 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, timedelta from typing import Any diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index f1457a9aac62b..32d476732860a 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import timedelta import operator from sys import getsizeof diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dccc8369c5366..26a661324353d 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,5 +1,7 @@ """ implement the TimedeltaIndex """ +from __future__ import annotations + from pandas._libs import index as libindex, lib from pandas._libs.tslibs import Timedelta, to_offset from pandas._typing import DtypeObj, Label diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index dd81823055390..b1a8938a6a410 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import TYPE_CHECKING, Hashable, List, Tuple, Union import numpy as np diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ad388ef3f53b0..b7e608e18d9dc 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import 
datetime, timedelta import inspect import re diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 88839d2211f81..4a62a964387f5 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from collections import defaultdict import copy from typing import Dict, List diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 2d4163e0dee89..8b69e1ba95bd2 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -2,6 +2,8 @@ Functions for preparing various inputs passed to the DataFrame or Series constructors before passing them to a BlockManager. """ +from __future__ import annotations + from collections import abc from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e3098d94afcb..0a74685358708 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from collections import defaultdict import itertools import operator diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index 05f5f9a00ae1b..77589eaa73086 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from collections import namedtuple from typing import TYPE_CHECKING, Iterator, List, Tuple diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 7802c5cbdbfb3..e61e25df3914d 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -2,6 +2,8 @@ Routines for filling missing data. 
""" +from __future__ import annotations + from typing import Any, List, Optional, Set, Union import numpy as np diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e3f16a3ef4f90..7df5f3a76385f 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import functools import itertools import operator diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index aab10cea33632..545484b8466bb 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -2,6 +2,8 @@ Functions for arithmetic and comparison operations on NumPy arrays and ExtensionArrays. """ +from __future__ import annotations + from datetime import timedelta from functools import partial import operator diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py index 515a0a5198d74..ba533efc8d0a4 100644 --- a/pandas/core/ops/common.py +++ b/pandas/core/ops/common.py @@ -1,6 +1,8 @@ """ Boilerplate functions used in defining binary operations. """ +from __future__ import annotations + from functools import wraps from typing import Callable diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py index bfd4afe0de86f..2f500703ccfb3 100644 --- a/pandas/core/ops/dispatch.py +++ b/pandas/core/ops/dispatch.py @@ -1,6 +1,8 @@ """ Functions for defining unary operations. 
""" +from __future__ import annotations + from typing import Any from pandas._typing import ArrayLike diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index e3a68ad328d55..92507ac7d0816 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -1,6 +1,8 @@ """ Templating for ops docstrings """ +from __future__ import annotations + from typing import Dict, Optional diff --git a/pandas/core/ops/invalid.py b/pandas/core/ops/invalid.py index cc4a1f11edd2b..6513236e8376e 100644 --- a/pandas/core/ops/invalid.py +++ b/pandas/core/ops/invalid.py @@ -1,6 +1,8 @@ """ Templates for invalid operations. """ +from __future__ import annotations + import operator import numpy as np diff --git a/pandas/core/ops/mask_ops.py b/pandas/core/ops/mask_ops.py index 8fb81faf313d7..f5206a2da7016 100644 --- a/pandas/core/ops/mask_ops.py +++ b/pandas/core/ops/mask_ops.py @@ -1,6 +1,8 @@ """ Ops for masked arrays. """ +from __future__ import annotations + from typing import Optional, Union import numpy as np diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index a4694a6e5134f..5a96a240fbd21 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -1,6 +1,8 @@ """ Functions to generate methods and pin them to the appropriate classes. """ +from __future__ import annotations + import operator from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries diff --git a/pandas/core/ops/missing.py b/pandas/core/ops/missing.py index c33cb32dcec19..01154a4887ebd 100644 --- a/pandas/core/ops/missing.py +++ b/pandas/core/ops/missing.py @@ -21,6 +21,8 @@ 3) divmod behavior consistent with 1) and 2). 
""" +from __future__ import annotations + import operator import numpy as np diff --git a/pandas/core/ops/roperator.py b/pandas/core/ops/roperator.py index e6691ddf8984e..45a3cc7eb68e7 100644 --- a/pandas/core/ops/roperator.py +++ b/pandas/core/ops/roperator.py @@ -2,6 +2,8 @@ Reversed Operations not available in the stdlib operator module. Defining these instead of using lambdas allows us to reference them by name. """ +from __future__ import annotations + import operator diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 7b5154756e613..7afc95a2db066 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy from datetime import timedelta from textwrap import dedent diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 299b68c6e71e0..9a958062f0fde 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -2,6 +2,8 @@ Concat routines. """ +from __future__ import annotations + from collections import abc from typing import TYPE_CHECKING, Iterable, List, Mapping, Union, overload diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 8724f7674f0c8..8616dab9a2fbb 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import re from typing import TYPE_CHECKING, List, cast import warnings diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 01e20f49917ac..4b7bf59fe2e0e 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2,6 +2,8 @@ SQL-style merge routines """ +from __future__ import annotations + import copy import datetime from functools import partial diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 969ac56e41860..b936babaa8cd9 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -1,3 +1,5 @@ +from __future__ import 
annotations + from typing import ( TYPE_CHECKING, Callable, diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index e81dd8f0c735c..fe03ce66b1468 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import itertools from typing import List, Optional, Union diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index f7723bee532ff..88558bac61510 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -1,6 +1,8 @@ """ Quantilization functions and related stuff """ +from __future__ import annotations + import numpy as np from pandas._libs import Timedelta, Timestamp diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 6949270317f7c..52f6d67884b48 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np from pandas.core.dtypes.common import is_list_like diff --git a/pandas/core/series.py b/pandas/core/series.py index 9d84ce4b9ab2e..0eab68c378ae2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1,6 +1,8 @@ """ Data structure for 1-dimensional cross-sectional and time series data """ +from __future__ import annotations + from io import StringIO from shutil import get_terminal_size from textwrap import dedent diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 0aaccb47efc44..854622139b8d7 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Dict _shared_docs: Dict[str, str] = dict() diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 8bdd466ae6f33..0aebbd9e51608 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -1,4 +1,6 @@ """ miscellaneous sorting / groupby utilities """ +from __future__ import annotations + from collections import defaultdict from 
typing import TYPE_CHECKING, Callable, DefaultDict, Iterable, List, Optional, Tuple diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6702bf519c52e..c7ceb01000498 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import codecs from functools import wraps import re diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8fcc5f74ea897..88bb93c0e837c 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from collections import abc from datetime import datetime from functools import partial diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index cff4695603d06..0c82208ca2f6a 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np from pandas._libs import lib diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index e457a8819f27a..a682895ecf354 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -2,6 +2,8 @@ timedelta support tools """ +from __future__ import annotations + import numpy as np from pandas._libs.tslibs import NaT diff --git a/pandas/core/tools/times.py b/pandas/core/tools/times.py index 3bac4cf0edb63..2bf260149ea4b 100644 --- a/pandas/core/tools/times.py +++ b/pandas/core/tools/times.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from datetime import datetime, time from typing import List, Optional diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index d79b9f4092325..e05f48cefb108 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -1,6 +1,8 @@ """ data hash pandas / numpy objects """ +from __future__ import annotations + import itertools from typing import Optional diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py 
index b951cd4f0cc2a..f57eb2cacda63 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,4 +1,6 @@ """Common utilities for Numba operations""" +from __future__ import annotations + from distutils.version import LooseVersion import types from typing import Callable, Dict, Optional, Tuple diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 2f3058db4493b..cfc8d4319347f 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -1,4 +1,6 @@ """Common utility functions for rolling operations""" +from __future__ import annotations + from collections import defaultdict from typing import Callable, Optional import warnings diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 1913b51a68c15..d114b97a22996 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import datetime from functools import partial from textwrap import dedent diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index ce4ab2f98c23d..e0d1edbe49913 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from textwrap import dedent from typing import Dict, Optional diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index a21521f4ce8bb..db8320af4ac3c 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -1,4 +1,6 @@ """Indexer objects for computing start/end window bounds for rolling operations""" +from __future__ import annotations + from datetime import timedelta from typing import Dict, Optional, Tuple, Type diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index aec294c3c84c2..753173a9f79a7 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Any, Callable, Dict, 
Optional, Tuple import numpy as np diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 558c0eeb0ea65..8764e8f4c1e9b 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2,6 +2,8 @@ Provide a generic structure to support window functions, similar to how we have a Groupby object. """ +from __future__ import annotations + from datetime import timedelta from functools import partial import inspect
xref https://github.com/pandas-dev/pandas/pull/36034#issuecomment-684964511 @WillAyd using `python -X importtime -c "import pandas"` I don't see any different in import time after running a few times and things are cached. so I don't think we gain anything from a performance POV and therefore only need to use it for stylistic reasons in a few modules and not across the codebase. I've opened this PR, in case someone wants to test performance with these changes. so feel free to close
https://api.github.com/repos/pandas-dev/pandas/pulls/36091
2020-09-03T13:58:29Z
2020-09-03T16:07:34Z
null
2020-09-03T16:07:34Z
DOC: add mypy version to whatsnew\v1.2.0.rst
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b07351d05defb..e65daa439a225 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -136,6 +136,8 @@ If installed, we now require: +-----------------+-----------------+----------+---------+ | pytest (dev) | 5.0.1 | | X | +-----------------+-----------------+----------+---------+ +| mypy (dev) | 0.782 | | X | ++-----------------+-----------------+----------+---------+ For `optional libraries <https://dev.pandas.io/docs/install.html#dependencies>`_ the general recommendation is to use the latest version. The following table lists the lowest version per library that is currently being tested throughout the development of pandas.
xref https://github.com/pandas-dev/pandas/pull/36012#issuecomment-685188102
https://api.github.com/repos/pandas-dev/pandas/pulls/36090
2020-09-03T12:36:39Z
2020-09-03T16:35:04Z
2020-09-03T16:35:04Z
2020-09-03T16:45:03Z
STY: add code check for use of builtin filter function
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 852f66763683b..35c4b284599a1 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -179,6 +179,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -E "super\(\w*, (self|cls)\)" pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of builtin filter function' ; echo $MSG + invgrep -R --include="*.py" -P '(?<!def)[\(\s]filter\(' pandas + RET=$(($RET + $?)) ; echo $MSG "DONE" + # Check for the following code in testing: `np.testing` and `np.array_equal` MSG='Check for invalid testing' ; echo $MSG invgrep -r -E --include '*.py' --exclude testing.py '(numpy|np)(\.testing|\.array_equal)' pandas/tests/
xref https://github.com/pandas-dev/pandas/pull/35717#issuecomment-674051885
https://api.github.com/repos/pandas-dev/pandas/pulls/36089
2020-09-03T12:15:40Z
2020-09-05T03:19:00Z
2020-09-05T03:18:59Z
2020-09-05T07:30:48Z
TYP: activate Check for missing error codes
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 852f66763683b..2e0f27fefca0b 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -230,10 +230,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" - # https://github.com/python/mypy/issues/7384 - # MSG='Check for missing error codes with # type: ignore' ; echo $MSG - # invgrep -R --include="*.py" -P '# type: ignore(?!\[)' pandas - # RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for missing error codes with # type: ignore' ; echo $MSG + invgrep -R --include="*.py" -P '# type:\s?ignore(?!\[)' pandas + RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG invgrep -R --include=*.{py,pyx} '\.__class__' pandas diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 1b5e1d81f00d6..5a44f87400b79 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -468,10 +468,9 @@ def _ndarray(self) -> np.ndarray: def _from_backing_data(self: _T, arr: np.ndarray) -> _T: # Note: we do not retain `freq` + # error: Too many arguments for "NDArrayBackedExtensionArray" # error: Unexpected keyword argument "dtype" for "NDArrayBackedExtensionArray" - # TODO: add my error code - # https://github.com/python/mypy/issues/7384 - return type(self)(arr, dtype=self.dtype) # type: ignore + return type(self)(arr, dtype=self.dtype) # type: ignore[call-arg] # ------------------------------------------------------------------ diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index 4d5acf527a867..3f04339803bf6 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -98,8 +98,10 @@ def recode_from_groupby( """ # we re-order to the original category orderings if sort: - return ci.set_categories(c.categories) # type: ignore [attr-defined] + # error: 
"CategoricalIndex" has no attribute "set_categories" + return ci.set_categories(c.categories) # type: ignore[attr-defined] # we are not sorting, so add unobserved to the end new_cats = c.categories[~c.categories.isin(ci.categories)] - return ci.add_categories(new_cats) # type: ignore [attr-defined] + # error: "CategoricalIndex" has no attribute "add_categories" + return ci.add_categories(new_cats) # type: ignore[attr-defined] diff --git a/pandas/io/common.py b/pandas/io/common.py index 9328f90ce67a3..2b13d54ec3aed 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -374,7 +374,11 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - compression_method = compression_args.pop("method") # type: ignore + # error: Incompatible types in assignment (expression has type + # "Union[str, int, None]", variable has type "Optional[str]") + compression_method = compression_args.pop( # type: ignore[assignment] + "method" + ) except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 2d64e1b051444..147e4efd74bc3 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -656,7 +656,7 @@ def _plot(cls, ax: "Axes", x, y, style=None, is_errorbar: bool = False, **kwds): if style is not None: args = (x, y, style) else: - args = (x, y) # type:ignore[assignment] + args = (x, y) # type: ignore[assignment] return ax.plot(*args, **kwds) def _get_index_name(self) -> Optional[str]:
xref #35311
https://api.github.com/repos/pandas-dev/pandas/pulls/36088
2020-09-03T12:05:01Z
2020-09-04T20:59:52Z
2020-09-04T20:59:52Z
2020-09-05T10:10:50Z
DOC: Add trailing dot
diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst index 0c780ad5f5847..33f30e1d97512 100644 --- a/doc/source/development/contributing_docstring.rst +++ b/doc/source/development/contributing_docstring.rst @@ -32,18 +32,18 @@ The next example gives an idea of what a docstring looks like: Parameters ---------- num1 : int - First number to add + First number to add. num2 : int - Second number to add + Second number to add. Returns ------- int - The sum of `num1` and `num2` + The sum of `num1` and `num2`. See Also -------- - subtract : Subtract one integer from another + subtract : Subtract one integer from another. Examples -------- @@ -998,4 +998,4 @@ mapping function names to docstrings. Wherever possible, we prefer using See ``pandas.core.generic.NDFrame.fillna`` for an example template, and ``pandas.core.series.Series.fillna`` and ``pandas.core.generic.frame.fillna`` -for the filled versions. \ No newline at end of file +for the filled versions.
https://api.github.com/repos/pandas-dev/pandas/pulls/36087
2020-09-03T11:38:59Z
2020-09-03T16:14:10Z
2020-09-03T16:14:10Z
2020-09-03T16:14:18Z
DOC: minor fixes to whatsnew\v1.1.2.rst
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c740c7b3882c9..ac9fe9d2fca26 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -30,9 +30,9 @@ Bug fixes - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) -- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) +- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) -- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) +- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. --------------------------------------------------------------------------- @@ -40,7 +40,7 @@ Bug fixes Other ~~~~~ -- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize`(:issue:`35667`) +- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`) .. 
---------------------------------------------------------------------------
https://pandas.pydata.org/pandas-docs/dev/whatsnew/v1.1.2.html
https://api.github.com/repos/pandas-dev/pandas/pulls/36086
2020-09-03T11:25:04Z
2020-09-03T16:29:47Z
2020-09-03T16:29:47Z
2020-09-03T16:46:30Z
CI: MyPy fixup
diff --git a/pandas/io/common.py b/pandas/io/common.py index 97dbc7f1031a2..9328f90ce67a3 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -165,11 +165,16 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: ) -def get_filepath_or_buffer( # type: ignore[assignment] +# https://github.com/python/mypy/issues/8708 +# error: Incompatible default for argument "encoding" (default has type "None", +# argument has type "str") +# error: Incompatible default for argument "mode" (default has type "None", +# argument has type "str") +def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, - encoding: EncodingVar = None, + encoding: EncodingVar = None, # type: ignore[assignment] compression: CompressionOptions = None, - mode: ModeVar = None, + mode: ModeVar = None, # type: ignore[assignment] storage_options: StorageOptions = None, ) -> IOargs[ModeVar, EncodingVar]: """
https://github.com/pandas-dev/pandas/runs/1064831108 ``` mypy --version mypy 0.782 Performing static analysis using mypy pandas/io/common.py:168: error: unused 'type: ignore' comment pandas/io/common.py:170: error: Incompatible default for argument "encoding" (default has type "None", argument has type "str") [assignment] pandas/io/common.py:172: error: Incompatible default for argument "mode" (default has type "None", argument has type "str") [assignment] Found 3 errors in 1 file (checked 1037 source files) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36085
2020-09-03T08:39:18Z
2020-09-03T10:20:52Z
2020-09-03T10:20:52Z
2020-09-03T10:22:21Z
Comma cleanup
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 7bb1d98086a91..8e2ac4feb7ded 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -51,7 +51,7 @@ def test_reindex_with_same_tz(self): "2010-01-02 00:00:00", ] expected1 = DatetimeIndex( - expected_list1, dtype="datetime64[ns, UTC]", freq=None, + expected_list1, dtype="datetime64[ns, UTC]", freq=None ) expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.dtype("intp")) tm.assert_index_equal(result1, expected1) diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index ea68e8759c123..233835bb4b5f7 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -799,7 +799,7 @@ def test_dti_from_tzaware_datetime(self, tz): @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_tz_constructors(self, tzstr): - """ Test different DatetimeIndex constructions with timezone + """Test different DatetimeIndex constructions with timezone Follow-up of GH#4229 """ arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 1157c7f8bb962..16af884c89e9e 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -741,18 +741,18 @@ def test_raise_invalid_sortorder(): with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"): MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2, + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2 ) with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): MultiIndex( - levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1, + 
levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1 ) def test_datetimeindex(): idx1 = pd.DatetimeIndex( - ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo", + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo" ) idx2 = pd.date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern") idx = MultiIndex.from_arrays([idx1, idx2]) diff --git a/pandas/tests/indexes/multi/test_isin.py b/pandas/tests/indexes/multi/test_isin.py index 122263e6ec198..b369b9a50954e 100644 --- a/pandas/tests/indexes/multi/test_isin.py +++ b/pandas/tests/indexes/multi/test_isin.py @@ -78,7 +78,7 @@ def test_isin_level_kwarg(): @pytest.mark.parametrize( "labels,expected,level", [ - ([("b", np.nan)], np.array([False, False, True]), None,), + ([("b", np.nan)], np.array([False, False, True]), None), ([np.nan, "a"], np.array([True, True, False]), 0), (["d", np.nan], np.array([False, True, True]), 1), ], diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index aee4b16621b4d..7720db9d98ebf 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2426,7 +2426,7 @@ def test_index_with_tuple_bool(self): # TODO: remove tupleize_cols=False once correct behaviour is restored # TODO: also this op right now produces FutureWarning from numpy idx = Index([("a", "b"), ("b", "c"), ("c", "a")], tupleize_cols=False) - result = idx == ("c", "a",) + result = idx == ("c", "a") expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py index 16c19b8d00380..6a2238d90b590 100644 --- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py +++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -104,18 +104,18 @@ def test_round(self): "L", t1a, TimedeltaIndex( - ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days 
+23:57:56"], + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] ), ), ( "S", t1a, TimedeltaIndex( - ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"], + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] ), ), - ("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"],),), - ("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"],),), + ("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), + ("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), ("d", t1c, TimedeltaIndex([-1, -1, -1], unit="D")), ]: diff --git a/pandas/tests/indexes/timedeltas/test_searchsorted.py b/pandas/tests/indexes/timedeltas/test_searchsorted.py index 4806a9acff96f..3cf45931cf6b7 100644 --- a/pandas/tests/indexes/timedeltas/test_searchsorted.py +++ b/pandas/tests/indexes/timedeltas/test_searchsorted.py @@ -17,7 +17,7 @@ def test_searchsorted_different_argument_classes(self, klass): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( - "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2], + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] ) def test_searchsorted_invalid_argument_dtype(self, arg): idx = TimedeltaIndex(["1 day", "2 days", "3 days"]) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 9cc031001f81c..656d25bec2a6b 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -144,9 +144,7 @@ def check_values(self, f, func, values=False): tm.assert_almost_equal(result, expected) - def check_result( - self, method, key, typs=None, axes=None, fails=None, - ): + def check_result(self, method, key, typs=None, axes=None, fails=None): def _eq(axis, obj, key): """ compare equal for these 2 keys """ axified = _axify(obj, key, axis) diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py index 621417eb38d94..bf51c3e5d1695 100644 --- 
a/pandas/tests/indexing/test_callable.py +++ b/pandas/tests/indexing/test_callable.py @@ -17,15 +17,11 @@ def test_frame_loc_callable(self): res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) - res = df.loc[ - lambda x: x.A > 2, - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231 + res = df.loc[lambda x: x.A > 2] # noqa: E231 + tm.assert_frame_equal(res, df.loc[df.A > 2]) # noqa: E231 - res = df.loc[ - lambda x: x.A > 2, - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231 + res = df.loc[lambda x: x.A > 2] # noqa: E231 + tm.assert_frame_equal(res, df.loc[df.A > 2]) # noqa: E231 res = df.loc[lambda x: x.B == "b", :] tm.assert_frame_equal(res, df.loc[df.B == "b", :]) @@ -94,10 +90,8 @@ def test_frame_loc_callable_labels(self): res = df.loc[lambda x: ["A", "C"]] tm.assert_frame_equal(res, df.loc[["A", "C"]]) - res = df.loc[ - lambda x: ["A", "C"], - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[["A", "C"],]) # noqa: E231 + res = df.loc[lambda x: ["A", "C"]] # noqa: E231 + tm.assert_frame_equal(res, df.loc[["A", "C"]]) # noqa: E231 res = df.loc[lambda x: ["A", "C"], :] tm.assert_frame_equal(res, df.loc[["A", "C"], :]) diff --git a/pandas/tests/indexing/test_check_indexer.py b/pandas/tests/indexing/test_check_indexer.py index 69d4065234d93..865ecb129cdfa 100644 --- a/pandas/tests/indexing/test_check_indexer.py +++ b/pandas/tests/indexing/test_check_indexer.py @@ -32,7 +32,7 @@ def test_valid_input(indexer, expected): @pytest.mark.parametrize( - "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")], + "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")] ) def test_boolean_na_returns_indexer(indexer): # https://github.com/pandas-dev/pandas/issues/31503 @@ -61,7 +61,7 @@ def test_bool_raise_length(indexer): @pytest.mark.parametrize( - "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")], + "indexer", [[0, 1, None], 
pd.array([0, 1, pd.NA], dtype="Int64")] ) def test_int_raise_missing_values(indexer): array = np.array([1, 2, 3]) @@ -89,9 +89,7 @@ def test_raise_invalid_array_dtypes(indexer): check_array_indexer(array, indexer) -@pytest.mark.parametrize( - "indexer", [None, Ellipsis, slice(0, 3), (None,)], -) +@pytest.mark.parametrize("indexer", [None, Ellipsis, slice(0, 3), (None,)]) def test_pass_through_non_array_likes(indexer): array = np.array([1, 2, 3]) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 1c5f00ff754a4..752ecd47fe089 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -87,7 +87,7 @@ def _assert_setitem_series_conversion( # tm.assert_series_equal(temp, expected_series) @pytest.mark.parametrize( - "val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)], + "val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)] ) def test_setitem_series_object(self, val, exp_dtype): obj = pd.Series(list("abcd")) diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 18b9898e7d800..c48e0a129e161 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -181,9 +181,7 @@ def test_scalar_with_mixed(self): expected = 3 assert result == expected - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) @pytest.mark.parametrize("klass", [Series, DataFrame]) def test_scalar_integer(self, index_func, klass): @@ -405,7 +403,7 @@ def test_slice_integer(self): @pytest.mark.parametrize("l", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) def test_integer_positional_indexing(self, l): - """ make sure that we are raising on positional indexing + """make sure that we are raising on positional indexing w.r.t. 
an integer index """ s = Series(range(2, 6), index=range(2, 6)) @@ -425,9 +423,7 @@ def test_integer_positional_indexing(self, l): with pytest.raises(TypeError, match=msg): s.iloc[l] - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) def test_slice_integer_frame_getitem(self, index_func): # similar to above, but on the getitem dim (of a DataFrame) @@ -486,9 +482,7 @@ def test_slice_integer_frame_getitem(self, index_func): s[l] @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): # similar to above, but on the getitem dim (of a DataFrame)
- [x] pandas/tests/indexes/datetimes/test_datetime.py - [x] pandas/tests/indexes/datetimes/test_timezones.py - [x] pandas/tests/indexes/multi/test_constructors.py - [x] pandas/tests/indexes/multi/test_isin.py - [x] pandas/tests/indexes/test_base.py - [x] pandas/tests/indexes/timedeltas/test_scalar_compat.py - [x] pandas/tests/indexes/timedeltas/test_searchsorted.py - [x] pandas/tests/indexing/common.py - [x] pandas/tests/indexing/test_callable.py - [x] pandas/tests/indexing/test_check_indexer.py - [x] pandas/tests/indexing/test_coercion.py - [x] pandas/tests/indexing/test_floats.py
https://api.github.com/repos/pandas-dev/pandas/pulls/36082
2020-09-03T02:02:22Z
2020-09-05T03:10:50Z
2020-09-05T03:10:50Z
2020-09-05T03:10:54Z
BUG: Unary pos/neg ops on IntegerArrays failing with TypeError
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index bd4bdc5ecb46f..184ea32237797 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -736,6 +736,9 @@ def boolean_arithmetic_method(self, other): name = f"__{op_name}__" return set_function_name(boolean_arithmetic_method, name, cls) + def __neg__(self): + return self.__invert__() + BooleanArray._add_logical_ops() BooleanArray._add_comparison_ops() diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 1237dea5c1a64..8d0601aac624f 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -125,6 +125,12 @@ def __len__(self) -> int: def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT: return type(self)(~self._data, self._mask) + def __neg__(self: BaseMaskedArrayT) -> BaseMaskedArrayT: + return type(self)(-self._data, self._mask) + + def __pos__(self: BaseMaskedArrayT) -> BaseMaskedArrayT: + return type(self)(+self._data, self._mask) + def to_numpy( self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default ) -> np.ndarray: diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index c93603398977e..bbcace9e3e096 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -186,3 +186,15 @@ def test_invert(self, data): result = ~s expected = pd.Series(~data, name="name") self.assert_series_equal(result, expected) + + def test_neg(self, data): + s = pd.Series(data, name="name") + result = -s + expected = pd.Series(-data, name="name") + self.assert_series_equal(result, expected) + + def test_pos(self, data): + s = pd.Series(data, name="name") + result = +s + expected = pd.Series(+data, name="name") + self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index 725533765ca2c..71b3c159169c1 100644 --- a/pandas/tests/extension/test_integer.py +++ 
b/pandas/tests/extension/test_integer.py @@ -181,6 +181,10 @@ def _compare_other(self, s, data, op_name, other): self.check_opname(s, op_name, other) +class TestUnaryOps(base.BaseUnaryOpsTests): + pass + + class TestInterface(base.BaseInterfaceTests): pass
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36081
2020-09-03T00:11:11Z
2020-09-12T18:20:11Z
null
2020-09-12T18:20:11Z
BUG: add py39 compat check for ast.slice #32766
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index df71b4fe415f8..3865c42993312 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -10,6 +10,8 @@ import numpy as np +from pandas.compat import PY39 + import pandas.core.common as com from pandas.core.computation.ops import ( _LOCAL_TAG, @@ -186,7 +188,6 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): _stmt_nodes = _filter_nodes(ast.stmt) _expr_nodes = _filter_nodes(ast.expr) _expr_context_nodes = _filter_nodes(ast.expr_context) -_slice_nodes = _filter_nodes(ast.slice) _boolop_nodes = _filter_nodes(ast.boolop) _operator_nodes = _filter_nodes(ast.operator) _unary_op_nodes = _filter_nodes(ast.unaryop) @@ -197,6 +198,9 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): _keyword_nodes = _filter_nodes(ast.keyword) _alias_nodes = _filter_nodes(ast.alias) +if not PY39: + _slice_nodes = _filter_nodes(ast.slice) + # nodes that we don't support directly but are needed for parsing _hacked_nodes = frozenset(["Assign", "Module", "Expr"])
- [x] closes #32766 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/36080
2020-09-02T23:19:47Z
2020-09-15T02:06:08Z
2020-09-15T02:06:07Z
2020-09-15T15:13:49Z
BUG: Unary pos/neg ops on IntegerArrays failing with TypeError
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 8193d65b3b30c..9e2f1607bda9d 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1171,6 +1171,15 @@ class ExtensionOpsMixin: def _create_arithmetic_method(cls, op): raise AbstractMethodError(cls) + @classmethod + def _create_unary_method(cls, op): + raise AbstractMethodError(cls) + + @classmethod + def _add_unary_ops(cls): + cls.__pos__ = cls._create_unary_method(operator.pos) + cls.__neg__ = cls._create_unary_method(operator.neg) + @classmethod def _add_arithmetic_ops(cls): cls.__add__ = cls._create_arithmetic_method(operator.add) @@ -1244,7 +1253,7 @@ class ExtensionScalarOpsMixin(ExtensionOpsMixin): """ @classmethod - def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None): + def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None, unary=False): """ A class method that returns a method that will correspond to an operator for an ExtensionArray subclass, by dispatching to the @@ -1283,6 +1292,24 @@ def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None): of the underlying elements of the ExtensionArray """ + def _maybe_convert(self, arr): + if coerce_to_dtype: + # https://github.com/pandas-dev/pandas/issues/22850 + # We catch all regular exceptions here, and fall back + # to an ndarray. 
+ res = maybe_cast_to_extension_array(type(self), arr) + if not isinstance(res, type(self)): + # exception raised in _from_sequence; ensure we have ndarray + res = np.asarray(arr) + else: + res = np.asarray(arr, dtype=result_dtype) + return res + + def _unaryop(self): + res = [op(a) for a in self] + + return _maybe_convert(self, res) + def _binop(self, other): def convert_values(param): if isinstance(param, ExtensionArray) or is_list_like(param): @@ -1302,26 +1329,15 @@ def convert_values(param): # a TypeError should be raised res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] - def _maybe_convert(arr): - if coerce_to_dtype: - # https://github.com/pandas-dev/pandas/issues/22850 - # We catch all regular exceptions here, and fall back - # to an ndarray. - res = maybe_cast_to_extension_array(type(self), arr) - if not isinstance(res, type(self)): - # exception raised in _from_sequence; ensure we have ndarray - res = np.asarray(arr) - else: - res = np.asarray(arr, dtype=result_dtype) - return res - if op.__name__ in {"divmod", "rdivmod"}: a, b = zip(*res) - return _maybe_convert(a), _maybe_convert(b) + return _maybe_convert(self, a), _maybe_convert(self, b) - return _maybe_convert(res) + return _maybe_convert(self, res) op_name = f"__{op.__name__}__" + if unary: + return set_function_name(_unaryop, op_name, cls) return set_function_name(_binop, op_name, cls) @classmethod @@ -1331,3 +1347,7 @@ def _create_arithmetic_method(cls, op): @classmethod def _create_comparison_method(cls, op): return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool) + + @classmethod + def _create_unary_method(cls, op): + return cls._create_method(op, unary=True) diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d83ff91a1315f..8450dd50ba461 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -654,9 +654,24 @@ def integer_arithmetic_method(self, other): name = f"__{op.__name__}__" return 
set_function_name(integer_arithmetic_method, name, cls) + @classmethod + def _create_unary_method(cls, op): + op_name = op.__name__ + + @unpack_zerodim_and_defer(op.__name__) + def integer_unary_method(self): + mask = self._mask + with np.errstate(all="ignore"): + result = op(self._data) + return self._maybe_mask_result(result, mask, None, op_name) + + name = f"__{op.__name__}__" + return set_function_name(integer_unary_method, name, cls) + IntegerArray._add_arithmetic_ops() IntegerArray._add_comparison_ops() +IntegerArray._add_unary_ops() _dtype_docstring = """ diff --git a/pandas/core/ops/common.py b/pandas/core/ops/common.py index 515a0a5198d74..349e699426cf0 100644 --- a/pandas/core/ops/common.py +++ b/pandas/core/ops/common.py @@ -46,9 +46,13 @@ def _unpack_zerodim_and_defer(method, name: str): method """ is_cmp = name.strip("__") in {"eq", "ne", "lt", "le", "gt", "ge"} + is_unary = name.strip("__") in {"neg", "pos"} @wraps(method) - def new_method(self, other): + def new_method(self, other=None): + + if is_unary: + return method(self) if is_cmp and isinstance(self, ABCIndexClass) and isinstance(other, ABCSeries): # For comparison ops, Index does *not* defer to Series diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index c93603398977e..bbcace9e3e096 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -186,3 +186,15 @@ def test_invert(self, data): result = ~s expected = pd.Series(~data, name="name") self.assert_series_equal(result, expected) + + def test_neg(self, data): + s = pd.Series(data, name="name") + result = -s + expected = pd.Series(-data, name="name") + self.assert_series_equal(result, expected) + + def test_pos(self, data): + s = pd.Series(data, name="name") + result = +s + expected = pd.Series(+data, name="name") + self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index 
725533765ca2c..71b3c159169c1 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -181,6 +181,10 @@ def _compare_other(self, s, data, op_name, other): self.check_opname(s, op_name, other) +class TestUnaryOps(base.BaseUnaryOpsTests): + pass + + class TestInterface(base.BaseInterfaceTests): pass
- [ ] closes #36063 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36078
2020-09-02T22:02:12Z
2020-09-05T15:56:29Z
null
2020-09-05T15:56:29Z
DOC: Fix typo of `=!` to `!=` in docstring
diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index 99c2fefc97ae7..e3a68ad328d55 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -611,7 +611,7 @@ def _make_flex_doc(op_name, typ): Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison operators. -Equivalent to `==`, `=!`, `<=`, `<`, `>=`, `>` with support to choose axis +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis (rows or columns) and level for comparison. Parameters
This fixes GH36075. - [x] closes #36075 I'm marking the following list items as not-applicable because I'm simply swapping two characters in a docstring that had been in the incorrect order. - [NA] tests added / passed - [NA] passes `black pandas` - [NA] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [NA] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36077
2020-09-02T18:00:48Z
2020-09-02T19:09:23Z
2020-09-02T19:09:23Z
2021-04-07T01:26:05Z
CLN clearing unnecessary trailing commas
diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index dc5eb15348c1b..0a7dfbee4e672 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -111,7 +111,7 @@ def test_pow_special(value, asarray): @pytest.mark.parametrize( - "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)], + "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)] ) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_special(value, asarray): @@ -128,9 +128,7 @@ def test_rpow_special(value, asarray): assert result == value -@pytest.mark.parametrize( - "value", [-1, -1.0, np.int_(-1), np.float_(-1)], -) +@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float_(-1)]) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_minus_one(value, asarray): if asarray: @@ -193,9 +191,7 @@ def test_logical_not(): assert ~NA is NA -@pytest.mark.parametrize( - "shape", [(3,), (3, 3), (1, 2, 3)], -) +@pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)]) def test_arithmetic_ndarray(shape, all_arithmetic_functions): op = all_arithmetic_functions a = np.zeros(shape) diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py index 954301b979074..1e980b6e4559c 100644 --- a/pandas/tests/scalar/timestamp/test_arithmetic.py +++ b/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -213,7 +213,7 @@ def test_add_int_with_freq(self, ts, other): with pytest.raises(TypeError, match=msg): other - ts - @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) def test_addsub_m8ndarray(self, shape): # GH#33296 ts = Timestamp("2020-04-04 15:45") @@ -237,7 +237,7 @@ def test_addsub_m8ndarray(self, shape): with pytest.raises(TypeError, match=msg): other - ts - @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) def 
test_addsub_m8ndarray_tzaware(self, shape): # GH#33296 ts = Timestamp("2020-04-04 15:45", tz="US/Pacific") diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py index 4353eb4c8cd64..ec9ba468c996c 100644 --- a/pandas/tests/series/methods/test_argsort.py +++ b/pandas/tests/series/methods/test_argsort.py @@ -9,7 +9,7 @@ class TestSeriesArgsort: def _check_accum_op(self, name, ser, check_dtype=True): func = getattr(np, name) tm.assert_numpy_array_equal( - func(ser).values, func(np.array(ser)), check_dtype=check_dtype, + func(ser).values, func(np.array(ser)), check_dtype=check_dtype ) # with missing values diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py index dd4bf642e68e8..8a915324a72c1 100644 --- a/pandas/tests/series/methods/test_convert_dtypes.py +++ b/pandas/tests/series/methods/test_convert_dtypes.py @@ -219,10 +219,10 @@ class TestSeriesConvertDtypes: pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]), object, { - ((True,), (True, False), (True, False), (True, False),): np.dtype( + ((True,), (True, False), (True, False), (True, False)): np.dtype( "datetime64[ns]" ), - ((False,), (True, False), (True, False), (True, False),): np.dtype( + ((False,), (True, False), (True, False), (True, False)): np.dtype( "O" ), }, diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index 40651c4342e8a..6eb0e09f12658 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -141,7 +141,7 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered): def test_drop_duplicates_categorical_bool(self, ordered): tc = Series( Categorical( - [True, False, True, False], categories=[True, False], ordered=ordered, + [True, False, True, False], categories=[True, False], ordered=ordered ) ) diff --git 
a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py index c4b10e0ccdc3e..cba9443005f2f 100644 --- a/pandas/tests/series/methods/test_interpolate.py +++ b/pandas/tests/series/methods/test_interpolate.py @@ -30,7 +30,7 @@ ] ) def nontemporal_method(request): - """ Fixture that returns an (method name, required kwargs) pair. + """Fixture that returns an (method name, required kwargs) pair. This fixture does not include method 'time' as a parameterization; that method requires a Series with a DatetimeIndex, and is generally tested @@ -60,7 +60,7 @@ def nontemporal_method(request): ] ) def interp_methods_ind(request): - """ Fixture that returns a (method name, required kwargs) pair to + """Fixture that returns a (method name, required kwargs) pair to be tested for various Index types. This fixture does not include methods - 'time', 'index', 'nearest', diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py index cdf6a16e88ad0..d651315d64561 100644 --- a/pandas/tests/series/methods/test_unstack.py +++ b/pandas/tests/series/methods/test_unstack.py @@ -75,9 +75,7 @@ def test_unstack_tuplename_in_multiindex(): expected = pd.DataFrame( [[1, 1, 1], [1, 1, 1], [1, 1, 1]], - columns=pd.MultiIndex.from_tuples( - [("a",), ("b",), ("c",)], names=[("A", "a")], - ), + columns=pd.MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), index=pd.Index([1, 2, 3], name=("B", "b")), ) tm.assert_frame_equal(result, expected) @@ -115,7 +113,7 @@ def test_unstack_mixed_type_name_in_multiindex( result = ser.unstack(unstack_idx) expected = pd.DataFrame( - expected_values, columns=expected_columns, index=expected_index, + expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py index 0b4c5f091106a..e070b86717503 100644 --- 
a/pandas/tests/series/test_cumulative.py +++ b/pandas/tests/series/test_cumulative.py @@ -17,7 +17,7 @@ def _check_accum_op(name, series, check_dtype=True): func = getattr(np, name) tm.assert_numpy_array_equal( - func(series).values, func(np.array(series)), check_dtype=check_dtype, + func(series).values, func(np.array(series)), check_dtype=check_dtype ) # with missing values diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 67a2dc2303550..4e94051305f49 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -970,7 +970,7 @@ def test_isin_int_df_string_search(self): @pytest.mark.xfail(reason="problem related with issue #34125") def test_isin_nan_df_string_search(self): """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN") - -> should not match values because np.nan is not equal str NaN """ + -> should not match values because np.nan is not equal str NaN""" df = pd.DataFrame({"values": [np.nan, 2]}) result = df.isin(["NaN"]) expected_false = pd.DataFrame({"values": [False, False]})
This PR is related to issue #35925, black 19.10b0 and upgraded version both passes these changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/36073
2020-09-02T16:51:04Z
2020-09-02T19:21:30Z
2020-09-02T19:21:30Z
2020-09-02T19:21:45Z
Backport PR #36051 on branch 1.1.x (BUG: frame._item_cache not cleared when Series is altered)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9be5b5f0ad2dc..927b3290ac606 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -31,6 +31,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8bd1dbea4696f..67e5759b39808 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3233,6 +3233,10 @@ def _maybe_update_cacher( if len(self) == len(ref): # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self) + else: + # GH#33675 we have swapped in a new array, so parent + # reference to self is now invalid + ref._item_cache.pop(cacher[0], None) if verify_is_copy: self._check_setitem_copy(stacklevel=5, t="referant") diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9bf5d24085697..b4f91590e09d1 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -135,13 +135,20 @@ def test_drop_and_dropna_caching(self): df2 = df.copy() df["A"].dropna() tm.assert_series_equal(df["A"], original) - return_value = df["A"].dropna(inplace=True) - tm.assert_series_equal(df["A"], expected) + + ser = df["A"] + return_value = ser.dropna(inplace=True) + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(df["A"], original) 
assert return_value is None + df2["A"].drop([1]) tm.assert_series_equal(df2["A"], original) - return_value = df2["A"].drop([1], inplace=True) - tm.assert_series_equal(df2["A"], original.drop([1])) + + ser = df2["A"] + return_value = ser.drop([1], inplace=True) + tm.assert_series_equal(ser, original.drop([1])) + tm.assert_series_equal(df2["A"], original) assert return_value is None def test_dropna_corner(self, float_frame): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index fa5fe5ba5c384..9910ef1b04b1a 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -81,6 +81,21 @@ def test_setitem_cache_updating(self): tm.assert_frame_equal(out, expected) tm.assert_series_equal(out["A"], expected["A"]) + def test_altering_series_clears_parent_cache(self): + # GH #33675 + df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) + ser = df["A"] + + assert "A" in df._item_cache + + # Adding a new entry to ser swaps in a new array, so "A" needs to + # be removed from df._item_cache + ser["c"] = 5 + assert len(ser) == 3 + assert "A" not in df._item_cache + assert df["A"] is not ser + assert len(df["A"]) == 2 + class TestChaining: def test_setitem_chained_setfault(self):
Backport PR #36051: BUG: frame._item_cache not cleared when Series is altered
https://api.github.com/repos/pandas-dev/pandas/pulls/36072
2020-09-02T16:02:19Z
2020-09-02T17:09:18Z
2020-09-02T17:09:18Z
2020-09-02T17:09:18Z
Backport PR #35852 on branch 1.1.x (API: replace dropna=False option with na_sentinel=None in factorize)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9be5b5f0ad2dc..dc7adf6d9d00e 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -34,6 +34,14 @@ Bug fixes .. --------------------------------------------------------------------------- +.. _whatsnew_112.other: + +Other +~~~~~ +- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize`(:issue:`35667`) + +.. --------------------------------------------------------------------------- + .. _whatsnew_112.contributors: Contributors diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9e3ca4cc53363..856b4ead3f3cc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -525,9 +525,8 @@ def _factorize_array( def factorize( values, sort: bool = False, - na_sentinel: int = -1, + na_sentinel: Optional[int] = -1, size_hint: Optional[int] = None, - dropna: bool = True, ) -> Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]: """ Encode the object as an enumerated type or categorical variable. @@ -540,8 +539,11 @@ def factorize( Parameters ---------- {values}{sort} - na_sentinel : int, default -1 - Value to mark "not found". + na_sentinel : int or None, default -1 + Value to mark "not found". If None, will not drop the NaN + from the uniques of the values. + + .. versionchanged:: 1.1.2 {size_hint}\ Returns @@ -619,6 +621,22 @@ def factorize( array([0, 0, 1]...) >>> uniques Index(['a', 'c'], dtype='object') + + If NaN is in the values, and we want to include NaN in the uniques of the + values, it can be achieved by setting ``na_sentinel=None``. 
+ + >>> values = np.array([1, 2, 1, np.nan]) + >>> codes, uniques = pd.factorize(values) # default: na_sentinel=-1 + >>> codes + array([ 0, 1, 0, -1]) + >>> uniques + array([1., 2.]) + + >>> codes, uniques = pd.factorize(values, na_sentinel=None) + >>> codes + array([0, 1, 0, 2]) + >>> uniques + array([ 1., 2., nan]) """ # Implementation notes: This method is responsible for 3 things # 1.) coercing data to array-like (ndarray, Index, extension array) @@ -632,6 +650,13 @@ def factorize( values = _ensure_arraylike(values) original = values + # GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques + # of values, assign na_sentinel=-1 to replace code value for NaN. + dropna = True + if na_sentinel is None: + na_sentinel = -1 + dropna = False + if is_extension_array_dtype(values.dtype): values = extract_array(values) codes, uniques = values.factorize(na_sentinel=na_sentinel) diff --git a/pandas/core/base.py b/pandas/core/base.py index b62ef668df5e1..1926803d8f04b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1398,7 +1398,7 @@ def memory_usage(self, deep=False): """ ), ) - def factorize(self, sort=False, na_sentinel=-1): + def factorize(self, sort: bool = False, na_sentinel: Optional[int] = -1): return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel) _shared_docs[ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 8239a792c65dd..272afe7335c6a 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -585,8 +585,13 @@ def _make_codes(self) -> None: codes = self.grouper.codes_info uniques = self.grouper.result_index else: + # GH35667, replace dropna=False with na_sentinel=None + if not self.dropna: + na_sentinel = None + else: + na_sentinel = -1 codes, uniques = algorithms.factorize( - self.grouper, sort=self.sort, dropna=self.dropna + self.grouper, sort=self.sort, na_sentinel=na_sentinel ) uniques = Index(uniques, name=self.name) self._codes = codes diff --git 
a/pandas/tests/base/test_factorize.py b/pandas/tests/base/test_factorize.py index 415a8b7e4362f..9fad9856d53cc 100644 --- a/pandas/tests/base/test_factorize.py +++ b/pandas/tests/base/test_factorize.py @@ -26,3 +26,16 @@ def test_factorize(index_or_series_obj, sort): tm.assert_numpy_array_equal(result_codes, expected_codes) tm.assert_index_equal(result_uniques, expected_uniques) + + +def test_series_factorize_na_sentinel_none(): + # GH35667 + values = np.array([1, 2, 1, np.nan]) + ser = pd.Series(values) + codes, uniques = ser.factorize(na_sentinel=None) + + expected_codes = np.array([0, 1, 0, 2], dtype="int64") + expected_uniques = pd.Index([1.0, 2.0, np.nan]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index a080bf0feaebc..326c926238f89 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -326,73 +326,47 @@ def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques): tm.assert_extension_array_equal(uniques, expected_uniques) @pytest.mark.parametrize( - "data, dropna, expected_codes, expected_uniques", + "data, expected_codes, expected_uniques", [ ( ["a", None, "b", "a"], - True, - np.array([0, -1, 1, 0], dtype=np.dtype("intp")), - np.array(["a", "b"], dtype=object), - ), - ( - ["a", np.nan, "b", "a"], - True, - np.array([0, -1, 1, 0], dtype=np.dtype("intp")), - np.array(["a", "b"], dtype=object), - ), - ( - ["a", None, "b", "a"], - False, np.array([0, 2, 1, 0], dtype=np.dtype("intp")), np.array(["a", "b", np.nan], dtype=object), ), ( ["a", np.nan, "b", "a"], - False, np.array([0, 2, 1, 0], dtype=np.dtype("intp")), np.array(["a", "b", np.nan], dtype=object), ), ], ) - def test_object_factorize_dropna( - self, data, dropna, expected_codes, expected_uniques + def test_object_factorize_na_sentinel_none( + self, data, expected_codes, expected_uniques ): - codes, uniques = algos.factorize(data, 
dropna=dropna) + codes, uniques = algos.factorize(data, na_sentinel=None) tm.assert_numpy_array_equal(uniques, expected_uniques) tm.assert_numpy_array_equal(codes, expected_codes) @pytest.mark.parametrize( - "data, dropna, expected_codes, expected_uniques", + "data, expected_codes, expected_uniques", [ ( [1, None, 1, 2], - True, - np.array([0, -1, 0, 1], dtype=np.dtype("intp")), - np.array([1, 2], dtype="O"), - ), - ( - [1, np.nan, 1, 2], - True, - np.array([0, -1, 0, 1], dtype=np.dtype("intp")), - np.array([1, 2], dtype=np.float64), - ), - ( - [1, None, 1, 2], - False, np.array([0, 2, 0, 1], dtype=np.dtype("intp")), np.array([1, 2, np.nan], dtype="O"), ), ( [1, np.nan, 1, 2], - False, np.array([0, 2, 0, 1], dtype=np.dtype("intp")), np.array([1, 2, np.nan], dtype=np.float64), ), ], ) - def test_int_factorize_dropna(self, data, dropna, expected_codes, expected_uniques): - codes, uniques = algos.factorize(data, dropna=dropna) + def test_int_factorize_na_sentinel_none( + self, data, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize(data, na_sentinel=None) tm.assert_numpy_array_equal(uniques, expected_uniques) tm.assert_numpy_array_equal(codes, expected_codes)
Backport PR #35852: API: replace dropna=False option with na_sentinel=None in factorize
https://api.github.com/repos/pandas-dev/pandas/pulls/36071
2020-09-02T15:05:47Z
2020-09-02T16:37:26Z
2020-09-02T16:37:25Z
2020-09-02T16:37:26Z
TYP: statically define attributes in plotting._matplotlib.core
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 93ba9bd26630b..5270c7362d29f 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -66,16 +66,6 @@ def _kind(self): _layout_type = "vertical" _default_rot = 0 orientation: Optional[str] = None - _pop_attributes = [ - "label", - "style", - "mark_right", - "stacked", - ] - _attr_defaults = { - "mark_right": True, - "stacked": False, - } def __init__( self, @@ -165,9 +155,10 @@ def __init__( self.logx = kwds.pop("logx", False) self.logy = kwds.pop("logy", False) self.loglog = kwds.pop("loglog", False) - for attr in self._pop_attributes: - value = kwds.pop(attr, self._attr_defaults.get(attr, None)) - setattr(self, attr, value) + self.label = kwds.pop("label", None) + self.style = kwds.pop("style", None) + self.mark_right = kwds.pop("mark_right", True) + self.stacked = kwds.pop("stacked", False) self.ax = ax self.fig = fig
continuation of changes in #36016 pandas\plotting\_matplotlib\core.py:231: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:232: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:233: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:235: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:385: error: "MPLPlot" has no attribute "label"; maybe "ylabel" or "xlabel"? [attr-defined] pandas\plotting\_matplotlib\core.py:553: error: "MPLPlot" has no attribute "mark_right" [attr-defined] pandas\plotting\_matplotlib\core.py:732: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:733: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:735: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:738: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:739: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:741: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:1008: error: "ScatterPlot" has no attribute "label" [attr-defined] pandas\plotting\_matplotlib\core.py:1075: error: "LinePlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1180: error: "LinePlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1269: error: "AreaPlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1351: error: "BarPlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1427: error: "BarPlot" has no attribute "stacked" [attr-defined]
https://api.github.com/repos/pandas-dev/pandas/pulls/36068
2020-09-02T13:15:11Z
2020-09-02T15:42:30Z
2020-09-02T15:42:30Z
2020-09-02T15:49:20Z
TYP: update setup.cfg
diff --git a/setup.cfg b/setup.cfg index 2d1c8037636de..29c731848de8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -321,9 +321,6 @@ check_untyped_defs=False [mypy-pandas.plotting._matplotlib.core] check_untyped_defs=False -[mypy-pandas.plotting._matplotlib.misc] -check_untyped_defs=False - [mypy-pandas.plotting._misc] check_untyped_defs=False
fixes in #36017 merged between generating config in #36012 and merging #36012
https://api.github.com/repos/pandas-dev/pandas/pulls/36067
2020-09-02T13:12:50Z
2020-09-02T15:41:40Z
2020-09-02T15:41:40Z
2020-09-02T15:47:31Z
BUG: groupby and agg on read-only array gives ValueError: buffer source array is read-only
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9b1ad658d4666..be58aea5783bb 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,7 +18,7 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) -- +- Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 38cb973d6dde9..a83634aad3ce2 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -229,7 +229,7 @@ def group_cumprod_float64(float64_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cumsum(numeric[:, :] out, - numeric[:, :] values, + ndarray[numeric, ndim=2] values, const int64_t[:] labels, int ngroups, is_datetimelike, @@ -472,7 +472,7 @@ ctypedef fused complexfloating_t: @cython.boundscheck(False) def _group_add(complexfloating_t[:, :] out, int64_t[:] counts, - complexfloating_t[:, :] values, + ndarray[complexfloating_t, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -483,8 +483,9 @@ def _group_add(complexfloating_t[:, :] out, complexfloating_t val, count complexfloating_t[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if len(values) != len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -530,7 +531,7 @@ group_add_complex128 = _group_add['double complex'] 
@cython.boundscheck(False) def _group_prod(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -541,8 +542,9 @@ def _group_prod(floating[:, :] out, floating val, count floating[:, :] prodx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -582,7 +584,7 @@ group_prod_float64 = _group_prod['double'] @cython.cdivision(True) def _group_var(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1, int64_t ddof=1): @@ -591,10 +593,11 @@ def _group_var(floating[:, :] out, floating val, ct, oldmean floating[:, :] mean int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -639,7 +642,7 @@ group_var_float64 = _group_var['double'] @cython.boundscheck(False) def _group_mean(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): cdef: @@ -647,10 +650,11 @@ def _group_mean(floating[:, :] out, floating val, count floating[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) @@ -689,7 +693,7 @@ group_mean_float64 = _group_mean['double'] @cython.boundscheck(False) def 
_group_ohlc(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): """ @@ -740,7 +744,7 @@ group_ohlc_float64 = _group_ohlc['double'] @cython.boundscheck(False) @cython.wraparound(False) def group_quantile(ndarray[float64_t] out, - numeric[:] values, + ndarray[numeric, ndim=1] values, ndarray[int64_t] labels, ndarray[uint8_t] mask, float64_t q, @@ -1072,7 +1076,7 @@ def group_nth(rank_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_rank(float64_t[:, :] out, - rank_t[:, :] values, + ndarray[rank_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike, object ties_method="average", @@ -1424,7 +1428,7 @@ def group_min(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummin(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): @@ -1484,7 +1488,7 @@ def group_cummin(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummax(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 5ddda264642de..87ebd8b5a27fb 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -236,3 +236,44 @@ def test_cython_with_timestamp_and_nat(op, data): result = df.groupby("a").aggregate(op) tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "agg", + [ + "min", + "max", + "count", + "sum", + "prod", + "var", + "mean", + "median", + "ohlc", + "cumprod", + "cumsum", + "shift", + "any", + "all", + "quantile", + "first", + "last", + "rank", + "cummin", + "cummax", + ], +) +def test_read_only_buffer_source_agg(agg): + # 
https://github.com/pandas-dev/pandas/issues/36014 + df = DataFrame( + { + "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0], + "species": ["setosa", "setosa", "setosa", "setosa", "setosa"], + } + ) + df._mgr.blocks[0].values.flags.writeable = False + + result = df.groupby(["species"]).agg({"sepal_length": agg}) + expected = df.copy().groupby(["species"]).agg({"sepal_length": agg}) + + tm.assert_equal(result, expected)
- [x] closes #36014 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I found multiple aggregate functions that were failing for a read-only array. I applied the change suggested in #36014 to all of them. A couple of questions. 1. I had to add the Py_ssize_t cdefs to suppress the error below for `len(values) != len(labels)`. Let me know if there is another preferred way of doing it. ``` error: comparison of integer expressions of different signedness: ‘Py_ssize_t’ {aka ‘long int’} and ‘size_t’ {aka ‘long unsigned int’} [-Werror=sign-compare] ``` 2. I see some functions don't have the `len(values) != len(labels)` check. Should it be there in all the functions?
https://api.github.com/repos/pandas-dev/pandas/pulls/36061
2020-09-02T07:00:58Z
2020-09-04T14:28:16Z
2020-09-04T14:28:16Z
2020-09-05T08:26:11Z
CLN remove unnecessary trailing commas in groupby tests
diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 29e65e938f6f9..c4266996748c2 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -57,7 +57,7 @@ def func_numba(values, index): func_numba = numba.jit(func_numba) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -90,7 +90,7 @@ def func_2(values, index): func_2 = numba.jit(func_2) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -121,7 +121,7 @@ def func_1(values, index): return np.mean(values) - 3.4 data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) expected = grouped.agg(func_1, engine="numba") @@ -142,7 +142,7 @@ def func_1(values, index): ) def test_multifunc_notimplimented(agg_func): data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) with pytest.raises(NotImplementedError, match="Numba engine can"): diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index a1dcb28a32c6c..3183305fe2933 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -946,9 +946,7 @@ def fct(group): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize( - "function", [lambda 
gr: gr.index, lambda gr: gr.index + 1 - 1], -) +@pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1]) def test_apply_function_index_return(function): # GH: 22541 df = pd.DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 13a32e285e70a..cbf9e720ecfd0 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,7 +17,7 @@ def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN): - """ Reindex to a cartesian production for the groupers, + """Reindex to a cartesian production for the groupers, preserving the nature (Categorical) of each grouper """ diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index adf62c4723526..d1501111cb22b 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -246,9 +246,7 @@ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): (pd.Period("2020-01-01"), pd.Period("2020-02-01")), ], ) -@pytest.mark.parametrize( - "dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)], -) +@pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])]) def test_groupby_dropna_datetime_like_data( dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2 ): diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 7271911c5f80f..cc7a79e976513 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -51,9 +51,7 @@ def test_groupby_preserves_subclass(obj, groupby_func): tm.assert_series_equal(result1, result2) -@pytest.mark.parametrize( - "obj", [DataFrame, tm.SubclassedDataFrame], -) +@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame]) def 
test_groupby_resample_preserves_subclass(obj): # GH28330 -- preserve subclass through groupby.resample() diff --git a/pandas/tests/groupby/test_size.py b/pandas/tests/groupby/test_size.py index 9cff8b966dad0..ba27e5a24ba00 100644 --- a/pandas/tests/groupby/test_size.py +++ b/pandas/tests/groupby/test_size.py @@ -53,7 +53,7 @@ def test_size_on_categorical(as_index): result = df.groupby(["A", "B"], as_index=as_index).size() expected = DataFrame( - [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"], + [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"] ) expected["A"] = expected["A"].astype("category") if as_index: diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 84fd7a1bdfb05..4ccbc6a65fd88 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -780,6 +780,6 @@ def test_grouper_period_index(self): result = period_series.groupby(period_series.index.month).sum() expected = pd.Series( - range(0, periods), index=Index(range(1, periods + 1), name=index.name), + range(0, periods), index=Index(range(1, periods + 1), name=index.name) ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index ee482571e644d..87723cd7c8f50 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -56,7 +56,7 @@ def func(values, index): func = numba.jit(func) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -89,7 +89,7 @@ def func_2(values, index): func_2 = numba.jit(func_2) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + 
{0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -120,7 +120,7 @@ def func_1(values, index): return values + 1 data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) expected = grouped.transform(func_1, engine="numba")
xref #35925
https://api.github.com/repos/pandas-dev/pandas/pulls/36059
2020-09-02T01:32:16Z
2020-09-02T16:19:49Z
2020-09-02T16:19:49Z
2020-09-02T16:20:00Z
Comma cleanup for #35925
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 52a1e3aae9058..b0ba0d991c9b0 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -86,11 +86,7 @@ def wrapper(x): result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal( - result0, - frame.apply(wrapper), - check_dtype=check_dtype, - rtol=rtol, - atol=atol, + result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol ) # HACK: win32 tm.assert_series_equal( @@ -116,7 +112,7 @@ def wrapper(x): if opname in ["sum", "prod"]: expected = frame.apply(skipna_wrapper, axis=1) tm.assert_series_equal( - result1, expected, check_dtype=False, rtol=rtol, atol=atol, + result1, expected, check_dtype=False, rtol=rtol, atol=atol ) # check dtypes diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c8f5b2b0f6364..0d1004809f7f1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -932,7 +932,7 @@ def test_constructor_mrecarray(self): # from GH3479 assert_fr_equal = functools.partial( - tm.assert_frame_equal, check_index_type=True, check_column_type=True, + tm.assert_frame_equal, check_index_type=True, check_column_type=True ) arrays = [ ("float", np.array([1.5, 2.0])), diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 6a8f1e7c1aca2..d80ebaa09b6a8 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -417,7 +417,7 @@ def test_unstack_mixed_type_name_in_multiindex( result = df.unstack(unstack_idx) expected = pd.DataFrame( - expected_values, columns=expected_columns, index=expected_index, + expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) @@ -807,7 +807,7 @@ def test_unstack_multi_level_cols(self): [["B", "C"], ["B", "D"]], names=["c1", "c2"] ), 
index=pd.MultiIndex.from_tuples( - [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"], + [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"] ), ) assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"] diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index 4d0f1a326225d..8898619e374ab 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -123,7 +123,7 @@ (pd.DataFrame, frame_data, operator.methodcaller("sort_index")), (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")), (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")), - (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel"),), + (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")), pytest.param( ( pd.DataFrame, @@ -178,7 +178,7 @@ marks=not_implemented_mark, ), pytest.param( - (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack"),), + (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")), marks=not_implemented_mark, ), pytest.param( @@ -317,7 +317,7 @@ marks=not_implemented_mark, ), pytest.param( - (pd.Series, ([1, 2],), operator.methodcaller("squeeze")), + (pd.Series, ([1, 2],), operator.methodcaller("squeeze")) # marks=not_implemented_mark, ), (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")), @@ -733,9 +733,7 @@ def test_timedelta_property(attr): assert result.attrs == {"a": 1} -@pytest.mark.parametrize( - "method", [operator.methodcaller("total_seconds")], -) +@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")]) @not_implemented_mark def test_timedelta_methods(method): s = pd.Series(pd.timedelta_range("2000", periods=4)) diff --git a/pandas/tests/generic/test_to_xarray.py b/pandas/tests/generic/test_to_xarray.py index ab56a752f7e90..a85d7ddc1ea53 100644 --- a/pandas/tests/generic/test_to_xarray.py +++ b/pandas/tests/generic/test_to_xarray.py @@ -47,9 +47,7 @@ def 
test_to_xarray_index_types(self, index): expected = df.copy() expected["f"] = expected["f"].astype(object) expected.columns.name = None - tm.assert_frame_equal( - result.to_dataframe(), expected, - ) + tm.assert_frame_equal(result.to_dataframe(), expected) @td.skip_if_no("xarray", min_version="0.7.0") def test_to_xarray(self): diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 29e65e938f6f9..c4266996748c2 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -57,7 +57,7 @@ def func_numba(values, index): func_numba = numba.jit(func_numba) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -90,7 +90,7 @@ def func_2(values, index): func_2 = numba.jit(func_2) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -121,7 +121,7 @@ def func_1(values, index): return np.mean(values) - 3.4 data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) expected = grouped.agg(func_1, engine="numba") @@ -142,7 +142,7 @@ def func_1(values, index): ) def test_multifunc_notimplimented(agg_func): data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) with pytest.raises(NotImplementedError, match="Numba engine can"): diff 
--git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index a1dcb28a32c6c..3183305fe2933 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -946,9 +946,7 @@ def fct(group): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize( - "function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1], -) +@pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1]) def test_apply_function_index_return(function): # GH: 22541 df = pd.DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], columns=["id"]) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 13a32e285e70a..711daf7fe415d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,7 +17,7 @@ def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN): - """ Reindex to a cartesian production for the groupers, + """Reindex to a cartesian production for the groupers, preserving the nature (Categorical) of each grouper """ @@ -1449,7 +1449,7 @@ def test_groupby_agg_categorical_columns(func, expected_values): result = df.groupby("groups").agg(func) expected = pd.DataFrame( - {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups"), + {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups") ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8c51ebf89f5c0..bd7609551a6bc 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -676,7 +676,7 @@ def test_ops_not_as_index(reduction_func): if reduction_func in ("corrwith",): pytest.skip("Test not applicable") - if reduction_func in ("nth", "ngroup",): + if reduction_func in ("nth", "ngroup"): pytest.skip("Skip until behavior is determined (GH #5755)") df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"]) 
diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index adf62c4723526..d1501111cb22b 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -246,9 +246,7 @@ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): (pd.Period("2020-01-01"), pd.Period("2020-02-01")), ], ) -@pytest.mark.parametrize( - "dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)], -) +@pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])]) def test_groupby_dropna_datetime_like_data( dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2 ): diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 7271911c5f80f..cc7a79e976513 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -51,9 +51,7 @@ def test_groupby_preserves_subclass(obj, groupby_func): tm.assert_series_equal(result1, result2) -@pytest.mark.parametrize( - "obj", [DataFrame, tm.SubclassedDataFrame], -) +@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame]) def test_groupby_resample_preserves_subclass(obj): # GH28330 -- preserve subclass through groupby.resample() diff --git a/pandas/tests/groupby/test_size.py b/pandas/tests/groupby/test_size.py index 9cff8b966dad0..ba27e5a24ba00 100644 --- a/pandas/tests/groupby/test_size.py +++ b/pandas/tests/groupby/test_size.py @@ -53,7 +53,7 @@ def test_size_on_categorical(as_index): result = df.groupby(["A", "B"], as_index=as_index).size() expected = DataFrame( - [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"], + [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"] ) expected["A"] = expected["A"].astype("category") if as_index:
- [x] pandas/tests/generic/test_finalize.py - [x] pandas/tests/generic/test_to_xarray.py - [x] pandas/tests/groupby/aggregate/test_numba.py - [x] pandas/tests/groupby/test_apply.py - [x] pandas/tests/groupby/test_categorical.py - [x] pandas/tests/groupby/test_groupby.py - [x] pandas/tests/groupby/test_groupby_dropna.py - [x] pandas/tests/groupby/test_groupby_subclass.py
https://api.github.com/repos/pandas-dev/pandas/pulls/36058
2020-09-02T01:29:17Z
2020-09-02T13:28:45Z
2020-09-02T13:28:45Z
2020-09-02T13:28:56Z
CLN remove unnecessary trailing commas
diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 50b5fe8e6f6b9..72ef7ea6bf8ca 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -156,9 +156,7 @@ def test_compare_scalar_other(self, op, array, other): expected = self.elementwise_comparison(op, array, other) tm.assert_numpy_array_equal(result, expected) - def test_compare_list_like_interval( - self, op, array, interval_constructor, - ): + def test_compare_list_like_interval(self, op, array, interval_constructor): # same endpoints other = interval_constructor(array.left, array.right) result = op(array, other) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 484f83deb0f55..ecac08ffe3ba2 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -99,7 +99,7 @@ class TestNumericArraylikeArithmeticWithDatetimeLike: # TODO: also check name retentention @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series]) @pytest.mark.parametrize( - "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype), + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) ) def test_mul_td64arr(self, left, box_cls): # GH#22390 @@ -119,7 +119,7 @@ def test_mul_td64arr(self, left, box_cls): # TODO: also check name retentention @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series]) @pytest.mark.parametrize( - "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype), + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) ) def test_div_td64arr(self, left, box_cls): # GH#22390 diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py index e79262e1b7934..8ed1c27087b02 100644 --- a/pandas/tests/arrays/boolean/test_logical.py +++ b/pandas/tests/arrays/boolean/test_logical.py @@ -205,9 +205,7 @@ def test_kleene_xor_scalar(self, other, expected): a, pd.array([True, 
False, None], dtype="boolean") ) - @pytest.mark.parametrize( - "other", [True, False, pd.NA, [True, False, None] * 3], - ) + @pytest.mark.parametrize("other", [True, False, pd.NA, [True, False, None] * 3]) def test_no_masked_assumptions(self, other, all_logical_operators): # The logical operations should not assume that masked values are False! a = pd.arrays.BooleanArray(
#35925 - pandas/tests/arithmetic/test_interval.py - pandas/tests/arithmetic/test_numeric.py - pandas/tests/arrays/boolean/test_logical.py I am not including "pandas/io/sas/sas_xport.py" and "pandas/io/stata.py" as there is a PR already open for those.
https://api.github.com/repos/pandas-dev/pandas/pulls/36057
2020-09-01T23:19:03Z
2020-09-02T16:14:01Z
2020-09-02T16:14:01Z
2020-09-02T16:14:07Z
STY/WIP: check for private imports/lookups
diff --git a/Makefile b/Makefile index 4a9a48992f92f..b915d8840cd8d 100644 --- a/Makefile +++ b/Makefile @@ -32,3 +32,9 @@ check: --included-file-extensions="py" \ --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored \ pandas/ + + python3 scripts/validate_unwanted_patterns.py \ + --validation-type="private_import_across_module" \ + --included-file-extensions="py" \ + --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ + pandas/ diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 875f1dbb83ce3..54aa830379c07 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -116,11 +116,19 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then fi RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check for use of private module attribute access' ; echo $MSG + MSG='Check for import of private attributes across modules' ; echo $MSG if [[ "$GITHUB_ACTIONS" == "true" ]]; then - $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ else - $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + fi + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Check for use of private functions across modules' ; echo $MSG + if [[ "$GITHUB_ACTIONS" == "true" ]]; then + 
$BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + else + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ pandas/ fi RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 6302b48cb1978..b013246e724de 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -54,7 +54,7 @@ from pandas.core import missing, nanops, ops from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts -from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionOpsMixin import pandas.core.common as com from pandas.core.construction import array, extract_array @@ -472,11 +472,11 @@ class DatetimeLikeArrayMixin( def _ndarray(self) -> np.ndarray: return self._data - def _from_backing_data(self: _T, arr: np.ndarray) -> _T: + def _from_backing_data( + self: DatetimeLikeArrayT, arr: np.ndarray + ) -> DatetimeLikeArrayT: # Note: we do not retain `freq` - return type(self)._simple_new( # type: ignore[attr-defined] - arr, dtype=self.dtype - ) + return type(self)._simple_new(arr, dtype=self.dtype) # ------------------------------------------------------------------ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d83ff91a1315f..dc08e018397bc 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -106,7 +106,7 @@ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: [t.numpy_dtype if isinstance(t, BaseMaskedDtype) else 
t for t in dtypes], [] ) if np.issubdtype(np_dtype, np.integer): - return _dtypes[str(np_dtype)] + return STR_TO_DTYPE[str(np_dtype)] return None def __from_arrow__( @@ -214,7 +214,7 @@ def coerce_to_array( if not issubclass(type(dtype), _IntegerDtype): try: - dtype = _dtypes[str(np.dtype(dtype))] + dtype = STR_TO_DTYPE[str(np.dtype(dtype))] except KeyError as err: raise ValueError(f"invalid dtype specified {dtype}") from err @@ -354,7 +354,7 @@ class IntegerArray(BaseMaskedArray): @cache_readonly def dtype(self) -> _IntegerDtype: - return _dtypes[str(self._data.dtype)] + return STR_TO_DTYPE[str(self._data.dtype)] def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]): @@ -735,7 +735,7 @@ class UInt64Dtype(_IntegerDtype): __doc__ = _dtype_docstring.format(dtype="uint64") -_dtypes: Dict[str, _IntegerDtype] = { +STR_TO_DTYPE: Dict[str, _IntegerDtype] = { "int8": Int8Dtype(), "int16": Int16Dtype(), "int32": Int32Dtype(), diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index ba1b0b075936d..64ccc0be0a25d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1151,9 +1151,11 @@ def convert_dtypes( target_int_dtype = "Int64" if is_integer_dtype(input_array.dtype): - from pandas.core.arrays.integer import _dtypes + from pandas.core.arrays.integer import STR_TO_DTYPE - inferred_dtype = _dtypes.get(input_array.dtype.name, target_int_dtype) + inferred_dtype = STR_TO_DTYPE.get( + input_array.dtype.name, target_int_dtype + ) if not is_integer_dtype(input_array.dtype) and is_numeric_dtype( input_array.dtype ): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 1e3e56f4ff09f..8a55d438cf8d4 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -459,7 +459,7 @@ def f(self): @contextmanager -def group_selection_context(groupby: "_GroupBy"): +def group_selection_context(groupby: 
"BaseGroupBy"): """ Set / reset the group_selection_context. """ @@ -479,7 +479,7 @@ def group_selection_context(groupby: "_GroupBy"): ] -class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): +class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): _group_selection = None _apply_allowlist: FrozenSet[str] = frozenset() @@ -1212,7 +1212,7 @@ def _apply_filter(self, indices, dropna): OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) -class GroupBy(_GroupBy[FrameOrSeries]): +class GroupBy(BaseGroupBy[FrameOrSeries]): """ Class for grouping and aggregating relational data. diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f0b80c2852bd5..f269495f6011a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -312,9 +312,9 @@ def _is_dates_only(self) -> bool: ------- bool """ - from pandas.io.formats.format import _is_dates_only + from pandas.io.formats.format import is_dates_only - return self.tz is None and _is_dates_only(self._values) + return self.tz is None and is_dates_only(self._values) def __reduce__(self): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 7b5154756e613..44848e4d43909 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -26,7 +26,12 @@ from pandas.core.generic import NDFrame, _shared_docs from pandas.core.groupby.base import GroupByMixin from pandas.core.groupby.generic import SeriesGroupBy -from pandas.core.groupby.groupby import GroupBy, _GroupBy, _pipe_template, get_groupby +from pandas.core.groupby.groupby import ( + BaseGroupBy, + GroupBy, + _pipe_template, + get_groupby, +) from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper from pandas.core.indexes.api import Index @@ -40,7 +45,7 @@ _shared_docs_kwargs: Dict[str, str] = dict() -class Resampler(_GroupBy, ShallowMixin): +class Resampler(BaseGroupBy, ShallowMixin): """ Class for resampling 
datetimelike data, a groupby-like operation. See aggregate, transform, and apply functions on this object. diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 2bd36d8bff155..4282cb41c4e91 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -15,7 +15,7 @@ import pandas.core.common as common from pandas.core.window.common import _doc_template, _shared_docs, zsqrt -from pandas.core.window.rolling import _Rolling, flex_binary_moment +from pandas.core.window.rolling import RollingMixin, flex_binary_moment _bias_template = """ Parameters @@ -60,7 +60,7 @@ def get_center_of_mass( return float(comass) -class ExponentialMovingWindow(_Rolling): +class ExponentialMovingWindow(RollingMixin): r""" Provide exponential weighted (EW) functions. diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index ce4ab2f98c23d..46e002324ec75 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -5,10 +5,10 @@ from pandas.util._decorators import Appender, Substitution, doc from pandas.core.window.common import WindowGroupByMixin, _doc_template, _shared_docs -from pandas.core.window.rolling import _Rolling_and_Expanding +from pandas.core.window.rolling import RollingAndExpandingMixin -class Expanding(_Rolling_and_Expanding): +class Expanding(RollingAndExpandingMixin): """ Provide expanding transformations. 
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 5a7482076903c..648ab4d25be83 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1214,13 +1214,13 @@ def std(self, ddof=1, *args, **kwargs): return zsqrt(self.var(ddof=ddof, name="std", **kwargs)) -class _Rolling(_Window): +class RollingMixin(_Window): @property def _constructor(self): return Rolling -class _Rolling_and_Expanding(_Rolling): +class RollingAndExpandingMixin(RollingMixin): _shared_docs["count"] = dedent( r""" @@ -1917,7 +1917,7 @@ def _get_corr(a, b): ) -class Rolling(_Rolling_and_Expanding): +class Rolling(RollingAndExpandingMixin): @cache_readonly def is_datetimelike(self) -> bool: return isinstance( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 6781d98ded41d..77f2a53fc7fab 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1586,7 +1586,7 @@ def format_percentiles( return [i + "%" for i in out] -def _is_dates_only( +def is_dates_only( values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex] ) -> bool: # return a boolean if we are only dates (and don't have a timezone) @@ -1658,8 +1658,8 @@ def get_format_datetime64_from_values( # only accepts 1D values values = values.ravel() - is_dates_only = _is_dates_only(values) - if is_dates_only: + ido = is_dates_only(values) + if ido: return date_format or "%Y-%m-%d" return date_format @@ -1668,9 +1668,9 @@ class Datetime64TZFormatter(Datetime64Formatter): def _format_strings(self) -> List[str]: """ we by definition have a TZ """ values = self.values.astype(object) - is_dates_only = _is_dates_only(values) + ido = is_dates_only(values) formatter = self.formatter or get_format_datetime64( - is_dates_only, date_format=self.date_format + ido, date_format=self.date_format ) fmt_values = [formatter(x) for x in values] diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 2add2b8c62a4e..4a0e859535215 
100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -18,6 +18,39 @@ import tokenize from typing import IO, Callable, FrozenSet, Iterable, List, Set, Tuple +PRIVATE_IMPORTS_TO_IGNORE: Set[str] = { + "_extension_array_shared_docs", + "_index_shared_docs", + "_interval_shared_docs", + "_merge_doc", + "_shared_docs", + "_apply_docs", + "_new_Index", + "_new_PeriodIndex", + "_doc_template", + "_agg_template", + "_pipe_template", + "_get_version", + "__main__", + "_transform_template", + "_arith_doc_FRAME", + "_flex_comp_doc_FRAME", + "_make_flex_doc", + "_op_descriptions", + "_IntegerDtype", + "_use_inf_as_na", + "_get_plot_backend", + "_matplotlib", + "_arrow_utils", + "_registry", + "_get_offset", # TODO: remove after get_offset deprecation enforced + "_test_parse_iso8601", + "_json_normalize", # TODO: remove after deprecation is enforced + "_testing", + "_test_decorators", + "__version__", # check np.__version__ in compat.numpy.function +} + def _get_literal_string_prefix_len(token_string: str) -> int: """ @@ -164,6 +197,36 @@ def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str yield (node.lineno, f"Private function '{module_name}.{function_name}'") +def private_import_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: + """ + Checking that a private function is not imported across modules. + Parameters + ---------- + file_obj : IO + File-like object containing the Python code to validate. + Yields + ------ + line_number : int + Line number of import statement, that imports the private function. + msg : str + Explenation of the error. 
+ """ + contents = file_obj.read() + tree = ast.parse(contents) + + for node in ast.walk(tree): + if not (isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)): + continue + + for module in node.names: + module_name = module.name.split(".")[-1] + if module_name in PRIVATE_IMPORTS_TO_IGNORE: + continue + + if module_name.startswith("_"): + yield (node.lineno, f"Import of internal function {repr(module_name)}") + + def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: """ This test case is necessary after 'Black' (https://github.com/psf/black), @@ -419,6 +482,7 @@ def main( available_validation_types: List[str] = [ "bare_pytest_raises", "private_function_across_module", + "private_import_across_module", "strings_to_concatenate", "strings_with_wrong_placed_whitespace", ] @@ -449,7 +513,7 @@ def main( parser.add_argument( "--excluded-file-paths", default="asv_bench/env", - help="Comma separated file extensions to check.", + help="Comma separated file paths to exclude.", ) args = parser.parse_args()
Mostly this is an amalgam of #33479, #33394, and #33393.
https://api.github.com/repos/pandas-dev/pandas/pulls/36055
2020-09-01T22:20:34Z
2020-09-12T21:30:33Z
2020-09-12T21:30:33Z
2020-09-12T21:34:28Z
BUG: Don't raise when constructing Series from ordered set
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c6cfcc6730112..b8f6d0e52d058 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -35,6 +35,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :class:`Series` constructor incorrectly raising a ``TypeError`` when passed an ordered set (:issue:`36044`) - Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 9d6c2789af25b..3812c306b8eb4 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -438,7 +438,12 @@ def sanitize_array( subarr = subarr.copy() return subarr - elif isinstance(data, (list, tuple)) and len(data) > 0: + elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0: + if isinstance(data, set): + # Raise only for unordered sets, e.g., not for dict_keys + raise TypeError("Set type is unordered") + data = list(data) + if dtype is not None: subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: @@ -450,8 +455,6 @@ def sanitize_array( # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype="int64") subarr = _try_cast(arr, dtype, copy, raise_cast_failure) - elif isinstance(data, abc.Set): - 
raise TypeError("Set type is unordered") elif lib.is_scalar(data) and index is not None and dtype is not None: data = maybe_cast_to_datetime(data, dtype) if not lib.is_scalar(data): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index bcf7039ec9039..ce078059479b4 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1464,3 +1464,13 @@ def test_constructor_sparse_datetime64(self, values): arr = pd.arrays.SparseArray(values, dtype=dtype) expected = pd.Series(arr) tm.assert_series_equal(result, expected) + + def test_construction_from_ordered_collection(self): + # https://github.com/pandas-dev/pandas/issues/36044 + result = Series({"a": 1, "b": 2}.keys()) + expected = Series(["a", "b"]) + tm.assert_series_equal(result, expected) + + result = Series({"a": 1, "b": 2}.values()) + expected = Series([1, 2]) + tm.assert_series_equal(result, expected)
- [x] closes #36044 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This may or may not be a regression, since before the change that caused the error to be raised the output was still wrong (same bug that motivated the initial patch): ```python In [2]: keys = {'a': 1, 'b': 2}.keys() In [3]: pd.Series(keys) Out[3]: 0 (a, b) 1 (a, b) dtype: object ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36054
2020-09-01T22:14:11Z
2020-09-05T23:13:46Z
2020-09-05T23:13:45Z
2020-09-06T12:58:55Z
CLN: _wrap_applied_output
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7b45a114e548b..a92e3af0764a7 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1206,7 +1206,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): key_index = self.grouper.result_index if self.as_index else None if isinstance(first_not_none, Series): - # this is to silence a DeprecationWarning # TODO: Remove when default dtype of empty Series is object kwargs = first_not_none._construct_axes_dict() @@ -1218,16 +1217,26 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): v = values[0] - if isinstance(v, (np.ndarray, Index, Series)) or not self.as_index: + if not isinstance(v, (np.ndarray, Index, Series)) and self.as_index: + # values are not series or array-like but scalars + # self._selection_name not passed through to Series as the + # result should not take the name of original selection + # of columns + return self.obj._constructor_sliced(values, index=key_index) + + else: if isinstance(v, Series): - applied_index = self._selected_obj._get_axis(self.axis) all_indexed_same = all_indexes_same((x.index for x in values)) - singular_series = len(values) == 1 and applied_index.nlevels == 1 # GH3596 # provide a reduction (Frame -> Series) if groups are # unique if self.squeeze: + applied_index = self._selected_obj._get_axis(self.axis) + singular_series = ( + len(values) == 1 and applied_index.nlevels == 1 + ) + # assign the name to this series if singular_series: values[0].name = keys[0] @@ -1253,18 +1262,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH 8467 return self._concat_objects(keys, values, not_indexed_same=True) - # GH6124 if the list of Series have a consistent name, - # then propagate that name to the result. - index = v.index.copy() - if index.name is None: - # Only propagate the series name to the result - # if all series have a consistent name. 
If the - # series do not have a consistent name, do - # nothing. - names = {v.name for v in values} - if len(names) == 1: - index.name = list(names)[0] - # Combine values # vstack+constructor is faster than concat and handles MI-columns stacked_values = np.vstack([np.asarray(v) for v in values]) @@ -1313,13 +1310,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self._reindex_output(result) - # values are not series or array-like but scalars - else: - # self._selection_name not passed through to Series as the - # result should not take the name of original selection - # of columns - return self.obj._constructor_sliced(values, index=key_index) - def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs ):
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry 2nd step toward #35412. In this, the order of the largest if-else is switched with the condition negated. We can then drop the else entirely, resulting in much of the function having one less level of nesting.
https://api.github.com/repos/pandas-dev/pandas/pulls/36053
2020-09-01T22:05:45Z
2020-09-01T23:17:23Z
2020-09-01T23:17:23Z
2020-09-01T23:56:26Z
CLN remove unnecessary trailing commas in pandas/io
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index e4d9324ce5130..1a4ba544f5d59 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -244,7 +244,7 @@ class XportReader(ReaderBase, abc.Iterator): __doc__ = _xport_reader_doc def __init__( - self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None, + self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None ): self._encoding = encoding diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 0074ebc4decb0..34d520004cc65 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1980,7 +1980,7 @@ def _open_file_binary_write( compression_typ = infer_compression(fname, compression_typ) compression = dict(compression_args, method=compression_typ) ioargs = get_filepath_or_buffer( - fname, mode="wb", compression=compression, storage_options=storage_options, + fname, mode="wb", compression=compression, storage_options=storage_options ) f, _ = get_handle( ioargs.filepath_or_buffer,
@MarcoGorelli can you review this PR this is related to issue #35925, if this looks good to you I can open another PR - [x] pandas/io/sas/sas_xport.py - [x] pandas/io/stata.py - [x] passes `black pandas`
https://api.github.com/repos/pandas-dev/pandas/pulls/36052
2020-09-01T21:28:54Z
2020-09-03T16:33:47Z
2020-09-03T16:33:47Z
2020-09-03T20:04:49Z
BUG: frame._item_cache not cleared when Series is altered
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9b1ad658d4666..c52a956146fc2 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -32,6 +32,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3bad2d6dd18b9..7a5ba69902dfa 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3315,6 +3315,10 @@ def _maybe_update_cacher( if len(self) == len(ref): # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self) + else: + # GH#33675 we have swapped in a new array, so parent + # reference to self is now invalid + ref._item_cache.pop(cacher[0], None) if verify_is_copy: self._check_setitem_copy(stacklevel=5, t="referant") diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9bf5d24085697..b4f91590e09d1 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -135,13 +135,20 @@ def test_drop_and_dropna_caching(self): df2 = df.copy() df["A"].dropna() tm.assert_series_equal(df["A"], original) - return_value = df["A"].dropna(inplace=True) - tm.assert_series_equal(df["A"], expected) + + ser = df["A"] + return_value = ser.dropna(inplace=True) + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(df["A"], original) 
assert return_value is None + df2["A"].drop([1]) tm.assert_series_equal(df2["A"], original) - return_value = df2["A"].drop([1], inplace=True) - tm.assert_series_equal(df2["A"], original.drop([1])) + + ser = df2["A"] + return_value = ser.drop([1], inplace=True) + tm.assert_series_equal(ser, original.drop([1])) + tm.assert_series_equal(df2["A"], original) assert return_value is None def test_dropna_corner(self, float_frame): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index fa5fe5ba5c384..9910ef1b04b1a 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -81,6 +81,21 @@ def test_setitem_cache_updating(self): tm.assert_frame_equal(out, expected) tm.assert_series_equal(out["A"], expected["A"]) + def test_altering_series_clears_parent_cache(self): + # GH #33675 + df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) + ser = df["A"] + + assert "A" in df._item_cache + + # Adding a new entry to ser swaps in a new array, so "A" needs to + # be removed from df._item_cache + ser["c"] = 5 + assert len(ser) == 3 + assert "A" not in df._item_cache + assert df["A"] is not ser + assert len(df["A"]) == 2 + class TestChaining: def test_setitem_chained_setfault(self):
- [x] closes #33675 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36051
2020-09-01T21:16:16Z
2020-09-02T15:43:17Z
2020-09-02T15:43:17Z
2020-09-02T16:55:54Z
BUG: incorrect year returned in isocalendar for certain dates
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index ac9fe9d2fca26..0c95a1bf22dce 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -32,6 +32,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 6cce2f5e1fd95..d8c83daa661a3 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -201,10 +201,10 @@ cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil: iso_week = 1 iso_year = year - if iso_week == 1 and doy > 7: + if iso_week == 1 and month == 12: iso_year += 1 - elif iso_week >= 52 and doy < 7: + elif iso_week >= 52 and month == 1: iso_year -= 1 return iso_year, iso_week, dow + 1 diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index d2ad9c8c398ea..723bd303b1974 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -682,6 +682,9 @@ def test_setitem_with_different_tz(self): [[pd.NaT], [[np.NaN, np.NaN, np.NaN]]], [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]], [["2010-01-01", pd.NaT], [[2009, 53, 5], 
[np.NaN, np.NaN, np.NaN]]], + # see GH#36032 + [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]], + [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], ], ) def test_isocalendar(self, input_series, expected_output): diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py index aab86d3a2df69..1ff700fdc23a3 100644 --- a/pandas/tests/tslibs/test_ccalendar.py +++ b/pandas/tests/tslibs/test_ccalendar.py @@ -1,10 +1,13 @@ from datetime import date, datetime +from hypothesis import given, strategies as st import numpy as np import pytest from pandas._libs.tslibs import ccalendar +import pandas as pd + @pytest.mark.parametrize( "date_tuple,expected", @@ -48,3 +51,15 @@ def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tu expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() assert result == expected_from_date_isocalendar assert result == expected_iso_tuple + + +@given( + st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), + ) +) +def test_isocalendar(dt): + expected = dt.isocalendar() + result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) + assert result == expected
- [x] closes #36032 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36050
2020-09-01T20:56:01Z
2020-09-04T20:29:26Z
2020-09-04T20:29:25Z
2020-09-07T05:14:36Z
CLN: rename private functions used across modules
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 01fe98a6f5403..8ceba22b1f7a4 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -12,8 +12,8 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot -from pandas.plotting._matplotlib.style import _get_standard_colors -from pandas.plotting._matplotlib.tools import _flatten, _subplots +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import create_subplots, flatten_axes if TYPE_CHECKING: from matplotlib.axes import Axes @@ -84,7 +84,7 @@ def _validate_color_args(self): self.color = None # get standard colors for default - colors = _get_standard_colors(num_colors=3, colormap=self.colormap, color=None) + colors = get_standard_colors(num_colors=3, colormap=self.colormap, color=None) # use 2 colors by default, for box/whisker and median # flier colors isn't needed here # because it can be specified by ``sym`` kw @@ -200,11 +200,11 @@ def _grouped_plot_by_column( by = [by] columns = data._get_numeric_data().columns.difference(by) naxes = len(columns) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, sharex=True, sharey=True, figsize=figsize, ax=ax, layout=layout ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) ax_values = [] @@ -259,7 +259,7 @@ def _get_colors(): # num_colors=3 is required as method maybe_color_bp takes the colors # in positions 0 and 2. 
# if colors not provided, use same defaults as DataFrame.plot.box - result = _get_standard_colors(num_colors=3) + result = get_standard_colors(num_colors=3) result = np.take(result, [0, 0, 2]) result = np.append(result, "k") @@ -414,7 +414,7 @@ def boxplot_frame_groupby( ): if subplots is True: naxes = len(grouped) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, squeeze=False, ax=ax, @@ -423,7 +423,7 @@ def boxplot_frame_groupby( figsize=figsize, layout=layout, ) - axes = _flatten(axes) + axes = flatten_axes(axes) ret = pd.Series(dtype=object) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 93ba9bd26630b..5eae88d07c295 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -32,14 +32,14 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters -from pandas.plotting._matplotlib.style import _get_standard_colors +from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( - _flatten, - _get_all_lines, - _get_xlim, - _handle_shared_axes, - _subplots, + create_subplots, + flatten_axes, format_date_labels, + get_all_lines, + get_xlim, + handle_shared_axes, table, ) @@ -315,7 +315,7 @@ def _maybe_right_yaxis(self, ax: "Axes", axes_num): def _setup_subplots(self): if self.subplots: - fig, axes = _subplots( + fig, axes = create_subplots( naxes=self.nseries, sharex=self.sharex, sharey=self.sharey, @@ -334,7 +334,7 @@ def _setup_subplots(self): fig.set_size_inches(self.figsize) axes = self.ax - axes = _flatten(axes) + axes = flatten_axes(axes) valid_log = {False, True, "sym", None} input_log = {self.logx, self.logy, self.loglog} @@ -466,7 +466,7 @@ def _adorn_subplots(self): if len(self.axes) > 0: all_axes = self._get_subplots() nrows, ncols = self._get_axes_layout() - 
_handle_shared_axes( + handle_shared_axes( axarr=all_axes, nplots=len(all_axes), naxes=nrows * ncols, @@ -753,7 +753,7 @@ def _get_colors(self, num_colors=None, color_kwds="color"): if num_colors is None: num_colors = self.nseries - return _get_standard_colors( + return get_standard_colors( num_colors=num_colors, colormap=self.colormap, color=self.kwds.get(color_kwds), @@ -1132,8 +1132,8 @@ def _make_plot(self): # reset of xlim should be used for ts data # TODO: GH28021, should find a way to change view limit on xaxis - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) + lines = get_all_lines(ax) + left, right = get_xlim(lines) ax.set_xlim(left, right) @classmethod diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index ffd46d1b191db..89035552d4309 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -8,7 +8,11 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot -from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + set_ticks_props, +) if TYPE_CHECKING: from matplotlib.axes import Axes @@ -198,11 +202,11 @@ def _grouped_plot( grouped = grouped[column] naxes = len(grouped) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) for i, (key, group) in enumerate(grouped): ax = _axes[i] @@ -286,7 +290,7 @@ def plot_group(group, ax): rot=rot, ) - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) @@ -337,7 +341,7 @@ def hist_series( ax.grid(grid) axes = np.array([ax]) - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) @@ -419,7 +423,7 @@ def 
hist_frame( if naxes == 0: raise ValueError("hist method requires numerical columns, nothing to plot.") - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, ax=ax, squeeze=False, @@ -428,7 +432,7 @@ def hist_frame( figsize=figsize, layout=layout, ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) can_set_label = "label" not in kwds @@ -442,7 +446,7 @@ def hist_frame( if legend: ax.legend() - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) fig.subplots_adjust(wspace=0.3, hspace=0.3) diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index c5e7c55970c3e..a1c62f9fce23c 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -10,8 +10,8 @@ from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing -from pandas.plotting._matplotlib.style import _get_standard_colors -from pandas.plotting._matplotlib.tools import _set_ticks_props, _subplots +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import create_subplots, set_ticks_props if TYPE_CHECKING: from matplotlib.axes import Axes @@ -36,7 +36,7 @@ def scatter_matrix( df = frame._get_numeric_data() n = df.columns.size naxes = n * n - fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) + fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) # no gaps between subplots fig.subplots_adjust(wspace=0, hspace=0) @@ -112,7 +112,7 @@ def scatter_matrix( locs = locs.astype(int) axes[0][0].yaxis.set_ticklabels(locs) - _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) return axes @@ -147,7 +147,7 @@ def normalize(series): ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) to_plot: Dict[Label, List[List]] = {} - colors = _get_standard_colors( + colors = 
get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) @@ -255,7 +255,7 @@ def f(t): t = np.linspace(-np.pi, np.pi, samples) used_legends: Set[str] = set() - color_values = _get_standard_colors( + color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) colors = dict(zip(classes, color_values)) @@ -382,7 +382,7 @@ def parallel_coordinates( if ax is None: ax = plt.gca() - color_values = _get_standard_colors( + color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 5f1105f0e4233..904a760a03e58 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -10,7 +10,7 @@ import pandas.core.common as com -def _get_standard_colors( +def get_standard_colors( num_colors=None, colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 4d643ffb734e4..98aaab6838fba 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -100,7 +100,7 @@ def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int # copied from matplotlib/pyplot.py and modified for pandas.plotting -def _subplots( +def create_subplots( naxes: int, sharex: bool = False, sharey: bool = False, @@ -194,7 +194,7 @@ def _subplots( fig = plt.figure(**fig_kw) else: if is_list_like(ax): - ax = _flatten(ax) + ax = flatten_axes(ax) if layout is not None: warnings.warn( "When passing multiple axes, layout keyword is ignored", UserWarning @@ -221,7 +221,7 @@ def _subplots( if squeeze: return fig, ax else: - return fig, _flatten(ax) + return fig, flatten_axes(ax) else: warnings.warn( "To output multiple subplots, the figure containing " @@ -264,7 +264,7 @@ 
def _subplots( for ax in axarr[naxes:]: ax.set_visible(False) - _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), @@ -297,7 +297,7 @@ def _remove_labels_from_axis(axis: "Axis"): axis.get_label().set_visible(False) -def _handle_shared_axes( +def handle_shared_axes( axarr: Iterable["Axes"], nplots: int, naxes: int, @@ -351,7 +351,7 @@ def _handle_shared_axes( _remove_labels_from_axis(ax.yaxis) -def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: +def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: if not is_list_like(axes): return np.array([axes]) elif isinstance(axes, (np.ndarray, ABCIndexClass)): @@ -359,7 +359,7 @@ def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: return np.array(axes) -def _set_ticks_props( +def set_ticks_props( axes: Union["Axes", Sequence["Axes"]], xlabelsize=None, xrot=None, @@ -368,7 +368,7 @@ def _set_ticks_props( ): import matplotlib.pyplot as plt - for ax in _flatten(axes): + for ax in flatten_axes(axes): if xlabelsize is not None: plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) if xrot is not None: @@ -380,7 +380,7 @@ def _set_ticks_props( return axes -def _get_all_lines(ax: "Axes") -> List["Line2D"]: +def get_all_lines(ax: "Axes") -> List["Line2D"]: lines = ax.get_lines() if hasattr(ax, "right_ax"): @@ -392,7 +392,7 @@ def _get_all_lines(ax: "Axes") -> List["Line2D"]: return lines -def _get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]: +def get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]: left, right = np.inf, -np.inf for l in lines: x = l.get_xdata(orig=False) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 3b1ff233c5ec1..b753c96af6290 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -13,13 +13,13 
@@ from pandas import DataFrame, Series import pandas._testing as tm -""" -This is a common base class used for various plotting tests -""" - @td.skip_if_no_mpl class TestPlotBase: + """ + This is a common base class used for various plotting tests + """ + def setup_method(self, method): import matplotlib as mpl @@ -330,7 +330,7 @@ def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None): figsize : tuple expected figsize. default is matplotlib default """ - from pandas.plotting._matplotlib.tools import _flatten + from pandas.plotting._matplotlib.tools import flatten_axes if figsize is None: figsize = self.default_figsize @@ -343,7 +343,7 @@ def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None): assert len(ax.get_children()) > 0 if layout is not None: - result = self._get_axes_layout(_flatten(axes)) + result = self._get_axes_layout(flatten_axes(axes)) assert result == layout tm.assert_numpy_array_equal( @@ -370,9 +370,9 @@ def _flatten_visible(self, axes): axes : matplotlib Axes object, or its list-like """ - from pandas.plotting._matplotlib.tools import _flatten + from pandas.plotting._matplotlib.tools import flatten_axes - axes = _flatten(axes) + axes = flatten_axes(axes) axes = [ax for ax in axes if ax.get_visible()] return axes diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index f5c1c58f3f7ed..130acaa8bcd58 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -353,7 +353,7 @@ def test_get_standard_colors_random_seed(self): # GH17525 df = DataFrame(np.zeros((10, 10))) - # Make sure that the random seed isn't reset by _get_standard_colors + # Make sure that the random seed isn't reset by get_standard_colors plotting.parallel_coordinates(df, 0) rand1 = random.random() plotting.parallel_coordinates(df, 0) @@ -361,19 +361,19 @@ def test_get_standard_colors_random_seed(self): assert rand1 != rand2 # Make sure it produces the same colors every time 
it's called - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors - color1 = _get_standard_colors(1, color_type="random") - color2 = _get_standard_colors(1, color_type="random") + color1 = get_standard_colors(1, color_type="random") + color2 = get_standard_colors(1, color_type="random") assert color1 == color2 def test_get_standard_colors_default_num_colors(self): - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors # Make sure the default color_types returns the specified amount - color1 = _get_standard_colors(1, color_type="default") - color2 = _get_standard_colors(9, color_type="default") - color3 = _get_standard_colors(20, color_type="default") + color1 = get_standard_colors(1, color_type="default") + color2 = get_standard_colors(9, color_type="default") + color3 = get_standard_colors(20, color_type="default") assert len(color1) == 1 assert len(color2) == 9 assert len(color3) == 20 @@ -401,10 +401,10 @@ def test_get_standard_colors_no_appending(self): # correctly. 
from matplotlib import cm - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors color_before = cm.gnuplot(range(5)) - color_after = _get_standard_colors(1, color=color_before) + color_after = get_standard_colors(1, color=color_before) assert len(color_after) == len(color_before) df = DataFrame(np.random.randn(48, 4), columns=list("ABCD")) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index cc00626e992f3..c296e2a6278c5 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -809,53 +809,53 @@ def test_series_grid_settings(self): @pytest.mark.slow def test_standard_colors(self): - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors for c in ["r", "red", "green", "#FF0000"]: - result = _get_standard_colors(1, color=c) + result = get_standard_colors(1, color=c) assert result == [c] - result = _get_standard_colors(1, color=[c]) + result = get_standard_colors(1, color=[c]) assert result == [c] - result = _get_standard_colors(3, color=c) + result = get_standard_colors(3, color=c) assert result == [c] * 3 - result = _get_standard_colors(3, color=[c]) + result = get_standard_colors(3, color=[c]) assert result == [c] * 3 @pytest.mark.slow def test_standard_colors_all(self): import matplotlib.colors as colors - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors # multiple colors like mediumaquamarine for c in colors.cnames: - result = _get_standard_colors(num_colors=1, color=c) + result = get_standard_colors(num_colors=1, color=c) assert result == [c] - result = _get_standard_colors(num_colors=1, color=[c]) + result = get_standard_colors(num_colors=1, color=[c]) assert result == [c] - result = _get_standard_colors(num_colors=3, color=c) + result = 
get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 - result = _get_standard_colors(num_colors=3, color=[c]) + result = get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 # single letter colors like k for c in colors.ColorConverter.colors: - result = _get_standard_colors(num_colors=1, color=c) + result = get_standard_colors(num_colors=1, color=c) assert result == [c] - result = _get_standard_colors(num_colors=1, color=[c]) + result = get_standard_colors(num_colors=1, color=[c]) assert result == [c] - result = _get_standard_colors(num_colors=3, color=c) + result = get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 - result = _get_standard_colors(num_colors=3, color=[c]) + result = get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 def test_series_plot_color_kwargs(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/36049
2020-09-01T19:35:37Z
2020-09-02T17:54:27Z
2020-09-02T17:54:27Z
2020-09-02T18:27:14Z
CI: pin setuptools on 1.1.x
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index aa43d8b7dd00a..065f9e56ea171 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -148,7 +148,7 @@ python setup.py build_ext -q -i -j2 # - py35_compat # - py36_32bit echo "[Updating pip]" -python -m pip install --no-deps -U pip wheel setuptools +python -m pip install --no-deps -U pip wheel "setuptools<50.0.0" echo "[Install pandas]" python -m pip install --no-build-isolation -e .
top answer on stack overflow is to pin https://stackoverflow.com/questions/63663362/django-python3-on-install-i-get-parent-module-setuptools-not-loaded also see https://github.com/MacPython/pandas-wheels/pull/97#issuecomment-684938715 Note: 50.0.1 released a hour ago but still the same. https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=41325&view=logs&j=a67b4c4c-cd2e-5e3c-a361-de73ac9c05f9&t=9a6bfc0f-544f-57f9-291f-bf4b75b05642
https://api.github.com/repos/pandas-dev/pandas/pulls/36048
2020-09-01T18:42:16Z
2020-09-01T23:27:11Z
2020-09-01T23:27:11Z
2020-09-02T09:23:22Z
REF: simplify CSVFormatter
diff --git a/pandas/_typing.py b/pandas/_typing.py index b237013ac7805..7aef5c02e290f 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -15,6 +15,7 @@ List, Mapping, Optional, + Sequence, Type, TypeVar, Union, @@ -82,6 +83,7 @@ Axis = Union[str, int] Label = Optional[Hashable] +IndexLabel = Optional[Union[Label, Sequence[Label]]] Level = Union[Label, int] Ordered = Optional[bool] JSONSerializable = Optional[Union[PythonScalar, List, Dict]] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 93c945638a174..126692fb8e899 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -40,6 +40,7 @@ CompressionOptions, FilePathOrBuffer, FrameOrSeries, + IndexLabel, JSONSerializable, Label, Level, @@ -3160,7 +3161,7 @@ def to_csv( columns: Optional[Sequence[Label]] = None, header: Union[bool_t, List[str]] = True, index: bool_t = True, - index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None, + index_label: IndexLabel = None, mode: str = "w", encoding: Optional[str] = None, compression: CompressionOptions = "infer", diff --git a/pandas/io/common.py b/pandas/io/common.py index a80b89569f429..007e0dcbbcfe1 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -208,6 +208,21 @@ def get_filepath_or_buffer( # handle compression dict compression_method, compression = get_compression_method(compression) compression_method = infer_compression(filepath_or_buffer, compression_method) + + # GH21227 internal compression is not used for non-binary handles. 
+ if ( + compression_method + and hasattr(filepath_or_buffer, "write") + and mode + and "b" not in mode + ): + warnings.warn( + "compression has no effect when passing a non-binary object as input.", + RuntimeWarning, + stacklevel=2, + ) + compression_method = None + compression = dict(compression, method=compression_method) # bz2 and xz do not write the byte order mark for utf-16 and utf-32 diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 15cd5c026c6b6..90ab6f61f4d74 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -5,13 +5,18 @@ import csv as csvlib from io import StringIO, TextIOWrapper import os -from typing import Hashable, List, Optional, Sequence, Union -import warnings +from typing import Any, Dict, Hashable, Iterator, List, Optional, Sequence, Union import numpy as np from pandas._libs import writers as libwriters -from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions +from pandas._typing import ( + CompressionOptions, + FilePathOrBuffer, + IndexLabel, + Label, + StorageOptions, +) from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -21,6 +26,8 @@ ) from pandas.core.dtypes.missing import notna +from pandas.core.indexes.api import Index + from pandas.io.common import get_filepath_or_buffer, get_handle @@ -32,10 +39,10 @@ def __init__( sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, - cols=None, + cols: Optional[Sequence[Label]] = None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, - index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, + index_label: IndexLabel = None, mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", @@ -43,7 +50,7 @@ def __init__( quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, - quotechar='"', + quotechar: Optional[str] = '"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, @@ -52,16 
+59,19 @@ def __init__( ): self.obj = obj + self.encoding = encoding or "utf-8" + if path_or_buf is None: path_or_buf = StringIO() ioargs = get_filepath_or_buffer( path_or_buf, - encoding=encoding, + encoding=self.encoding, compression=compression, mode=mode, storage_options=storage_options, ) + self.compression = ioargs.compression.pop("method") self.compression_args = ioargs.compression self.path_or_buf = ioargs.filepath_or_buffer @@ -72,46 +82,79 @@ def __init__( self.na_rep = na_rep self.float_format = float_format self.decimal = decimal - self.header = header self.index = index self.index_label = index_label - if encoding is None: - encoding = "utf-8" - self.encoding = encoding self.errors = errors + self.quoting = quoting or csvlib.QUOTE_MINIMAL + self.quotechar = quotechar + self.doublequote = doublequote + self.escapechar = escapechar + self.line_terminator = line_terminator or os.linesep + self.date_format = date_format + self.cols = cols # type: ignore[assignment] + self.chunksize = chunksize # type: ignore[assignment] + + @property + def index_label(self) -> IndexLabel: + return self._index_label + + @index_label.setter + def index_label(self, index_label: IndexLabel) -> None: + if index_label is not False: + if index_label is None: + index_label = self._get_index_label_from_obj() + elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndexClass)): + # given a string for a DF with Index + index_label = [index_label] + self._index_label = index_label + + def _get_index_label_from_obj(self) -> List[str]: + if isinstance(self.obj.index, ABCMultiIndex): + return self._get_index_label_multiindex() + else: + return self._get_index_label_flat() + + def _get_index_label_multiindex(self) -> List[str]: + return [name or "" for name in self.obj.index.names] - if quoting is None: - quoting = csvlib.QUOTE_MINIMAL - self.quoting = quoting + def _get_index_label_flat(self) -> List[str]: + index_label = self.obj.index.name + return [""] if index_label is None 
else [index_label] - if quoting == csvlib.QUOTE_NONE: + @property + def quotechar(self) -> Optional[str]: + if self.quoting != csvlib.QUOTE_NONE: # prevents crash in _csv - quotechar = None - self.quotechar = quotechar + return self._quotechar + return None - self.doublequote = doublequote - self.escapechar = escapechar + @quotechar.setter + def quotechar(self, quotechar: Optional[str]) -> None: + self._quotechar = quotechar - self.line_terminator = line_terminator or os.linesep + @property + def has_mi_columns(self) -> bool: + return bool(isinstance(self.obj.columns, ABCMultiIndex)) - self.date_format = date_format + @property + def cols(self) -> Sequence[Label]: + return self._cols - self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) + @cols.setter + def cols(self, cols: Optional[Sequence[Label]]) -> None: + self._cols = self._refine_cols(cols) + def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]: # validate mi options if self.has_mi_columns: if cols is not None: - raise TypeError("cannot specify cols with a MultiIndex on the columns") + msg = "cannot specify cols with a MultiIndex on the columns" + raise TypeError(msg) if cols is not None: if isinstance(cols, ABCIndexClass): - cols = cols.to_native_types( - na_rep=na_rep, - float_format=float_format, - date_format=date_format, - quoting=self.quoting, - ) + cols = cols.to_native_types(**self._number_format) else: cols = list(cols) self.obj = self.obj.loc[:, cols] @@ -120,58 +163,90 @@ def __init__( # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): - cols = cols.to_native_types( - na_rep=na_rep, - float_format=float_format, - date_format=date_format, - quoting=self.quoting, - ) + return cols.to_native_types(**self._number_format) else: - cols = list(cols) + assert isinstance(cols, Sequence) + return list(cols) - # save it - self.cols = cols + @property + def _number_format(self) -> Dict[str, Any]: + """Dictionary used 
for storing number formatting settings.""" + return dict( + na_rep=self.na_rep, + float_format=self.float_format, + date_format=self.date_format, + quoting=self.quoting, + decimal=self.decimal, + ) - # preallocate data 2d list - ncols = self.obj.shape[-1] - self.data = [None] * ncols + @property + def chunksize(self) -> int: + return self._chunksize + @chunksize.setter + def chunksize(self, chunksize: Optional[int]) -> None: if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 - self.chunksize = int(chunksize) + assert chunksize is not None + self._chunksize = int(chunksize) - self.data_index = obj.index + @property + def data_index(self) -> Index: + data_index = self.obj.index if ( - isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) - and date_format is not None + isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) + and self.date_format is not None ): - from pandas import Index - - self.data_index = Index( - [x.strftime(date_format) if notna(x) else "" for x in self.data_index] + data_index = Index( + [x.strftime(self.date_format) if notna(x) else "" for x in data_index] ) + return data_index + + @property + def nlevels(self) -> int: + if self.index: + return getattr(self.data_index, "nlevels", 1) + else: + return 0 + + @property + def _has_aliases(self) -> bool: + return isinstance(self.header, (tuple, list, np.ndarray, ABCIndexClass)) + + @property + def _need_to_save_header(self) -> bool: + return bool(self._has_aliases or self.header) + + @property + def write_cols(self) -> Sequence[Label]: + if self._has_aliases: + assert not isinstance(self.header, bool) + if len(self.header) != len(self.cols): + raise ValueError( + f"Writing {len(self.cols)} cols but got {len(self.header)} aliases" + ) + else: + return self.header + else: + return self.cols + + @property + def encoded_labels(self) -> List[Label]: + encoded_labels: List[Label] = [] + + if self.index and self.index_label: + assert isinstance(self.index_label, 
Sequence) + encoded_labels = list(self.index_label) - self.nlevels = getattr(self.data_index, "nlevels", 1) - if not index: - self.nlevels = 0 + if not self.has_mi_columns or self._has_aliases: + encoded_labels += list(self.write_cols) + + return encoded_labels def save(self) -> None: """ Create the writer & save. """ - # GH21227 internal compression is not used for non-binary handles. - if ( - self.compression - and hasattr(self.path_or_buf, "write") - and "b" not in self.mode - ): - warnings.warn( - "compression has no effect when passing a non-binary object as input.", - RuntimeWarning, - stacklevel=2, - ) - self.compression = None - # get a handle or wrap an existing handle to take care of 1) compression and # 2) text -> byte conversion f, handles = get_handle( @@ -215,133 +290,63 @@ def save(self) -> None: for _fh in handles: _fh.close() - def _save_header(self): - writer = self.writer - obj = self.obj - index_label = self.index_label - cols = self.cols - has_mi_columns = self.has_mi_columns - header = self.header - encoded_labels: List[str] = [] - - has_aliases = isinstance(header, (tuple, list, np.ndarray, ABCIndexClass)) - if not (has_aliases or self.header): - return - if has_aliases: - if len(header) != len(cols): - raise ValueError( - f"Writing {len(cols)} cols but got {len(header)} aliases" - ) - else: - write_cols = header - else: - write_cols = cols - - if self.index: - # should write something for index label - if index_label is not False: - if index_label is None: - if isinstance(obj.index, ABCMultiIndex): - index_label = [] - for i, name in enumerate(obj.index.names): - if name is None: - name = "" - index_label.append(name) - else: - index_label = obj.index.name - if index_label is None: - index_label = [""] - else: - index_label = [index_label] - elif not isinstance( - index_label, (list, tuple, np.ndarray, ABCIndexClass) - ): - # given a string for a DF with Index - index_label = [index_label] - - encoded_labels = list(index_label) - else: - 
encoded_labels = [] - - if not has_mi_columns or has_aliases: - encoded_labels += list(write_cols) - writer.writerow(encoded_labels) - else: - # write out the mi - columns = obj.columns - - # write out the names for each level, then ALL of the values for - # each level - for i in range(columns.nlevels): - - # we need at least 1 index column to write our col names - col_line = [] - if self.index: - - # name is the first column - col_line.append(columns.names[i]) - - if isinstance(index_label, list) and len(index_label) > 1: - col_line.extend([""] * (len(index_label) - 1)) - - col_line.extend(columns._get_level_values(i)) - - writer.writerow(col_line) - - # Write out the index line if it's not empty. - # Otherwise, we will print out an extraneous - # blank line between the mi and the data rows. - if encoded_labels and set(encoded_labels) != {""}: - encoded_labels.extend([""] * len(columns)) - writer.writerow(encoded_labels) - def _save(self) -> None: - self._save_header() + if self._need_to_save_header: + self._save_header() + self._save_body() + def _save_header(self) -> None: + if not self.has_mi_columns or self._has_aliases: + self.writer.writerow(self.encoded_labels) + else: + for row in self._generate_multiindex_header_rows(): + self.writer.writerow(row) + + def _generate_multiindex_header_rows(self) -> Iterator[List[Label]]: + columns = self.obj.columns + for i in range(columns.nlevels): + # we need at least 1 index column to write our col names + col_line = [] + if self.index: + # name is the first column + col_line.append(columns.names[i]) + + if isinstance(self.index_label, list) and len(self.index_label) > 1: + col_line.extend([""] * (len(self.index_label) - 1)) + + col_line.extend(columns._get_level_values(i)) + yield col_line + + # Write out the index line if it's not empty. + # Otherwise, we will print out an extraneous + # blank line between the mi and the data rows. 
+ if self.encoded_labels and set(self.encoded_labels) != {""}: + yield self.encoded_labels + [""] * len(columns) + + def _save_body(self) -> None: nrows = len(self.data_index) - - # write in chunksize bites - chunksize = self.chunksize - chunks = int(nrows / chunksize) + 1 - + chunks = int(nrows / self.chunksize) + 1 for i in range(chunks): - start_i = i * chunksize - end_i = min((i + 1) * chunksize, nrows) + start_i = i * self.chunksize + end_i = min(start_i + self.chunksize, nrows) if start_i >= end_i: break - self._save_chunk(start_i, end_i) def _save_chunk(self, start_i: int, end_i: int) -> None: - data_index = self.data_index + ncols = self.obj.shape[-1] + data = [None] * ncols # create the data for a chunk slicer = slice(start_i, end_i) df = self.obj.iloc[slicer] - blocks = df._mgr.blocks - - for i in range(len(blocks)): - b = blocks[i] - d = b.to_native_types( - na_rep=self.na_rep, - float_format=self.float_format, - decimal=self.decimal, - date_format=self.date_format, - quoting=self.quoting, - ) - for col_loc, col in zip(b.mgr_locs, d): - # self.data is a preallocated list - self.data[col_loc] = col + for block in df._mgr.blocks: + d = block.to_native_types(**self._number_format) - ix = data_index.to_native_types( - slicer=slicer, - na_rep=self.na_rep, - float_format=self.float_format, - decimal=self.decimal, - date_format=self.date_format, - quoting=self.quoting, - ) + for col_loc, col in zip(block.mgr_locs, d): + data[col_loc] = col - libwriters.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) + ix = self.data_index.to_native_types(slicer=slicer, **self._number_format) + libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer)
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Refactor CSVFormatter ---------------------- 1. Put data validation in setters 2. Extract helper methods and properties
https://api.github.com/repos/pandas-dev/pandas/pulls/36046
2020-09-01T17:49:29Z
2020-09-09T10:33:11Z
2020-09-09T10:33:11Z
2020-11-06T15:35:00Z
BUG: NDFrame.replace wrong exception type, wrong return when size==0
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 407e8ba029ada..7460043d8c89e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -281,7 +281,7 @@ ExtensionArray Other ^^^^^ -- +- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3bad2d6dd18b9..ce2625f3ec15d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6179,8 +6179,8 @@ def replace( self, to_replace=None, value=None, - inplace=False, - limit=None, + inplace: bool_t = False, + limit: Optional[int] = None, regex=False, method="pad", ): @@ -6256,7 +6256,7 @@ def replace( If True, in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. - limit : int, default None + limit : int or None, default None Maximum size gap to forward or backward fill. 
regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular @@ -6490,7 +6490,7 @@ def replace( inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: - raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool") + raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is None: # passing a single value that is scalar like @@ -6550,12 +6550,14 @@ def replace( # need a non-zero len on all axes if not self.size: - return self + if inplace: + return + return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of - # `in foo`is needed for when we have a Series and not dict + # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index f78a28c66e946..ccaa005369a1c 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -397,6 +397,29 @@ def test_replace_invalid_to_replace(self): with pytest.raises(TypeError, match=msg): series.replace(lambda x: x.strip()) + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_nonbool_regex(self, frame): + obj = pd.Series(["a", "b", "c "]) + if frame: + obj = obj.to_frame() + + msg = "'to_replace' must be 'None' if 'regex' is not a bool" + with pytest.raises(ValueError, match=msg): + obj.replace(to_replace=["a"], regex="foo") + + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_empty_copy(self, frame): + obj = pd.Series([], dtype=np.float64) + if frame: + obj = obj.to_frame() + + res = obj.replace(4, 5, inplace=True) + assert res is None + + res = obj.replace(4, 5, inplace=False) + tm.assert_equal(res, obj) + assert res is not obj + def 
test_replace_only_one_dictlike_arg(self): # GH#33340
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36045
2020-09-01T17:04:47Z
2020-09-02T03:19:27Z
2020-09-02T03:19:27Z
2020-09-02T16:57:21Z
DOC: add semicolons to suppress text repr of matplotlib objects in visualization.rst
diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index a6c3d9814b03d..9eb6ac4629901 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -44,7 +44,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around ts = ts.cumsum() @savefig series_plot_basic.png - ts.plot() + ts.plot(); If the index consists of dates, it calls :meth:`gcf().autofmt_xdate() <matplotlib.figure.Figure.autofmt_xdate>` to try to format the x-axis nicely as per above. @@ -82,7 +82,7 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in df3["A"] = pd.Series(list(range(len(df)))) @savefig df_plot_xy.png - df3.plot(x="A", y="B") + df3.plot(x="A", y="B"); .. note:: @@ -162,8 +162,8 @@ For labeled, non-time series data, you may wish to produce a bar plot: plt.figure(); @savefig bar_plot_ex.png - df.iloc[5].plot.bar() - plt.axhline(0, color="k") + df.iloc[5].plot.bar(); + plt.axhline(0, color="k"); Calling a DataFrame's :meth:`plot.bar() <DataFrame.plot.bar>` method produces a multiple bar plot: @@ -229,7 +229,7 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri plt.figure(); @savefig hist_new.png - df4.plot.hist(alpha=0.5) + df4.plot.hist(alpha=0.5); .. ipython:: python @@ -245,7 +245,7 @@ using the ``bins`` keyword. plt.figure(); @savefig hist_new_stacked.png - df4.plot.hist(stacked=True, bins=20) + df4.plot.hist(stacked=True, bins=20); .. ipython:: python :suppress: @@ -261,7 +261,7 @@ horizontal and cumulative histograms can be drawn by plt.figure(); @savefig hist_new_kwargs.png - df4["a"].plot.hist(orientation="horizontal", cumulative=True) + df4["a"].plot.hist(orientation="horizontal", cumulative=True); .. ipython:: python :suppress: @@ -279,7 +279,7 @@ The existing interface ``DataFrame.hist`` to plot histogram still can be used. plt.figure(); @savefig hist_plot_ex.png - df["A"].diff().hist() + df["A"].diff().hist(); .. 
ipython:: python :suppress: @@ -291,10 +291,10 @@ subplots: .. ipython:: python - plt.figure() + plt.figure(); @savefig frame_hist_ex.png - df.diff().hist(color="k", alpha=0.5, bins=50) + df.diff().hist(color="k", alpha=0.5, bins=50); The ``by`` keyword can be specified to plot grouped histograms: @@ -311,7 +311,7 @@ The ``by`` keyword can be specified to plot grouped histograms: data = pd.Series(np.random.randn(1000)) @savefig grouped_hist.png - data.hist(by=np.random.randint(0, 4, 1000), figsize=(6, 4)) + data.hist(by=np.random.randint(0, 4, 1000), figsize=(6, 4)); .. _visualization.box: @@ -336,7 +336,7 @@ a uniform random variable on [0,1). df = pd.DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"]) @savefig box_plot_new.png - df.plot.box() + df.plot.box(); Boxplot can be colorized by passing ``color`` keyword. You can pass a ``dict`` whose keys are ``boxes``, ``whiskers``, ``medians`` and ``caps``. @@ -361,7 +361,7 @@ more complicated colorization, you can get each drawn artists by passing } @savefig box_new_colorize.png - df.plot.box(color=color, sym="r+") + df.plot.box(color=color, sym="r+"); .. ipython:: python :suppress: @@ -375,7 +375,7 @@ For example, horizontal and custom-positioned boxplot can be drawn by .. ipython:: python @savefig box_new_kwargs.png - df.plot.box(vert=False, positions=[1, 4, 5, 6, 8]) + df.plot.box(vert=False, positions=[1, 4, 5, 6, 8]); See the :meth:`boxplot <matplotlib.axes.Axes.boxplot>` method and the @@ -622,7 +622,7 @@ too dense to plot each point individually. df["b"] = df["b"] + np.arange(1000) @savefig hexbin_plot.png - df.plot.hexbin(x="a", y="b", gridsize=25) + df.plot.hexbin(x="a", y="b", gridsize=25); A useful keyword argument is ``gridsize``; it controls the number of hexagons @@ -651,7 +651,7 @@ given by column ``z``. The bins are aggregated with NumPy's ``max`` function. 
df["z"] = np.random.uniform(0, 3, 1000) @savefig hexbin_plot_agg.png - df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25) + df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25); .. ipython:: python :suppress: @@ -682,7 +682,7 @@ A ``ValueError`` will be raised if there are any negative values in your data. series = pd.Series(3 * np.random.rand(4), index=["a", "b", "c", "d"], name="series") @savefig series_pie_plot.png - series.plot.pie(figsize=(6, 6)) + series.plot.pie(figsize=(6, 6)); .. ipython:: python :suppress: @@ -713,7 +713,7 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it. ) @savefig df_pie_plot.png - df.plot.pie(subplots=True, figsize=(8, 4)) + df.plot.pie(subplots=True, figsize=(8, 4)); .. ipython:: python :suppress: @@ -746,7 +746,7 @@ Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used. autopct="%.2f", fontsize=20, figsize=(6, 6), - ) + ); If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle. @@ -762,7 +762,7 @@ If you pass values whose sum total is less than 1.0, matplotlib draws a semicirc series = pd.Series([0.1] * 4, index=["a", "b", "c", "d"], name="series2") @savefig series_pie_plot_semi.png - series.plot.pie(figsize=(6, 6)) + series.plot.pie(figsize=(6, 6)); See the `matplotlib pie documentation <https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more. @@ -862,7 +862,7 @@ You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFr ser = pd.Series(np.random.randn(1000)) @savefig kde_plot.png - ser.plot.kde() + ser.plot.kde(); .. ipython:: python :suppress: @@ -889,10 +889,10 @@ of the same class will usually be closer together and form larger structures. data = pd.read_csv("data/iris.data") - plt.figure() + plt.figure(); @savefig andrews_curves.png - andrews_curves(data, "Name") + andrews_curves(data, "Name"); .. 
_visualization.parallel_coordinates: @@ -913,10 +913,10 @@ represents one data point. Points that tend to cluster will appear closer togeth data = pd.read_csv("data/iris.data") - plt.figure() + plt.figure(); @savefig parallel_coordinates.png - parallel_coordinates(data, "Name") + parallel_coordinates(data, "Name"); .. ipython:: python :suppress: @@ -943,13 +943,13 @@ be passed, and when ``lag=1`` the plot is essentially ``data[:-1]`` vs. from pandas.plotting import lag_plot - plt.figure() + plt.figure(); spacing = np.linspace(-99 * np.pi, 99 * np.pi, num=1000) data = pd.Series(0.1 * np.random.rand(1000) + 0.9 * np.sin(spacing)) @savefig lag_plot.png - lag_plot(data) + lag_plot(data); .. ipython:: python :suppress: @@ -980,13 +980,13 @@ autocorrelation plots. from pandas.plotting import autocorrelation_plot - plt.figure() + plt.figure(); spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) data = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) @savefig autocorrelation_plot.png - autocorrelation_plot(data) + autocorrelation_plot(data); .. ipython:: python :suppress: @@ -1016,7 +1016,7 @@ are what constitutes the bootstrap plot. data = pd.Series(np.random.rand(1000)) @savefig bootstrap_plot.png - bootstrap_plot(data, size=50, samples=500, color="grey") + bootstrap_plot(data, size=50, samples=500, color="grey"); .. ipython:: python :suppress: @@ -1049,10 +1049,10 @@ for more information. data = pd.read_csv("data/iris.data") - plt.figure() + plt.figure(); @savefig radviz.png - radviz(data, "Name") + radviz(data, "Name"); .. ipython:: python :suppress: @@ -1117,7 +1117,7 @@ shown by default. df = df.cumsum() @savefig frame_plot_basic_noleg.png - df.plot(legend=False) + df.plot(legend=False); .. ipython:: python :suppress: @@ -1137,11 +1137,11 @@ it empty for ylabel. .. ipython:: python :suppress: - plt.figure() + plt.figure(); .. 
ipython:: python - df.plot() + df.plot(); @savefig plot_xlabel_ylabel.png df.plot(xlabel="new x", ylabel="new y") @@ -1169,7 +1169,7 @@ You may pass ``logy`` to get a log-scale Y axis. ts = np.exp(ts.cumsum()) @savefig series_plot_logy.png - ts.plot(logy=True) + ts.plot(logy=True); .. ipython:: python :suppress: @@ -1190,10 +1190,10 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword: .. ipython:: python - df["A"].plot() + df["A"].plot(); @savefig series_plot_secondary_y.png - df["B"].plot(secondary_y=True, style="g") + df["B"].plot(secondary_y=True, style="g"); .. ipython:: python :suppress: @@ -1205,11 +1205,11 @@ keyword: .. ipython:: python - plt.figure() + plt.figure(); ax = df.plot(secondary_y=["A", "B"]) - ax.set_ylabel("CD scale") + ax.set_ylabel("CD scale"); @savefig frame_plot_secondary_y.png - ax.right_ax.set_ylabel("AB scale") + ax.right_ax.set_ylabel("AB scale"); .. ipython:: python :suppress: @@ -1222,10 +1222,10 @@ with "(right)" in the legend. To turn off the automatic marking, use the .. ipython:: python - plt.figure() + plt.figure(); @savefig frame_plot_secondary_y_no_right.png - df.plot(secondary_y=["A", "B"], mark_right=False) + df.plot(secondary_y=["A", "B"], mark_right=False); .. ipython:: python :suppress: @@ -1259,10 +1259,10 @@ Here is the default behavior, notice how the x-axis tick labeling is performed: .. ipython:: python - plt.figure() + plt.figure(); @savefig ser_plot_suppress.png - df["A"].plot() + df["A"].plot(); .. ipython:: python :suppress: @@ -1273,10 +1273,10 @@ Using the ``x_compat`` parameter, you can suppress this behavior: .. ipython:: python - plt.figure() + plt.figure(); @savefig ser_plot_suppress_parm.png - df["A"].plot(x_compat=True) + df["A"].plot(x_compat=True); .. ipython:: python :suppress: @@ -1288,7 +1288,7 @@ in ``pandas.plotting.plot_params`` can be used in a ``with`` statement: .. 
ipython:: python - plt.figure() + plt.figure(); @savefig ser_plot_suppress_context.png with pd.plotting.plot_params.use("x_compat", True): @@ -1467,7 +1467,7 @@ Here is an example of one way to easily plot group means with standard deviation # Plot fig, ax = plt.subplots() @savefig errorbar_example.png - means.plot.bar(yerr=errors, ax=ax, capsize=4, rot=0) + means.plot.bar(yerr=errors, ax=ax, capsize=4, rot=0); .. ipython:: python :suppress: @@ -1493,7 +1493,7 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and : ax.xaxis.tick_top() # Display x-axis ticks on top. @savefig line_plot_table_true.png - df.plot(table=True, ax=ax) + df.plot(table=True, ax=ax); .. ipython:: python :suppress: @@ -1511,7 +1511,7 @@ as seen in the example below. ax.xaxis.tick_top() # Display x-axis ticks on top. @savefig line_plot_table_data.png - df.plot(table=np.round(df.T, 2), ax=ax) + df.plot(table=np.round(df.T, 2), ax=ax); .. ipython:: python :suppress: @@ -1529,10 +1529,10 @@ matplotlib `table <https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes fig, ax = plt.subplots(1, 1) - table(ax, np.round(df.describe(), 2), loc="upper right", colWidths=[0.2, 0.2, 0.2]) + table(ax, np.round(df.describe(), 2), loc="upper right", colWidths=[0.2, 0.2, 0.2]); @savefig line_plot_table_describe.png - df.plot(ax=ax, ylim=(0, 2), legend=None) + df.plot(ax=ax, ylim=(0, 2), legend=None); .. ipython:: python :suppress: @@ -1571,10 +1571,10 @@ To use the cubehelix colormap, we can pass ``colormap='cubehelix'``. df = pd.DataFrame(np.random.randn(1000, 10), index=ts.index) df = df.cumsum() - plt.figure() + plt.figure(); @savefig cubehelix.png - df.plot(colormap="cubehelix") + df.plot(colormap="cubehelix"); .. ipython:: python :suppress: @@ -1587,10 +1587,10 @@ Alternatively, we can pass the colormap itself: from matplotlib import cm - plt.figure() + plt.figure(); @savefig cubehelix_cm.png - df.plot(colormap=cm.cubehelix) + df.plot(colormap=cm.cubehelix); .. 
ipython:: python :suppress: @@ -1609,10 +1609,10 @@ Colormaps can also be used other plot types, like bar charts: dd = pd.DataFrame(np.random.randn(10, 10)).applymap(abs) dd = dd.cumsum() - plt.figure() + plt.figure(); @savefig greens.png - dd.plot.bar(colormap="Greens") + dd.plot.bar(colormap="Greens"); .. ipython:: python :suppress: @@ -1623,10 +1623,10 @@ Parallel coordinates charts: .. ipython:: python - plt.figure() + plt.figure(); @savefig parallel_gist_rainbow.png - parallel_coordinates(data, "Name", colormap="gist_rainbow") + parallel_coordinates(data, "Name", colormap="gist_rainbow"); .. ipython:: python :suppress: @@ -1637,10 +1637,10 @@ Andrews curves charts: .. ipython:: python - plt.figure() + plt.figure(); @savefig andrews_curve_winter.png - andrews_curves(data, "Name", colormap="winter") + andrews_curves(data, "Name", colormap="winter"); .. ipython:: python :suppress: @@ -1676,12 +1676,12 @@ when plotting a large number of points. ma = price.rolling(20).mean() mstd = price.rolling(20).std() - plt.figure() + plt.figure(); - plt.plot(price.index, price, "k") - plt.plot(ma.index, ma, "b") + plt.plot(price.index, price, "k"); + plt.plot(ma.index, ma, "b"); @savefig bollinger.png - plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd, color="b", alpha=0.2) + plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd, color="b", alpha=0.2); .. ipython:: python :suppress:
add semicolons (";") after the plotting function to suppress unnecessary debug messages similar to: <matplotlib.axes._subplots.AxesSubplot at 0x7fe278bb5160> - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36043
2020-09-01T15:46:11Z
2020-10-10T17:28:43Z
2020-10-10T17:28:43Z
2022-07-15T23:39:51Z
Backport PR #35999 on branch 1.1.x (BUG: None in Float64Index raising TypeError, should return False)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index b0d375a52f8ac..ad5f647928738 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -29,7 +29,7 @@ Bug fixes - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) -- +- Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index d6659cc1895b1..569562f5b5037 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -80,7 +80,11 @@ cdef class IndexEngine: values = self._get_index_values() self._check_type(val) - loc = _bin_search(values, val) # .searchsorted(val, side='left') + try: + loc = _bin_search(values, val) # .searchsorted(val, side='left') + except TypeError: + # GH#35788 e.g. 
val=None with float64 values + raise KeyError(val) if loc >= len(values): raise KeyError(val) if values[loc] != val: diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py index 473e370c76f8b..508bd2f566507 100644 --- a/pandas/tests/indexes/numeric/test_indexing.py +++ b/pandas/tests/indexes/numeric/test_indexing.py @@ -228,6 +228,12 @@ def test_take_fill_value_ints(self, klass): class TestContains: + @pytest.mark.parametrize("klass", [Float64Index, Int64Index, UInt64Index]) + def test_contains_none(self, klass): + # GH#35788 should return False, not raise TypeError + index = klass([0, 1, 2, 3, 4]) + assert None not in index + def test_contains_float64_nans(self): index = Float64Index([1.0, 2.0, np.nan]) assert np.nan in index
Backport PR #35999: BUG: None in Float64Index raising TypeError, should return False
https://api.github.com/repos/pandas-dev/pandas/pulls/36041
2020-09-01T15:03:13Z
2020-09-01T16:57:17Z
2020-09-01T16:57:17Z
2020-09-01T16:57:17Z
Backport PR #35938 on branch 1.1.x (REGR: Fix comparison broadcasting over array of Intervals)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index b0d375a52f8ac..c74976b14e814 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) +- Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 6867e8aba7411..40bd5ad8f5a1f 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -358,6 +358,11 @@ cdef class Interval(IntervalMixin): self_tuple = (self.left, self.right, self.closed) other_tuple = (other.left, other.right, other.closed) return PyObject_RichCompare(self_tuple, other_tuple, op) + elif util.is_array(other): + return np.array( + [PyObject_RichCompare(self, x, op) for x in other], + dtype=bool, + ) return NotImplemented diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 8603bff0587b6..83dfd42ae2a6e 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1581,3 +1581,10 @@ def test_replace_with_compiled_regex(self): result = df.replace({regex: "z"}, regex=True) expected = pd.DataFrame(["z", "b", "c"]) tm.assert_frame_equal(result, expected) + + def test_replace_intervals(self): + # https://github.com/pandas-dev/pandas/issues/35931 + df = pd.DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]}) + result = df.replace({"a": {pd.Interval(0, 1): "x"}}) + expected = 
pd.DataFrame({"a": ["x", "x"]}) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/scalar/interval/test_arithmetic.py b/pandas/tests/scalar/interval/test_arithmetic.py index 5252f1a4d5a24..b4c2b448e252a 100644 --- a/pandas/tests/scalar/interval/test_arithmetic.py +++ b/pandas/tests/scalar/interval/test_arithmetic.py @@ -45,3 +45,15 @@ def test_numeric_interval_add_timedelta_raises(interval, delta): with pytest.raises((TypeError, ValueError), match=msg): delta + interval + + +@pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta]) +def test_timdelta_add_timestamp_interval(klass): + delta = klass(0) + expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01")) + + result = delta + expected + assert result == expected + + result = expected + delta + assert result == expected diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py index a0151bb9ac7bf..8ad9a2c7a9c70 100644 --- a/pandas/tests/scalar/interval/test_interval.py +++ b/pandas/tests/scalar/interval/test_interval.py @@ -2,6 +2,7 @@ import pytest from pandas import Interval, Period, Timedelta, Timestamp +import pandas._testing as tm import pandas.core.common as com @@ -267,3 +268,11 @@ def test_constructor_errors_tz(self, tz_left, tz_right): msg = "left and right must have the same time zone" with pytest.raises(error, match=msg): Interval(left, right) + + def test_equality_comparison_broadcasts_over_array(self): + # https://github.com/pandas-dev/pandas/issues/35931 + interval = Interval(0, 1) + arr = np.array([interval, interval]) + result = interval == arr + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected)
Backport PR #35938: REGR: Fix comparison broadcasting over array of Intervals
https://api.github.com/repos/pandas-dev/pandas/pulls/36039
2020-09-01T14:56:52Z
2020-09-01T18:29:08Z
2020-09-01T18:29:08Z
2020-09-01T18:29:08Z
BUG: 35977 Adding regex support for ExtensionBlock replace method
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e690334a36c5b..a6fa579318272 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -685,9 +685,11 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ + - Fixed Bug where :class:`DataFrame` column set to scalar extension type via a dict instantion was considered an object type rather than the extension type (:issue:`35965`) - Fixed bug where ``astype()`` with equal dtype and ``copy=False`` would return a new object (:issue:`284881`) - Fixed bug when applying a NumPy ufunc with multiple outputs to a :class:`pandas.arrays.IntegerArray` returning None (:issue:`36913`) +- Fixed bug in :meth:`Dataframe.replace` now returns correct result for `regex=True`` with ``string`` dtype (:issue:`35977`) - Fixed an inconsistency in :class:`PeriodArray`'s ``__init__`` signature to those of :class:`DatetimeArray` and :class:`TimedeltaArray` (:issue:`37289`) - Reductions for :class:`BooleanArray`, :class:`Categorical`, :class:`DatetimeArray`, :class:`FloatingArray`, :class:`IntegerArray`, :class:`PeriodArray`, :class:`TimedeltaArray`, and :class:`PandasArray` are now keyword-only methods (:issue:`37541`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 967e218078a28..cd936e9d0acec 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2029,6 +2029,32 @@ def _unstack(self, unstacker, fill_value, new_placement): ] return blocks, mask + def replace( + self, + to_replace, + value, + inplace: bool = False, + regex: bool = False + ): + """ + replace the to_replace value with value, regex is not supported by super class + when regex is required ObjectBlock replace method is called + """ + inplace = validate_bool_kwarg(inplace, "inplace") + regex = validate_bool_kwarg(regex, "regex") + if regex: + dtype = self.values.dtype + block = self.astype(object) + if not inplace: + return [ + b.astype(dtype) + for b in 
block._replace_regex(to_replace, value, inplace, regex) + ] + block._replace_regex(to_replace, value, inplace, regex) + return block.astype(dtype) + else: + return super().replace(to_replace, value, inplace, regex) + class ObjectValuesExtensionBlock(ExtensionBlock): """ diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 8f3dcc96ddc3d..88a3f768fd20d 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1638,3 +1638,41 @@ def test_replace_unicode(self): result = df1.replace(columns_values_map) expected = DataFrame({"positive": np.ones(3)}) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "to_replace,value,expected,inplace", + [ + (r"^\s*$", pd.NA, + DataFrame({"col1": ["d", "ee", "f", pd.NA]}), False), + (r"e{2}", "rep", + DataFrame({"col1": ["d", "rep", "f", ""]}), False), + (r"f", "replace", + DataFrame({"col1": ["d", "ee", "replace", ""]}), False), + (r"^\s*$", pd.NA, + DataFrame({"col1": ["d", "ee", "f", pd.NA]}), True), + (r"e{2}", "replace", + DataFrame({"col1": ["d", "replace", "f", ""]}), True), + (r"f", "replace", + DataFrame({"col1": ["d", "ee", "replace", ""]}), True), + ], + ) + def test_replace_regex(self, to_replace, value, expected, inplace): + # GH35977 + df = DataFrame({"col1": ["d", "ee", "f", ""]}, dtype="string") + df_replaced = df.replace(to_replace, value, inplace=inplace, regex=True) + result = df if inplace else df_replaced + tm.assert_frame_equal(result, expected.astype("string")) + + @pytest.mark.parametrize( + "to_replace,value,expected", + [ + ("", pd.NA, DataFrame({"col1": ["d", "ee", "f", pd.NA]})), + ("ee", "replace", DataFrame({"col1": ["d", "replace", "f", ""]})), + ("f", "replace", DataFrame({"col1": ["d", "ee", "replace", ""]})), + ], + ) + def test_replace_string(self, to_replace, value, expected): + # GH35977 + df = DataFrame({"col1": ["d", "ee", "f", ""]}, dtype="string") + result = 
df.replace(to_replace, value, inplace=False, regex=False) + tm.assert_frame_equal(result, expected.astype("string"))
- [x] closes #35977 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Overrides replace method to add support of regex in ExtensionBlock . If input to_replace is a regex we convert data type of block to Object and calls object replace method which supports regex otherwise we call replace method of super class Block. ```ipython In [2]: b.replace(r'^\s*$', pd.NA, regex=True, inplace=True) ...: print(b) a b 0 a d 1 b <NA> 2 c <NA> In [3]: b.replace(r'^\s*$', pd.NA, regex=True, inplace=False) Out[3]: a b 0 a d 1 b <NA> 2 c <NA> ```
https://api.github.com/repos/pandas-dev/pandas/pulls/36038
2020-09-01T14:37:13Z
2021-02-11T01:38:59Z
null
2021-02-11T01:38:59Z
CI: pin s3fs for Windows py37_np18 on 1.1.x
diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 287d6877b9810..4d134b43760fe 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -29,7 +29,7 @@ dependencies: - pytables - python-dateutil - pytz - - s3fs>=0.4.0 + - s3fs>=0.4.0,<0.5.0 - scipy - sqlalchemy - xlrd
https://api.github.com/repos/pandas-dev/pandas/pulls/36035
2020-09-01T13:52:17Z
2020-09-01T15:26:05Z
2020-09-01T15:26:05Z
2020-09-01T15:26:59Z
TYP: Postponed Evaluation of Annotations (PEP 563)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6d6bb21165814..b6603e1a9636b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -2,6 +2,8 @@ Generic data algorithms. This module is experimental at the moment and not intended for public consumption """ +from __future__ import annotations + import operator from textwrap import dedent from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union @@ -682,7 +684,7 @@ def value_counts( normalize: bool = False, bins=None, dropna: bool = True, -) -> "Series": +) -> Series: """ Compute a histogram of the counts of non-null values. @@ -824,7 +826,7 @@ def duplicated(values, keep="first") -> np.ndarray: return f(values, keep=keep) -def mode(values, dropna: bool = True) -> "Series": +def mode(values, dropna: bool = True) -> Series: """ Returns the mode(s) of an array. @@ -1136,7 +1138,7 @@ class SelectNSeries(SelectN): nordered : Series """ - def compute(self, method: str) -> "Series": + def compute(self, method: str) -> Series: n = self.n dtype = self.obj.dtype @@ -1210,7 +1212,7 @@ def __init__(self, obj, n: int, keep: str, columns): columns = list(columns) self.columns = columns - def compute(self, method: str) -> "DataFrame": + def compute(self, method: str) -> DataFrame: from pandas import Int64Index diff --git a/pandas/core/construction.py b/pandas/core/construction.py index f145e76046bee..02b8ed17244cd 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -4,6 +4,7 @@ These should not depend on core.internals. 
""" +from __future__ import annotations from collections import abc from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast @@ -49,16 +50,14 @@ import pandas.core.common as com if TYPE_CHECKING: - from pandas.core.arrays import ExtensionArray # noqa: F401 - from pandas.core.indexes.api import Index # noqa: F401 - from pandas.core.series import Series # noqa: F401 + from pandas import ExtensionArray, Index, Series def array( data: Union[Sequence[object], AnyArrayLike], dtype: Optional[Dtype] = None, copy: bool = True, -) -> "ExtensionArray": +) -> ExtensionArray: """ Create an array. @@ -389,7 +388,7 @@ def extract_array(obj, extract_numpy: bool = False): def sanitize_array( data, - index: Optional["Index"], + index: Optional[Index], dtype: Optional[DtypeObj] = None, copy: bool = False, raise_cast_failure: bool = False, @@ -594,13 +593,13 @@ def is_empty_data(data: Any) -> bool: def create_series_with_explicit_dtype( data: Any = None, - index: Optional[Union[ArrayLike, "Index"]] = None, + index: Optional[Union[ArrayLike, Index]] = None, dtype: Optional[Dtype] = None, name: Optional[str] = None, copy: bool = False, fastpath: bool = False, dtype_if_empty: Dtype = object, -) -> "Series": +) -> Series: """ Helper to pass an explicit dtype when instantiating an empty Series. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 312d449e36022..2d95917049b32 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8,6 +8,7 @@ alignment and a host of useful data manipulation methods having to do with the labeling information """ +from __future__ import annotations import collections from collections import abc @@ -885,7 +886,7 @@ def to_string( # ---------------------------------------------------------------------- @property - def style(self) -> "Styler": + def style(self) -> Styler: """ Returns a Styler object. 
@@ -6530,7 +6531,7 @@ def groupby( squeeze: bool = no_default, observed: bool = False, dropna: bool = True, - ) -> "DataFrameGroupBy": + ) -> DataFrameGroupBy: from pandas.core.groupby.generic import DataFrameGroupBy if squeeze is not no_default: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3bad2d6dd18b9..42b4f06ee5334 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import collections from datetime import timedelta import functools @@ -110,7 +112,7 @@ from pandas._libs.tslibs import BaseOffset from pandas.core.resample import Resampler - from pandas.core.series import Series # noqa: F401 + from pandas.core.series import Series from pandas.core.window.indexers import BaseIndexer # goal is to be able to define the docs close to function, while still being @@ -391,7 +393,7 @@ def _get_block_manager_axis(cls, axis: Axis) -> int: return m - axis return axis - def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]: + def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]]: # index or columns axis_index = getattr(self, axis) d = dict() @@ -421,10 +423,10 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex d[axis] = dindex return d - def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]: + def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]: from pandas.core.computation.parsing import clean_column_name - d: Dict[str, Union["Series", MultiIndex]] = {} + d: Dict[str, Union[Series, MultiIndex]] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) @@ -660,7 +662,7 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: result = self.set_axis(new_labels, axis=axis, inplace=False) return result - def pop(self, item: Label) -> Union["Series", Any]: + def pop(self, item: Label) -> Union[Series, Any]: result = self[item] 
del self[item] if self.ndim == 2: @@ -7678,7 +7680,7 @@ def resample( level=None, origin: Union[str, TimestampConvertibleTypes] = "start_day", offset: Optional[TimedeltaConvertibleTypes] = None, - ) -> "Resampler": + ) -> Resampler: """ Resample time-series data. @@ -10451,7 +10453,7 @@ def mad(self, axis=None, skipna=None, level=None): @doc(Rolling) def rolling( self, - window: "Union[int, timedelta, BaseOffset, BaseIndexer]", + window: Union[int, timedelta, BaseOffset, BaseIndexer], min_periods: Optional[int] = None, center: bool_t = False, win_type: Optional[str] = None,
a few files as POC/for discussion from https://www.python.org/dev/peps/pep-0563/ > PEP 3107 added support for arbitrary annotations on parts of a function definition. Just like default values, annotations are evaluated at function definition time. This creates a number of issues for the type hinting use case: > - forward references: when a type hint contains names that have not been defined yet, that definition needs to be expressed as a string literal; > - type hints are executed at module import time, which is not computationally free. > Postponing the evaluation of annotations solves both problems.
https://api.github.com/repos/pandas-dev/pandas/pulls/36034
2020-09-01T10:47:32Z
2020-09-02T21:31:48Z
2020-09-02T21:31:48Z
2020-09-03T08:51:04Z
remove trailing commas for #35925
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 7f0eef039a1e8..f2ce2f056ce82 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -128,7 +128,7 @@ def write( self.api.parquet.write_table(table, path, compression=compression, **kwargs) def read( - self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + self, path, columns=None, storage_options: StorageOptions = None, **kwargs ): if is_fsspec_url(path) and "filesystem" not in kwargs: import_optional_dependency("fsspec") @@ -218,7 +218,7 @@ def write( ) def read( - self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + self, path, columns=None, storage_options: StorageOptions = None, **kwargs ): if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 983aa56324083..9ad527684120e 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1967,10 +1967,6 @@ def _do_date_conversions(self, names, data): class CParserWrapper(ParserBase): - """ - - """ - def __init__(self, src, **kwds): self.kwds = kwds kwds = kwds.copy() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f08e0514a68e1..0913627324c48 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2931,7 +2931,7 @@ def read_index_node( # If the index was an empty array write_array_empty() will # have written a sentinel. Here we replace it with the original. 
if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0: - data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type,) + data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type) kind = _ensure_decoded(node._v_attrs.kind) name = None @@ -4103,7 +4103,7 @@ def create_description( return d def read_coordinates( - self, where=None, start: Optional[int] = None, stop: Optional[int] = None, + self, where=None, start: Optional[int] = None, stop: Optional[int] = None ): """ select coordinates (row numbers) from a table; return the @@ -4374,7 +4374,7 @@ def write_data_chunk( self.table.flush() def delete( - self, where=None, start: Optional[int] = None, stop: Optional[int] = None, + self, where=None, start: Optional[int] = None, stop: Optional[int] = None ): # delete all rows (and return the nrows) @@ -4805,7 +4805,7 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index if inferred_type == "date": converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) return IndexCol( - name, converted, "date", _tables().Time32Col(), index_name=index_name, + name, converted, "date", _tables().Time32Col(), index_name=index_name ) elif inferred_type == "string": @@ -4821,13 +4821,13 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index elif inferred_type in ["integer", "floating"]: return IndexCol( - name, values=converted, kind=kind, typ=atom, index_name=index_name, + name, values=converted, kind=kind, typ=atom, index_name=index_name ) else: assert isinstance(converted, np.ndarray) and converted.dtype == object assert kind == "object", kind atom = _tables().ObjectAtom() - return IndexCol(name, converted, kind, atom, index_name=index_name,) + return IndexCol(name, converted, kind, atom, index_name=index_name) def _unconvert_index(
- [x] pandas/io/parquet.py - [x] pandas/io/parsers.py - [x] pandas/io/pytables.py
https://api.github.com/repos/pandas-dev/pandas/pulls/36029
2020-09-01T07:15:29Z
2020-09-01T16:36:51Z
2020-09-01T16:36:51Z
2020-09-01T16:46:34Z
TYP: add type annotation to expr.py #36027
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 125ecb0d88036..5d0ea0f729514 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -6,7 +6,7 @@ from functools import partial, reduce from keyword import iskeyword import tokenize -from typing import Callable, Optional, Set, Tuple, Type, TypeVar +from typing import Callable, Dict, Optional, Set, Tuple, Type, TypeVar import numpy as np @@ -364,7 +364,7 @@ class BaseExprVisitor(ast.NodeVisitor): unary_ops = _unary_ops_syms unary_op_nodes = "UAdd", "USub", "Invert", "Not" - unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + unary_op_nodes_map: Dict[str, str] = dict(zip(list(unary_ops), unary_op_nodes)) rewrite_map = { ast.Eq: ast.In,
- [x] closes #36027 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/36028
2020-09-01T05:27:11Z
2020-09-01T15:39:26Z
null
2020-09-02T01:05:48Z
DOC: Update documentation for pd.Interval if string endpoints are not allowed anymore
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 40bd5ad8f5a1f..931ad8326c371 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -291,12 +291,6 @@ cdef class Interval(IntervalMixin): True >>> year_2017.length Timedelta('365 days 00:00:00') - - And also you can create string intervals - - >>> volume_1 = pd.Interval('Ant', 'Dog', closed='both') - >>> 'Bee' in volume_1 - True """ _typ = "interval" __array_priority__ = 1000
- [X] closes #36002 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Removed outdated example in documentation for pd.Interval (string endpoints are not allowed anymore).
https://api.github.com/repos/pandas-dev/pandas/pulls/36026
2020-09-01T05:19:17Z
2020-09-01T16:15:27Z
2020-09-01T16:15:26Z
2020-09-01T16:15:33Z
TYP: add type annotation to `_xlwt.py` #36024
diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index 78efe77e9fe2d..e1f72eb533c51 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -1,8 +1,13 @@ +from typing import TYPE_CHECKING, Dict + import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter from pandas.io.excel._util import _validate_freeze_panes +if TYPE_CHECKING: + from xlwt import XFStyle + class _XlwtWriter(ExcelWriter): engine = "xlwt" @@ -29,12 +34,11 @@ def save(self): """ Save workbook to disk. """ - return self.book.save(self.path) + self.book.save(self.path) def write_cells( self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None ): - # Write the frame cells using xlwt. sheet_name = self._get_sheet_name(sheet_name) @@ -49,7 +53,7 @@ def write_cells( wks.set_horz_split_pos(freeze_panes[0]) wks.set_vert_split_pos(freeze_panes[1]) - style_dict = {} + style_dict: Dict[str, XFStyle] = {} for cell in cells: val, fmt = self._value_with_fmt(cell.val) @@ -101,14 +105,14 @@ def _style_to_xlwt( f"{key}: {cls._style_to_xlwt(value, False)}" for key, value in item.items() ] - out = f"{(line_sep).join(it)} " + out = f"{line_sep.join(it)} " return out else: it = [ f"{key} {cls._style_to_xlwt(value, False)}" for key, value in item.items() ] - out = f"{(field_sep).join(it)} " + out = f"{field_sep.join(it)} " return out else: item = f"{item}" diff --git a/setup.cfg b/setup.cfg index c10624d60aaff..2447a91f88f4e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -223,9 +223,6 @@ check_untyped_defs=False [mypy-pandas.io.excel._util] check_untyped_defs=False -[mypy-pandas.io.excel._xlwt] -check_untyped_defs=False - [mypy-pandas.io.formats.console] check_untyped_defs=False
- [x] closes #36024 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` pandas\io\excel\_xlwt.py:52: error: Need type annotation for 'style_dict' (hint: "style_dict: Dict[<type>, <type>] = ...") [var-annotated]
https://api.github.com/repos/pandas-dev/pandas/pulls/36025
2020-09-01T05:01:30Z
2020-09-01T16:06:11Z
2020-09-01T16:06:11Z
2020-09-02T01:06:26Z
Comma cleanup for #35925
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 52a1e3aae9058..b0ba0d991c9b0 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -86,11 +86,7 @@ def wrapper(x): result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal( - result0, - frame.apply(wrapper), - check_dtype=check_dtype, - rtol=rtol, - atol=atol, + result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol ) # HACK: win32 tm.assert_series_equal( @@ -116,7 +112,7 @@ def wrapper(x): if opname in ["sum", "prod"]: expected = frame.apply(skipna_wrapper, axis=1) tm.assert_series_equal( - result1, expected, check_dtype=False, rtol=rtol, atol=atol, + result1, expected, check_dtype=False, rtol=rtol, atol=atol ) # check dtypes diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c8f5b2b0f6364..0d1004809f7f1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -932,7 +932,7 @@ def test_constructor_mrecarray(self): # from GH3479 assert_fr_equal = functools.partial( - tm.assert_frame_equal, check_index_type=True, check_column_type=True, + tm.assert_frame_equal, check_index_type=True, check_column_type=True ) arrays = [ ("float", np.array([1.5, 2.0])), diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 6a8f1e7c1aca2..d80ebaa09b6a8 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -417,7 +417,7 @@ def test_unstack_mixed_type_name_in_multiindex( result = df.unstack(unstack_idx) expected = pd.DataFrame( - expected_values, columns=expected_columns, index=expected_index, + expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) @@ -807,7 +807,7 @@ def test_unstack_multi_level_cols(self): [["B", "C"], ["B", "D"]], names=["c1", "c2"] ), 
index=pd.MultiIndex.from_tuples( - [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"], + [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"] ), ) assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
Comma cleanup for #35925
https://api.github.com/repos/pandas-dev/pandas/pulls/36023
2020-09-01T03:16:46Z
2020-09-01T12:38:28Z
2020-09-01T12:38:28Z
2020-09-01T18:22:18Z
TYP/CLN: cleanup `_openpyxl.py`, add type annotation #36021
diff --git a/ci/deps/azure-37-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml index 8000f3e6b9a9c..fbb1ea671d696 100644 --- a/ci/deps/azure-37-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -18,7 +18,7 @@ dependencies: - lxml - matplotlib=3.0.0 - numpy=1.16.* - - openpyxl=2.5.7 + - openpyxl=2.6.0 - python-dateutil - python-blosc - pytz=2017.3 diff --git a/ci/deps/azure-37-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml index 05b1957198bc4..31f82f3304db3 100644 --- a/ci/deps/azure-37-minimum_versions.yaml +++ b/ci/deps/azure-37-minimum_versions.yaml @@ -19,7 +19,7 @@ dependencies: - numba=0.46.0 - numexpr=2.6.8 - numpy=1.16.5 - - openpyxl=2.5.7 + - openpyxl=2.6.0 - pytables=3.4.4 - python-dateutil=2.7.3 - pytz=2017.3 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 4c270117e079e..c9ac1b0d284a3 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -274,7 +274,7 @@ html5lib 1.0.1 HTML parser for read_html (see :ref lxml 4.3.0 HTML parser for read_html (see :ref:`note <optional_html>`) matplotlib 2.2.3 Visualization numba 0.46.0 Alternative execution engine for rolling operations -openpyxl 2.5.7 Reading / writing for xlsx files +openpyxl 2.6.0 Reading / writing for xlsx files pandas-gbq 0.12.0 Google Big Query access psycopg2 2.7 PostgreSQL engine for sqlalchemy pyarrow 0.15.0 Parquet, ORC, and feather reading / writing diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 1617bf66c4f04..76bebd4a9a1cb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -109,7 +109,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | numba | 0.46.0 | | +-----------------+-----------------+---------+ -| openpyxl | 2.5.7 | | +| openpyxl | 2.6.0 | X | +-----------------+-----------------+---------+ | pyarrow | 0.15.0 | X 
| +-----------------+-----------------+---------+ diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index c2730536af8a3..3c67902d41baa 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,4 +1,4 @@ -from typing import List +from typing import TYPE_CHECKING, Dict, List, Optional import numpy as np @@ -8,6 +8,9 @@ from pandas.io.excel._base import ExcelWriter, _BaseExcelReader from pandas.io.excel._util import _validate_freeze_panes +if TYPE_CHECKING: + from openpyxl.descriptors.serialisable import Serialisable + class _OpenpyxlWriter(ExcelWriter): engine = "openpyxl" @@ -22,53 +25,22 @@ def __init__(self, path, engine=None, mode="w", **engine_kwargs): if self.mode == "a": # Load from existing workbook from openpyxl import load_workbook - book = load_workbook(self.path) - self.book = book + self.book = load_workbook(self.path) else: # Create workbook object with default optimized_write=True. self.book = Workbook() if self.book.worksheets: - try: - self.book.remove(self.book.worksheets[0]) - except AttributeError: - - # compat - for openpyxl <= 2.4 - self.book.remove_sheet(self.book.worksheets[0]) + self.book.remove(self.book.worksheets[0]) def save(self): """ Save workbook to disk. """ - return self.book.save(self.path) - - @classmethod - def _convert_to_style(cls, style_dict): - """ - Converts a style_dict to an openpyxl style object. 
- - Parameters - ---------- - style_dict : style dictionary to convert - """ - from openpyxl.style import Style - - xls_style = Style() - for key, value in style_dict.items(): - for nk, nv in value.items(): - if key == "borders": - ( - xls_style.borders.__getattribute__(nk).__setattr__( - "border_style", nv - ) - ) - else: - xls_style.__getattribute__(key).__setattr__(nk, nv) - - return xls_style + self.book.save(self.path) @classmethod - def _convert_to_style_kwargs(cls, style_dict): + def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]: """ Convert a style_dict to a set of kwargs suitable for initializing or updating-on-copy an openpyxl v2 style object. @@ -93,7 +65,7 @@ def _convert_to_style_kwargs(cls, style_dict): """ _style_key_map = {"borders": "border"} - style_kwargs = {} + style_kwargs: Dict[str, Serialisable] = {} for k, v in style_dict.items(): if k in _style_key_map: k = _style_key_map[k] @@ -404,7 +376,7 @@ def write_cells( # Write the frame cells using openpyxl. 
sheet_name = self._get_sheet_name(sheet_name) - _style_cache = {} + _style_cache: Dict[str, Dict[str, Serialisable]] = {} if sheet_name in self.sheets: wks = self.sheets[sheet_name] @@ -426,7 +398,7 @@ def write_cells( if fmt: xcell.number_format = fmt - style_kwargs = {} + style_kwargs: Optional[Dict[str, Serialisable]] = {} if cell.style: key = str(cell.style) style_kwargs = _style_cache.get(key) @@ -515,16 +487,17 @@ def get_sheet_by_index(self, index: int): def _convert_cell(self, cell, convert_float: bool) -> Scalar: - # TODO: replace with openpyxl constants + from openpyxl.cell.cell import TYPE_BOOL, TYPE_ERROR, TYPE_NUMERIC + if cell.is_date: return cell.value - elif cell.data_type == "e": + elif cell.data_type == TYPE_ERROR: return np.nan - elif cell.data_type == "b": + elif cell.data_type == TYPE_BOOL: return bool(cell.value) elif cell.value is None: return "" # compat with xlrd - elif cell.data_type == "n": + elif cell.data_type == TYPE_NUMERIC: # GH5394 if convert_float: val = int(cell.value) diff --git a/setup.cfg b/setup.cfg index c10624d60aaff..e346a625911c5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -217,9 +217,6 @@ check_untyped_defs=False [mypy-pandas.io.excel._base] check_untyped_defs=False -[mypy-pandas.io.excel._openpyxl] -check_untyped_defs=False - [mypy-pandas.io.excel._util] check_untyped_defs=False
- [x] closes #36021 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/36022
2020-09-01T02:58:11Z
2020-09-01T19:36:21Z
2020-09-01T19:36:21Z
2020-09-02T01:04:42Z
REF: implement Block._replace_list
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 1b42df1b0147c..ad388ef3f53b0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -788,6 +788,43 @@ def _replace_single(self, *args, **kwargs): """ no-op on a non-ObjectBlock """ return self if kwargs["inplace"] else self.copy() + def _replace_list( + self, + src_list: List[Any], + dest_list: List[Any], + masks: List[np.ndarray], + inplace: bool = False, + regex: bool = False, + ) -> List["Block"]: + """ + See BlockManager._replace_list docstring. + """ + src_len = len(src_list) - 1 + + rb = [self if inplace else self.copy()] + for i, (src, dest) in enumerate(zip(src_list, dest_list)): + new_rb: List["Block"] = [] + for blk in rb: + m = masks[i][blk.mgr_locs.indexer] + convert = i == src_len # only convert once at the end + result = blk._replace_coerce( + mask=m, + to_replace=src, + value=dest, + inplace=inplace, + convert=convert, + regex=regex, + ) + if m.any() or convert: + if isinstance(result, list): + new_rb.extend(result) + else: + new_rb.append(result) + else: + new_rb.append(blk) + rb = new_rb + return rb + def setitem(self, indexer, value): """ Attempt self.values[indexer] = value, possibly creating a new array. 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 00321b76cb6bf..389252e7ef0f2 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -3,6 +3,7 @@ import operator import re from typing import ( + Any, DefaultDict, Dict, List, @@ -600,8 +601,12 @@ def replace(self, value, **kwargs) -> "BlockManager": return self.apply("replace", value=value, **kwargs) def replace_list( - self, src_list, dest_list, inplace: bool = False, regex: bool = False - ) -> "BlockManager": + self: T, + src_list: List[Any], + dest_list: List[Any], + inplace: bool = False, + regex: bool = False, + ) -> T: """ do a list replace """ inplace = validate_bool_kwarg(inplace, "inplace") @@ -625,34 +630,14 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False): masks = [comp(s, mask, regex) for s in src_list] - result_blocks = [] - src_len = len(src_list) - 1 - for blk in self.blocks: - - # its possible to get multiple result blocks here - # replace ALWAYS will return a list - rb = [blk if inplace else blk.copy()] - for i, (s, d) in enumerate(zip(src_list, dest_list)): - new_rb: List[Block] = [] - for b in rb: - m = masks[i][b.mgr_locs.indexer] - convert = i == src_len # only convert once at the end - result = b._replace_coerce( - mask=m, - to_replace=s, - value=d, - inplace=inplace, - convert=convert, - regex=regex, - ) - if m.any() or convert: - new_rb = _extend_blocks(result, new_rb) - else: - new_rb.append(b) - rb = new_rb - result_blocks.extend(rb) - - bm = type(self).from_blocks(result_blocks, self.axes) + bm = self.apply( + "_replace_list", + src_list=src_list, + dest_list=dest_list, + masks=masks, + inplace=inplace, + regex=regex, + ) bm._consolidate_inplace() return bm
So we can re-use BlockManager.apply for the block iteration.
https://api.github.com/repos/pandas-dev/pandas/pulls/36020
2020-09-01T01:30:38Z
2020-09-01T23:31:10Z
2020-09-01T23:31:10Z
2020-09-02T00:55:52Z
TYP: annotate plotting._matplotlib.misc
diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index bb6530b0f6412..c5e7c55970c3e 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -1,18 +1,27 @@ import random +from typing import TYPE_CHECKING, Dict, List, Optional, Set import matplotlib.lines as mlines import matplotlib.patches as patches import numpy as np +from pandas._typing import Label + from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import _set_ticks_props, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas import DataFrame, Series + def scatter_matrix( - frame, + frame: "DataFrame", alpha=0.5, figsize=None, ax=None, @@ -114,7 +123,14 @@ def _get_marker_compat(marker): return marker -def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): +def radviz( + frame: "DataFrame", + class_column, + ax: Optional["Axes"] = None, + color=None, + colormap=None, + **kwds, +) -> "Axes": import matplotlib.pyplot as plt def normalize(series): @@ -130,7 +146,7 @@ def normalize(series): if ax is None: ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) - to_plot = {} + to_plot: Dict[Label, List[List]] = {} colors = _get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) @@ -197,8 +213,14 @@ def normalize(series): def andrews_curves( - frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds -): + frame: "DataFrame", + class_column, + ax: Optional["Axes"] = None, + samples: int = 200, + color=None, + colormap=None, + **kwds, +) -> "Axes": import matplotlib.pyplot as plt def function(amplitudes): @@ -231,7 +253,7 @@ def f(t): classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-np.pi, np.pi, samples) 
- used_legends = set() + used_legends: Set[str] = set() color_values = _get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color @@ -256,7 +278,13 @@ def f(t): return ax -def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): +def bootstrap_plot( + series: "Series", + fig: Optional["Figure"] = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> "Figure": import matplotlib.pyplot as plt @@ -306,19 +334,19 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): def parallel_coordinates( - frame, + frame: "DataFrame", class_column, cols=None, - ax=None, + ax: Optional["Axes"] = None, color=None, use_columns=False, xticks=None, colormap=None, - axvlines=True, + axvlines: bool = True, axvlines_kwds=None, - sort_labels=False, + sort_labels: bool = False, **kwds, -): +) -> "Axes": import matplotlib.pyplot as plt if axvlines_kwds is None: @@ -333,7 +361,7 @@ def parallel_coordinates( else: df = frame[cols] - used_legends = set() + used_legends: Set[str] = set() ncols = len(df.columns) @@ -385,7 +413,9 @@ def parallel_coordinates( return ax -def lag_plot(series, lag=1, ax=None, **kwds): +def lag_plot( + series: "Series", lag: int = 1, ax: Optional["Axes"] = None, **kwds +) -> "Axes": # workaround because `c='b'` is hardcoded in matplotlib's scatter method import matplotlib.pyplot as plt @@ -402,7 +432,9 @@ def lag_plot(series, lag=1, ax=None, **kwds): return ax -def autocorrelation_plot(series, ax=None, **kwds): +def autocorrelation_plot( + series: "Series", ax: Optional["Axes"] = None, **kwds +) -> "Axes": import matplotlib.pyplot as plt n = len(series) diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 7990bff4f517c..5f1105f0e4233 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -11,7 +11,7 @@ def _get_standard_colors( - num_colors=None, colormap=None, color_type="default", color=None + num_colors=None, 
colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt
https://api.github.com/repos/pandas-dev/pandas/pulls/36017
2020-08-31T22:46:43Z
2020-09-01T16:39:55Z
2020-09-01T16:39:55Z
2020-09-01T16:43:16Z
TYP: Annotate plotting stacker
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index b33daf39de37c..01fe98a6f5403 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -1,4 +1,5 @@ from collections import namedtuple +from typing import TYPE_CHECKING import warnings from matplotlib.artist import setp @@ -14,6 +15,9 @@ from pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import _flatten, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + class BoxPlot(LinePlot): _kind = "box" @@ -150,7 +154,7 @@ def _make_plot(self): labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) - def _set_ticklabels(self, ax, labels): + def _set_ticklabels(self, ax: "Axes", labels): if self.orientation == "vertical": ax.set_xticklabels(labels) else: @@ -292,7 +296,7 @@ def maybe_color_bp(bp, **kwds): if not kwds.get("capprops"): setp(bp["caps"], color=colors[3], alpha=1) - def plot_group(keys, values, ax): + def plot_group(keys, values, ax: "Axes"): keys = [pprint_thing(x) for x in keys] values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] bp = ax.boxplot(values, **kwds) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 4d23a5e5fc249..93ba9bd26630b 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1,5 +1,5 @@ import re -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, List, Optional, Tuple import warnings from matplotlib.artist import Artist @@ -45,6 +45,7 @@ if TYPE_CHECKING: from matplotlib.axes import Axes + from matplotlib.axis import Axis class MPLPlot: @@ -68,16 +69,10 @@ def _kind(self): _pop_attributes = [ "label", "style", - "logy", - "logx", - "loglog", "mark_right", "stacked", ] _attr_defaults = { - "logy": False, - "logx": False, - "loglog": False, "mark_right": True, 
"stacked": False, } @@ -167,6 +162,9 @@ def __init__( self.legend_handles: List[Artist] = [] self.legend_labels: List[Label] = [] + self.logx = kwds.pop("logx", False) + self.logy = kwds.pop("logy", False) + self.loglog = kwds.pop("loglog", False) for attr in self._pop_attributes: value = kwds.pop(attr, self._attr_defaults.get(attr, None)) setattr(self, attr, value) @@ -283,11 +281,11 @@ def generate(self): def _args_adjust(self): pass - def _has_plotted_object(self, ax): + def _has_plotted_object(self, ax: "Axes") -> bool: """check whether ax has data""" return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 - def _maybe_right_yaxis(self, ax, axes_num): + def _maybe_right_yaxis(self, ax: "Axes", axes_num): if not self.on_right(axes_num): # secondary axes may be passed via ax kw return self._get_ax_layer(ax) @@ -523,7 +521,7 @@ def _adorn_subplots(self): raise ValueError(msg) self.axes[0].set_title(self.title) - def _apply_axis_properties(self, axis, rot=None, fontsize=None): + def _apply_axis_properties(self, axis: "Axis", rot=None, fontsize=None): """ Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed @@ -540,7 +538,7 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None): label.set_fontsize(fontsize) @property - def legend_title(self): + def legend_title(self) -> Optional[str]: if not isinstance(self.data.columns, ABCMultiIndex): name = self.data.columns.name if name is not None: @@ -591,7 +589,7 @@ def _make_legend(self): if ax.get_visible(): ax.legend(loc="best") - def _get_ax_legend_handle(self, ax): + def _get_ax_legend_handle(self, ax: "Axes"): """ Take in axes and return ax, legend and handle under different scenarios """ @@ -616,7 +614,7 @@ def plt(self): _need_to_set_index = False - def _get_xticks(self, convert_period=False): + def _get_xticks(self, convert_period: bool = False): index = self.data.index is_datetype = index.inferred_type in 
("datetime", "date", "datetime64", "time") @@ -646,7 +644,7 @@ def _get_xticks(self, convert_period=False): @classmethod @register_pandas_matplotlib_converters - def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): + def _plot(cls, ax: "Axes", x, y, style=None, is_errorbar: bool = False, **kwds): mask = isna(y) if mask.any(): y = np.ma.array(y) @@ -667,10 +665,10 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): if style is not None: args = (x, y, style) else: - args = (x, y) + args = (x, y) # type:ignore[assignment] return ax.plot(*args, **kwds) - def _get_index_name(self): + def _get_index_name(self) -> Optional[str]: if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names if com.any_not_none(*name): @@ -877,7 +875,7 @@ def _get_subplots(self): ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot) ] - def _get_axes_layout(self): + def _get_axes_layout(self) -> Tuple[int, int]: axes = self._get_subplots() x_set = set() y_set = set() @@ -916,15 +914,15 @@ def __init__(self, data, x, y, **kwargs): self.y = y @property - def nseries(self): + def nseries(self) -> int: return 1 - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): x, y = self.x, self.y ax.set_ylabel(pprint_thing(y)) ax.set_xlabel(pprint_thing(x)) - def _plot_colorbar(self, ax, **kwds): + def _plot_colorbar(self, ax: "Axes", **kwds): # Addresses issues #10611 and #10678: # When plotting scatterplots and hexbinplots in IPython # inline backend the colorbar axis height tends not to @@ -1080,7 +1078,7 @@ def __init__(self, data, **kwargs): if "x_compat" in self.kwds: self.x_compat = bool(self.kwds.pop("x_compat")) - def _is_ts_plot(self): + def _is_ts_plot(self) -> bool: # this is slightly deceptive return not self.x_compat and self.use_index and self._use_dynamic_x() @@ -1139,7 +1137,9 @@ def _make_plot(self): ax.set_xlim(left, right) @classmethod - def _plot(cls, ax, x, y, style=None, 
column_num=None, stacking_id=None, **kwds): + def _plot( + cls, ax: "Axes", x, y, style=None, column_num=None, stacking_id=None, **kwds + ): # column_num is used to get the target column from plotf in line and # area plots if column_num == 0: @@ -1183,7 +1183,7 @@ def _get_stacking_id(self): return None @classmethod - def _initialize_stacker(cls, ax, stacking_id, n): + def _initialize_stacker(cls, ax: "Axes", stacking_id, n: int): if stacking_id is None: return if not hasattr(ax, "_stacker_pos_prior"): @@ -1194,7 +1194,7 @@ def _initialize_stacker(cls, ax, stacking_id, n): ax._stacker_neg_prior[stacking_id] = np.zeros(n) @classmethod - def _get_stacked_values(cls, ax, stacking_id, values, label): + def _get_stacked_values(cls, ax: "Axes", stacking_id, values, label): if stacking_id is None: return values if not hasattr(ax, "_stacker_pos_prior"): @@ -1213,7 +1213,7 @@ def _get_stacked_values(cls, ax, stacking_id, values, label): ) @classmethod - def _update_stacker(cls, ax, stacking_id, values): + def _update_stacker(cls, ax: "Axes", stacking_id, values): if stacking_id is None: return if (values >= 0).all(): @@ -1221,7 +1221,7 @@ def _update_stacker(cls, ax, stacking_id, values): elif (values <= 0).all(): ax._stacker_neg_prior[stacking_id] += values - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): from matplotlib.ticker import FixedLocator def get_label(i): @@ -1276,7 +1276,7 @@ def __init__(self, data, **kwargs): @classmethod def _plot( cls, - ax, + ax: "Axes", x, y, style=None, @@ -1318,7 +1318,7 @@ def _plot( res = [rect] return res - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): LinePlot._post_plot_logic(self, ax, data) if self.ylim is None: @@ -1372,7 +1372,7 @@ def _args_adjust(self): self.left = np.array(self.left) @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds): return ax.bar(x, y, w, 
bottom=start, log=log, **kwds) @property @@ -1454,7 +1454,7 @@ def _make_plot(self): ) self._add_legend_handle(rect, label, index=i) - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): if self.use_index: str_index = [pprint_thing(key) for key in data.index] else: @@ -1466,7 +1466,7 @@ def _post_plot_logic(self, ax, data): self._decorate_ticks(ax, name, str_index, s_edge, e_edge) - def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): + def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge): ax.set_xlim((start_edge, end_edge)) if self.xticks is not None: @@ -1489,10 +1489,10 @@ def _start_base(self): return self.left @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds): return ax.barh(x, y, w, left=start, log=log, **kwds) - def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): + def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge): # horizontal bars ax.set_ylim((start_edge, end_edge)) ax.set_yticks(self.tick_pos) diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index ee41479b3c7c9..ffd46d1b191db 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + import numpy as np from pandas.core.dtypes.common import is_integer, is_list_like @@ -8,6 +10,9 @@ from pandas.plotting._matplotlib.core import LinePlot, MPLPlot from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + class HistPlot(LinePlot): _kind = "hist" @@ -90,7 +95,7 @@ def _make_plot_keywords(self, kwds, y): kwds["bins"] = self.bins return kwds - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): if self.orientation == "horizontal": ax.set_xlabel("Frequency") else:
In AreaPlot._plot we call `ax.fill_between`, which means `ax` must be an `Axes` object (and in particular, not an `Axis` object). This chases down all the other places we can infer `Axes` from that. Then some edits in an `__init__` to get mypy passing, and revert annotations of `_plot` in a few places because mypy complained about signature mismatch.
https://api.github.com/repos/pandas-dev/pandas/pulls/36016
2020-08-31T22:23:53Z
2020-09-01T17:28:03Z
2020-09-01T17:28:03Z
2020-09-01T17:41:26Z
BUG: PeriodIndex.get_loc incorrectly raising ValueError instead of KeyError
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 1617bf66c4f04..e7f9e8011bef6 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -217,7 +217,7 @@ Interval Indexing ^^^^^^^^ - +- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`) - - diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 18970ea0544e4..3017521c6a065 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -755,9 +755,9 @@ def is_in_obj(gpr) -> bool: return False try: return gpr is obj[gpr.name] - except (KeyError, IndexError, ValueError): - # TODO: ValueError: Given date string not likely a datetime. - # should be KeyError? + except (KeyError, IndexError): + # IndexError reached in e.g. test_skip_group_keys when we pass + # lambda here return False for i, (gpr, level) in enumerate(zip(keys, levels)): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 11334803d4583..cdb502199c6f1 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -504,7 +504,7 @@ def get_loc(self, key, method=None, tolerance=None): try: asdt, reso = parse_time_string(key, self.freq) - except DateParseError as err: + except (ValueError, DateParseError) as err: # A string with invalid format raise KeyError(f"Cannot interpret '{key}' as period") from err diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index b61d1d903f89a..d2499b85ad181 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -359,6 +359,22 @@ def test_get_loc2(self): ], ) + def test_get_loc_invalid_string_raises_keyerror(self): + # GH#34240 + pi = pd.period_range("2000", periods=3, 
name="A") + with pytest.raises(KeyError, match="A"): + pi.get_loc("A") + + ser = pd.Series([1, 2, 3], index=pi) + with pytest.raises(KeyError, match="A"): + ser.loc["A"] + + with pytest.raises(KeyError, match="A"): + ser["A"] + + assert "A" not in ser + assert "A" not in pi + class TestGetIndexer: def test_get_indexer(self):
- [x] closes #34240 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36015
2020-08-31T21:51:19Z
2020-09-01T23:32:01Z
2020-09-01T23:32:00Z
2020-09-02T01:02:52Z
Backport PR #36011 on branch 1.1.x (CI: suppress another setuptools warning)
diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index 04e841c05e44a..fe5fc3e21d960 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -25,6 +25,7 @@ # https://github.com/pandas-dev/pandas/issues/35252 "ignore:Distutils:UserWarning" ) +@pytest.mark.filterwarnings("ignore:Setuptools is replacing distutils:UserWarning") def test_show_versions(capsys): # gh-32041 pd.show_versions()
Backport PR #36011: CI: suppress another setuptools warning
https://api.github.com/repos/pandas-dev/pandas/pulls/36013
2020-08-31T16:28:59Z
2020-09-01T14:14:38Z
2020-09-01T14:14:38Z
2020-09-01T14:14:38Z
CI: Unpin MyPy
diff --git a/environment.yml b/environment.yml index 96f2c8d2086c7..4622aac1dc6f8 100644 --- a/environment.yml +++ b/environment.yml @@ -21,7 +21,7 @@ dependencies: - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions - flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files - isort>=5.2.1 # check that imports are in the right order - - mypy=0.730 + - mypy=0.782 - pycodestyle # used by flake8 # documentation diff --git a/pandas/_config/config.py b/pandas/_config/config.py index fb41b37980b2e..0b802f2cc9e69 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -460,9 +460,7 @@ def register_option( path = key.split(".") for k in path: - # NOTE: tokenize.Name is not a public constant - # error: Module has no attribute "Name" [attr-defined] - if not re.match("^" + tokenize.Name + "$", k): # type: ignore[attr-defined] + if not re.match("^" + tokenize.Name + "$", k): raise ValueError(f"{k} is not a valid identifier") if keyword.iskeyword(k): raise ValueError(f"{k} is a python keyword") diff --git a/pandas/core/common.py b/pandas/core/common.py index e7260a9923ee0..6fd4700ab7f3f 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -9,7 +9,7 @@ from datetime import datetime, timedelta from functools import partial import inspect -from typing import Any, Collection, Iterable, Iterator, List, Union +from typing import Any, Collection, Iterable, Iterator, List, Union, cast import warnings import numpy as np @@ -277,6 +277,11 @@ def maybe_iterable_to_list(obj: Union[Iterable[T], T]) -> Union[Collection[T], T """ if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized): return list(obj) + # error: Incompatible return value type (got + # "Union[pandas.core.common.<subclass of "Iterable" and "Sized">, + # pandas.core.common.<subclass of "Iterable" and "Sized">1, T]", expected + # "Union[Collection[T], T]") [return-value] + obj = cast(Collection, obj) return obj diff --git 
a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 125ecb0d88036..df71b4fe415f8 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -364,7 +364,7 @@ class BaseExprVisitor(ast.NodeVisitor): unary_ops = _unary_ops_syms unary_op_nodes = "UAdd", "USub", "Invert", "Not" - unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + unary_op_nodes_map = {k: v for k, v in zip(unary_ops, unary_op_nodes)} rewrite_map = { ast.Eq: ast.In, diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 909643d50e9d7..8c4437f2cdeb9 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -103,5 +103,7 @@ def __str__(self) -> str: def __repr__(self) -> str: return f"{type(self).__name__}({str(self)})" - __setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled - pop = append = extend = remove = sort = insert = _disabled + __setitem__ = __setslice__ = _disabled # type: ignore[assignment] + __delitem__ = __delslice__ = _disabled # type: ignore[assignment] + pop = append = extend = _disabled # type: ignore[assignment] + remove = sort = insert = _disabled # type: ignore[assignment] diff --git a/pandas/core/resample.py b/pandas/core/resample.py index fc54128ae5aa6..7b5154756e613 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -966,8 +966,7 @@ def __init__(self, obj, *args, **kwargs): for attr in self._attributes: setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) - # error: Too many arguments for "__init__" of "object" - super().__init__(None) # type: ignore[call-arg] + super().__init__(None) self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/requirements-dev.txt b/requirements-dev.txt index 1fca25c9fecd9..cc3775de3a4ba 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,7 +12,7 @@ flake8<3.8.0 flake8-comprehensions>=3.1.0 flake8-rst>=0.6.0,<=0.7.0 isort>=5.2.1 -mypy==0.730 
+mypy==0.782 pycodestyle gitpython gitdb diff --git a/setup.cfg b/setup.cfg index c10624d60aaff..2114909256a7a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -127,10 +127,7 @@ show_error_codes = True [mypy-pandas.tests.*] check_untyped_defs=False -[mypy-pandas.conftest] -ignore_errors=True - -[mypy-pandas.tests.tools.test_to_datetime] +[mypy-pandas.conftest,pandas.tests.window.conftest] ignore_errors=True [mypy-pandas._testing] @@ -139,7 +136,22 @@ check_untyped_defs=False [mypy-pandas._version] check_untyped_defs=False -[mypy-pandas.core.arrays.interval] +[mypy-pandas.compat.pickle_compat] +check_untyped_defs=False + +[mypy-pandas.core.apply] +check_untyped_defs=False + +[mypy-pandas.core.arrays.base] +check_untyped_defs=False + +[mypy-pandas.core.arrays.datetimelike] +check_untyped_defs=False + +[mypy-pandas.core.arrays.sparse.array] +check_untyped_defs=False + +[mypy-pandas.core.arrays.string_] check_untyped_defs=False [mypy-pandas.core.base] @@ -151,6 +163,9 @@ check_untyped_defs=False [mypy-pandas.core.computation.expressions] check_untyped_defs=False +[mypy-pandas.core.computation.ops] +check_untyped_defs=False + [mypy-pandas.core.computation.pytables] check_untyped_defs=False @@ -163,6 +178,9 @@ check_untyped_defs=False [mypy-pandas.core.generic] check_untyped_defs=False +[mypy-pandas.core.groupby.base] +check_untyped_defs=False + [mypy-pandas.core.groupby.generic] check_untyped_defs=False @@ -172,15 +190,33 @@ check_untyped_defs=False [mypy-pandas.core.groupby.ops] check_untyped_defs=False +[mypy-pandas.core.indexes.base] +check_untyped_defs=False + +[mypy-pandas.core.indexes.category] +check_untyped_defs=False + +[mypy-pandas.core.indexes.datetimelike] +check_untyped_defs=False + [mypy-pandas.core.indexes.datetimes] check_untyped_defs=False +[mypy-pandas.core.indexes.extension] +check_untyped_defs=False + [mypy-pandas.core.indexes.interval] check_untyped_defs=False [mypy-pandas.core.indexes.multi] check_untyped_defs=False +[mypy-pandas.core.indexes.period] 
+check_untyped_defs=False + +[mypy-pandas.core.indexes.range] +check_untyped_defs=False + [mypy-pandas.core.internals.blocks] check_untyped_defs=False @@ -190,15 +226,27 @@ check_untyped_defs=False [mypy-pandas.core.internals.managers] check_untyped_defs=False +[mypy-pandas.core.internals.ops] +check_untyped_defs=False + [mypy-pandas.core.missing] check_untyped_defs=False [mypy-pandas.core.ops.docstrings] check_untyped_defs=False +[mypy-pandas.core.resample] +check_untyped_defs=False + +[mypy-pandas.core.reshape.concat] +check_untyped_defs=False + [mypy-pandas.core.reshape.merge] check_untyped_defs=False +[mypy-pandas.core.series] +check_untyped_defs=False + [mypy-pandas.core.strings] check_untyped_defs=False @@ -214,6 +262,9 @@ check_untyped_defs=False [mypy-pandas.io.clipboard] check_untyped_defs=False +[mypy-pandas.io.common] +check_untyped_defs=False + [mypy-pandas.io.excel._base] check_untyped_defs=False @@ -232,6 +283,9 @@ check_untyped_defs=False [mypy-pandas.io.formats.css] check_untyped_defs=False +[mypy-pandas.io.formats.csvs] +check_untyped_defs=False + [mypy-pandas.io.formats.excel] check_untyped_defs=False @@ -270,3 +324,10 @@ check_untyped_defs=False [mypy-pandas.plotting._matplotlib.misc] check_untyped_defs=False + +[mypy-pandas.plotting._misc] +check_untyped_defs=False + +[mypy-pandas.util._decorators] +check_untyped_defs=False +
https://api.github.com/repos/pandas-dev/pandas/pulls/36012
2020-08-31T15:29:23Z
2020-09-01T23:34:16Z
2020-09-01T23:34:16Z
2020-09-02T09:21:03Z
CI: suppress another setuptools warning
diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index 04e841c05e44a..fe5fc3e21d960 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -25,6 +25,7 @@ # https://github.com/pandas-dev/pandas/issues/35252 "ignore:Distutils:UserWarning" ) +@pytest.mark.filterwarnings("ignore:Setuptools is replacing distutils:UserWarning") def test_show_versions(capsys): # gh-32041 pd.show_versions()
Seeing this on some new PRs. xref #35252
https://api.github.com/repos/pandas-dev/pandas/pulls/36011
2020-08-31T14:53:54Z
2020-08-31T16:27:28Z
2020-08-31T16:27:28Z
2020-08-31T16:57:40Z
POC: ArrayManager -- array-based data manager for columnar store
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e8834bd509bf0..516d37a483ce4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -132,3 +132,22 @@ jobs: - name: Upload dev docs run: rsync -az --delete doc/build/html/ docs@${{ secrets.server_ip }}:/usr/share/nginx/pandas/pandas-docs/dev if: github.event_name == 'push' + + data_manager: + name: Test experimental data manager + runs-on: ubuntu-latest + steps: + + - name: Setting conda path + run: echo "${HOME}/miniconda3/bin" >> $GITHUB_PATH + + - name: Checkout + uses: actions/checkout@v1 + + - name: Setup environment and build pandas + run: ci/setup_env.sh + + - name: Run tests + run: | + source activate pandas-dev + pytest pandas/tests/frame/methods --array-manager diff --git a/pandas/_typing.py b/pandas/_typing.py index 91cb01dac76fb..9b957ab4d0686 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -39,6 +39,7 @@ from pandas.core.generic import NDFrame # noqa: F401 from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy from pandas.core.indexes.base import Index + from pandas.core.internals import ArrayManager, BlockManager from pandas.core.resample import Resampler from pandas.core.series import Series from pandas.core.window.rolling import BaseWindow @@ -159,3 +160,6 @@ ColspaceArgType = Union[ str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]] ] + +# internals +Manager = Union["ArrayManager", "BlockManager"] diff --git a/pandas/conftest.py b/pandas/conftest.py index e30a55cef3166..45d545a522fc7 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -75,6 +75,19 @@ def pytest_addoption(parser): action="store_true", help="Fail if a test is skipped for missing data file.", ) + parser.addoption( + "--array-manager", + "--am", + action="store_true", + help="Use the experimental ArrayManager as default data manager.", + ) + + +def pytest_sessionstart(session): + # Note: we need to set the option here and not in 
pytest_runtest_setup below + # to ensure this is run before creating fixture data + if session.config.getoption("--array-manager"): + pd.options.mode.data_manager = "array" def pytest_runtest_setup(item): @@ -1454,3 +1467,11 @@ def indexer_si(request): Parametrize over __setitem__, iloc.__setitem__ """ return request.param + + +@pytest.fixture +def using_array_manager(request): + """ + Fixture to check if the array manager is being used. + """ + return pd.options.mode.data_manager == "array" diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index fba82ae499e90..56ef1ea28ed1b 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -483,6 +483,12 @@ def use_inf_as_na_cb(key): cf.register_option( "use_inf_as_null", False, use_inf_as_null_doc, cb=use_inf_as_na_cb ) + cf.register_option( + "data_manager", + "block", + "Internal data manager type", + validator=is_one_of_factory(["block", "array"]), + ) cf.deprecate_option( "mode.use_inf_as_null", msg=use_inf_as_null_doc, rkey="mode.use_inf_as_na" diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 344e5d6667074..36ccd0b8a2f7d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -62,6 +62,7 @@ IndexKeyFunc, IndexLabel, Level, + Manager, PythonFuncType, Renamer, StorageOptions, @@ -137,13 +138,14 @@ ) from pandas.core.indexes.multi import MultiIndex, maybe_droplevels from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable -from pandas.core.internals import BlockManager +from pandas.core.internals import ArrayManager, BlockManager from pandas.core.internals.construction import ( arrays_to_mgr, dataclasses_to_dicts, init_dict, init_ndarray, masked_rec_array_to_mgr, + mgr_to_mgr, nested_data_to_arrays, reorder_arrays, sanitize_index, @@ -523,7 +525,7 @@ def __init__( if isinstance(data, DataFrame): data = data._mgr - if isinstance(data, BlockManager): + if isinstance(data, (BlockManager, ArrayManager)): if index is None and columns is None 
and dtype is None and copy is False: # GH#33357 fastpath NDFrame.__init__(self, data) @@ -601,8 +603,31 @@ def __init__( values, index, columns, dtype=values.dtype, copy=False ) + # ensure correct Manager type according to settings + manager = get_option("mode.data_manager") + mgr = mgr_to_mgr(mgr, typ=manager) + NDFrame.__init__(self, mgr) + def _as_manager(self, typ: str) -> DataFrame: + """ + Private helper function to create a DataFrame with specific manager. + + Parameters + ---------- + typ : {"block", "array"} + + Returns + ------- + DataFrame + New DataFrame using specified manager type. Is not guaranteed + to be a copy or not. + """ + new_mgr: Manager + new_mgr = mgr_to_mgr(self._mgr, typ=typ) + # fastpath of passing a manager doesn't check the option/manager class + return DataFrame(new_mgr) + # ---------------------------------------------------------------------- @property @@ -675,6 +700,8 @@ def _is_homogeneous_type(self) -> bool: ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ + if isinstance(self._mgr, ArrayManager): + return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: @@ -685,6 +712,8 @@ def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. 
""" + if isinstance(self._mgr, ArrayManager): + return False if self._mgr.any_extension_types: # TODO(EA2D) special case would be unnecessary with 2D EAs return False @@ -5506,7 +5535,7 @@ def sort_values( # type: ignore[override] ) if ignore_index: - new_data.axes[1] = ibase.default_index(len(indexer)) + new_data.set_axis(1, ibase.default_index(len(indexer))) result = self._constructor(new_data) if inplace: @@ -6051,7 +6080,10 @@ def _dispatch_frame_op(self, right, func, axis: Optional[int] = None): # fails in cases with empty columns reached via # _frame_arith_method_with_reindex - bm = self._mgr.operate_blockwise(right._mgr, array_op) + # TODO operate_blockwise expects a manager of the same type + bm = self._mgr.operate_blockwise( + right._mgr, array_op # type: ignore[arg-type] + ) return type(self)(bm) elif isinstance(right, Series) and axis == 1: @@ -8894,11 +8926,11 @@ def func(values: np.ndarray): # We only use this in the case that operates on self.values return op(values, axis=axis, skipna=skipna, **kwds) - def blk_func(values): + def blk_func(values, axis=1): if isinstance(values, ExtensionArray): return values._reduce(name, skipna=skipna, **kwds) else: - return op(values, axis=1, skipna=skipna, **kwds) + return op(values, axis=axis, skipna=skipna, **kwds) def _get_data() -> DataFrame: if filter_type is None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0daeed0e393e6..9e3f6f8e36175 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -45,6 +45,7 @@ IndexLabel, JSONSerializable, Level, + Manager, NpDtype, Renamer, StorageOptions, @@ -102,7 +103,7 @@ RangeIndex, ensure_index, ) -from pandas.core.internals import BlockManager +from pandas.core.internals import ArrayManager, BlockManager from pandas.core.missing import find_valid_index from pandas.core.ops import align_method_FRAME from pandas.core.shared_docs import _shared_docs @@ -179,7 +180,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): ) 
_metadata: List[str] = [] _is_copy = None - _mgr: BlockManager + _mgr: Manager _attrs: Dict[Optional[Hashable], Any] _typ: str @@ -188,7 +189,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): def __init__( self, - data: BlockManager, + data: Manager, copy: bool = False, attrs: Optional[Mapping[Optional[Hashable], Any]] = None, ): @@ -207,7 +208,7 @@ def __init__( @classmethod def _init_mgr( cls, mgr, axes, dtype: Optional[Dtype] = None, copy: bool = False - ) -> BlockManager: + ) -> Manager: """ passed a manager and a axes dict """ for a, axe in axes.items(): if axe is not None: @@ -220,7 +221,13 @@ def _init_mgr( mgr = mgr.copy() if dtype is not None: # avoid further copies if we can - if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: + if ( + isinstance(mgr, BlockManager) + and len(mgr.blocks) == 1 + and mgr.blocks[0].values.dtype == dtype + ): + pass + else: mgr = mgr.astype(dtype=dtype) return mgr @@ -4544,11 +4551,11 @@ def sort_index( new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed - new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() + new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 - new_data.axes[axis] = ibase.default_index(len(indexer)) + new_data.set_axis(axis, ibase.default_index(len(indexer))) result = self._constructor(new_data) @@ -5521,6 +5528,8 @@ def _protect_consolidate(self, f): Consolidate _mgr -- if the blocks have changed, then clear the cache """ + if isinstance(self._mgr, ArrayManager): + return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: @@ -5710,11 +5719,13 @@ def _to_dict_of_blocks(self, copy: bool_t = True): Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. 
- Internal ONLY + Internal ONLY - only works for BlockManager """ + mgr = self._mgr + mgr = cast(BlockManager, mgr) return { k: self._constructor(v).__finalize__(self) - for k, v, in self._mgr.to_dict(copy=copy).items() + for k, v, in mgr.to_dict(copy=copy).items() } def astype( diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 174a2f4052b06..c561204c1c125 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1086,10 +1086,12 @@ def py_fallback(bvalues: ArrayLike) -> ArrayLike: # in the operation. We un-split here. result = result._consolidate() assert isinstance(result, (Series, DataFrame)) # for mypy - assert len(result._mgr.blocks) == 1 + mgr = result._mgr + assert isinstance(mgr, BlockManager) + assert len(mgr.blocks) == 1 # unwrap DataFrame to get array - result = result._mgr.blocks[0].values + result = mgr.blocks[0].values return result def blk_func(bvalues: ArrayLike) -> ArrayLike: diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index fbccac1c2af67..e71143224556b 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,3 +1,5 @@ +from pandas.core.internals.array_manager import ArrayManager +from pandas.core.internals.base import DataManager from pandas.core.internals.blocks import ( # io.pytables, io.packers Block, BoolBlock, @@ -35,6 +37,8 @@ "TimeDeltaBlock", "safe_reshape", "make_block", + "DataManager", + "ArrayManager", "BlockManager", "SingleBlockManager", "concatenate_block_managers", diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py new file mode 100644 index 0000000000000..134bf59ed7f9c --- /dev/null +++ b/pandas/core/internals/array_manager.py @@ -0,0 +1,892 @@ +""" +Experimental manager based on storing a collection of 1D arrays +""" +from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple, TypeVar, Union + +import numpy as np + +from pandas._libs import 
algos as libalgos, lib +from pandas._typing import ArrayLike, DtypeObj, Hashable +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import find_common_type, infer_dtype_from_scalar +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_dtype_equal, + is_extension_array_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.dtypes.missing import isna + +import pandas.core.algorithms as algos +from pandas.core.arrays import ExtensionArray, PandasDtype +from pandas.core.arrays.sparse import SparseDtype +from pandas.core.construction import extract_array +from pandas.core.indexers import maybe_convert_indices +from pandas.core.indexes.api import Index, ensure_index +from pandas.core.internals.base import DataManager +from pandas.core.internals.blocks import make_block + +if TYPE_CHECKING: + from pandas.core.internals.managers import SingleBlockManager + + +T = TypeVar("T", bound="ArrayManager") + + +class ArrayManager(DataManager): + """ + Core internal data structure to implement DataFrame and Series. + + Alternative to the BlockManager, storing a list of 1D arrays instead of + Blocks. 
+ + This is *not* a public API class + + Parameters + ---------- + arrays : Sequence of arrays + axes : Sequence of Index + do_integrity_check : bool, default True + + """ + + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: List[Union[np.ndarray, ExtensionArray]] + _axes: List[Index] + + def __init__( + self, + arrays: List[Union[np.ndarray, ExtensionArray]], + axes: List[Index], + do_integrity_check: bool = True, + ): + # Note: we are storing the axes in "_axes" in the (row, columns) order + # which contrasts the order how it is stored in BlockManager + self._axes = axes + self.arrays = arrays + + if do_integrity_check: + self._axes = [ensure_index(ax) for ax in axes] + self._verify_integrity() + + def make_empty(self: T, axes=None) -> T: + """Return an empty ArrayManager with the items axis of len 0 (no columns)""" + if axes is None: + axes = [self.axes[1:], Index([])] + + arrays: List[Union[np.ndarray, ExtensionArray]] = [] + return type(self)(arrays, axes) + + @property + def items(self) -> Index: + return self._axes[1] + + @property + def axes(self) -> List[Index]: # type: ignore[override] + # mypy doesn't work to override attribute with property + # see https://github.com/python/mypy/issues/4125 + """Axes is BlockManager-compatible order (columns, rows)""" + return [self._axes[1], self._axes[0]] + + @property + def shape(self) -> Tuple[int, ...]: + # this still gives the BlockManager-compatible transposed shape + return tuple(len(ax) for ax in self.axes) + + @property + def shape_proper(self) -> Tuple[int, ...]: + # this returns (n_rows, n_columns) + return tuple(len(ax) for ax in self._axes) + + @staticmethod + def _normalize_axis(axis): + # switch axis + axis = 1 if axis == 0 else 0 + return axis + + # TODO can be shared + def set_axis(self, axis: int, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. 
+ axis = self._normalize_axis(axis) + old_len = len(self._axes[axis]) + new_len = len(new_labels) + + if new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) + + self._axes[axis] = new_labels + + def consolidate(self) -> "ArrayManager": + return self + + def is_consolidated(self) -> bool: + return True + + def _consolidate_inplace(self) -> None: + pass + + def get_dtypes(self): + return np.array([arr.dtype for arr in self.arrays], dtype="object") + + # TODO setstate getstate + + def __repr__(self) -> str: + output = type(self).__name__ + output += f"\nIndex: {self._axes[0]}" + output += f"\nColumns: {self._axes[1]}" + output += f"\n{len(self.arrays)} arrays:" + for arr in self.arrays: + output += f"\n{arr.dtype}" + return output + + def _verify_integrity(self) -> None: + n_rows, n_columns = self.shape_proper + if not len(self.arrays) == n_columns: + raise ValueError( + "Number of passed arrays must equal the size of the column Index: " + f"{len(self.arrays)} arrays vs {n_columns} columns." 
+ ) + for arr in self.arrays: + if not len(arr) == n_rows: + raise ValueError( + "Passed arrays should have the same length as the rows Index: " + f"{len(arr)} vs {n_rows} rows" + ) + if not isinstance(arr, (np.ndarray, ExtensionArray)): + raise ValueError( + "Passed arrays should be np.ndarray or ExtensionArray instances, " + f"got {type(arr)} instead" + ) + + def reduce( + self: T, func: Callable, ignore_failures: bool = False + ) -> Tuple[T, np.ndarray]: + # TODO this still fails because `func` assumes to work on 2D arrays + # TODO implement ignore_failures + assert self.ndim == 2 + + res_arrays = [] + for arr in self.arrays: + res = func(arr, axis=0) + res_arrays.append(np.array([res])) + + index = Index([None]) # placeholder + new_mgr = type(self)(res_arrays, [index, self.items]) + indexer = np.arange(self.shape[0]) + return new_mgr, indexer + + def operate_blockwise(self, other: "ArrayManager", array_op) -> "ArrayManager": + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + # TODO what if `other` is BlockManager ? + left_arrays = self.arrays + right_arrays = other.arrays + result_arrays = [ + array_op(left, right) for left, right in zip(left_arrays, right_arrays) + ] + return type(self)(result_arrays, self._axes) + + def apply( + self: T, + f, + align_keys: Optional[List[str]] = None, + ignore_failures: bool = False, + **kwargs, + ) -> T: + """ + Iterate over the arrays, collect and create a new ArrayManager. + + Parameters + ---------- + f : str or callable + Name of the Array method to apply. 
+ align_keys: List[str] or None, default None + ignore_failures: bool, default False + **kwargs + Keywords to pass to `f` + + Returns + ------- + ArrayManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_arrays: List[np.ndarray] = [] + result_indices: List[int] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + if f == "apply": + f = kwargs.pop("func") + + for i, arr in enumerate(self.arrays): + + if aligned_args: + + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[i] + else: + kwargs[k] = obj.iloc[:, i]._values + else: + # otherwise we have an array-like + kwargs[k] = obj[i] + + try: + if callable(f): + applied = f(arr, **kwargs) + else: + applied = getattr(arr, f)(**kwargs) + except (TypeError, NotImplementedError): + if not ignore_failures: + raise + continue + # if not isinstance(applied, ExtensionArray): + # # TODO not all EA operations return new EAs (eg astype) + # applied = array(applied) + result_arrays.append(applied) + result_indices.append(i) + + new_axes: List[Index] + if ignore_failures: + # TODO copy? 
+ new_axes = [self._axes[0], self._axes[1][result_indices]] + else: + new_axes = self._axes + + if len(result_arrays) == 0: + return self.make_empty(new_axes) + + return type(self)(result_arrays, new_axes) + + def apply_with_block(self: T, f, align_keys=None, **kwargs) -> T: + + align_keys = align_keys or [] + aligned_args = {k: kwargs[k] for k in align_keys} + + result_arrays = [] + + for i, arr in enumerate(self.arrays): + + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[[i]] + else: + kwargs[k] = obj.iloc[:, [i]]._values + else: + # otherwise we have an ndarray + kwargs[k] = obj[[i]] + + if hasattr(arr, "tz") and arr.tz is None: # type: ignore[union-attr] + # DatetimeArray needs to be converted to ndarray for DatetimeBlock + arr = arr._data # type: ignore[union-attr] + elif arr.dtype.kind == "m": + # TimedeltaArray needs to be converted to ndarray for TimedeltaBlock + arr = arr._data # type: ignore[union-attr] + if isinstance(arr, np.ndarray): + arr = np.atleast_2d(arr) + block = make_block(arr, placement=slice(0, 1, 1), ndim=2) + applied = getattr(block, f)(**kwargs) + if isinstance(applied, list): + applied = applied[0] + arr = applied.values + if isinstance(arr, np.ndarray): + arr = arr[0, :] + result_arrays.append(arr) + + return type(self)(result_arrays, self._axes) + + # TODO quantile + + def isna(self, func) -> "ArrayManager": + return self.apply("apply", func=func) + + def where(self, other, cond, align: bool, errors: str, axis: int) -> "ArrayManager": + if align: + align_keys = ["other", "cond"] + else: + align_keys = ["cond"] + other = extract_array(other, extract_numpy=True) + + return self.apply_with_block( + "where", + align_keys=align_keys, + other=other, + cond=cond, + errors=errors, + axis=axis, + ) + + # TODO what is this used for? 
+ # def setitem(self, indexer, value) -> "ArrayManager": + # return self.apply_with_block("setitem", indexer=indexer, value=value) + + def putmask(self, mask, new, align: bool = True, axis: int = 0): + + if align: + align_keys = ["new", "mask"] + else: + align_keys = ["mask"] + new = extract_array(new, extract_numpy=True) + + return self.apply_with_block( + "putmask", + align_keys=align_keys, + mask=mask, + new=new, + axis=axis, + ) + + def diff(self, n: int, axis: int) -> "ArrayManager": + return self.apply_with_block("diff", n=n, axis=axis) + + def interpolate(self, **kwargs) -> "ArrayManager": + return self.apply_with_block("interpolate", **kwargs) + + def shift(self, periods: int, axis: int, fill_value) -> "ArrayManager": + if fill_value is lib.no_default: + fill_value = None + + if axis == 0 and self.ndim == 2: + # TODO column-wise shift + raise NotImplementedError + + return self.apply_with_block( + "shift", periods=periods, axis=axis, fill_value=fill_value + ) + + def fillna(self, value, limit, inplace: bool, downcast) -> "ArrayManager": + # TODO implement downcast + inplace = validate_bool_kwarg(inplace, "inplace") + + def array_fillna(array, value, limit, inplace): + + mask = isna(array) + if limit is not None: + limit = libalgos.validate_limit(None, limit=limit) + mask[mask.cumsum() > limit] = False + + # TODO could optimize for arrays that cannot hold NAs + # (like _can_hold_na on Blocks) + if not inplace: + array = array.copy() + + # np.putmask(array, mask, value) + if np.any(mask): + # TODO allow invalid value if there is nothing to fill? 
+ array[mask] = value + return array + + return self.apply(array_fillna, value=value, limit=limit, inplace=inplace) + + def downcast(self) -> "ArrayManager": + return self.apply_with_block("downcast") + + def astype( + self, dtype, copy: bool = False, errors: str = "raise" + ) -> "ArrayManager": + return self.apply("astype", dtype=dtype, copy=copy) # , errors=errors) + + def convert( + self, + copy: bool = True, + datetime: bool = True, + numeric: bool = True, + timedelta: bool = True, + ) -> "ArrayManager": + return self.apply_with_block( + "convert", + copy=copy, + datetime=datetime, + numeric=numeric, + timedelta=timedelta, + ) + + def replace(self, value, **kwargs) -> "ArrayManager": + assert np.ndim(value) == 0, value + # TODO "replace" is right now implemented on the blocks, we should move + # it to general array algos so it can be reused here + return self.apply_with_block("replace", value=value, **kwargs) + + def replace_list( + self: T, + src_list: List[Any], + dest_list: List[Any], + inplace: bool = False, + regex: bool = False, + ) -> T: + """ do a list replace """ + inplace = validate_bool_kwarg(inplace, "inplace") + + return self.apply_with_block( + "_replace_list", + src_list=src_list, + dest_list=dest_list, + inplace=inplace, + regex=regex, + ) + + def to_native_types(self, **kwargs): + return self.apply_with_block("to_native_types", **kwargs) + + @property + def is_mixed_type(self) -> bool: + return True + + @property + def is_numeric_mixed_type(self) -> bool: + return False + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return False # any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """ return a boolean if we are a single block and are a view """ + # TODO what is this used for? 
+ return False + + @property + def is_single_block(self) -> bool: + return False + + def get_bool_data(self, copy: bool = False) -> "ArrayManager": + """ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + mask = np.array([is_bool_dtype(t) for t in self.get_dtypes()], dtype="object") + arrays = [self.arrays[i] for i in np.nonzero(mask)[0]] + # TODO copy? + new_axes = [self._axes[0], self._axes[1][mask]] + return type(self)(arrays, new_axes) + + def get_numeric_data(self, copy: bool = False) -> "ArrayManager": + """ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + mask = np.array([is_numeric_dtype(t) for t in self.get_dtypes()]) + arrays = [self.arrays[i] for i in np.nonzero(mask)[0]] + # TODO copy? + new_axes = [self._axes[0], self._axes[1][mask]] + return type(self)(arrays, new_axes) + + def copy(self: T, deep=True) -> T: + """ + Make deep or shallow copy of ArrayManager + + Parameters + ---------- + deep : bool or string, default True + If False, return shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self._axes] + else: + new_axes = list(self._axes) + + if deep: + new_arrays = [arr.copy() for arr in self.arrays] + else: + new_arrays = self.arrays + return type(self)(new_arrays, new_axes) + + def as_array( + self, + transpose: bool = False, + dtype=None, + copy: bool = False, + na_value=lib.no_default, + ) -> np.ndarray: + """ + Convert the blockmanager data into an numpy array. + + Parameters + ---------- + transpose : bool, default False + If True, transpose the return array. + dtype : object, default None + Data type of the return array. 
+ copy : bool, default False + If True then guarantee that a copy is returned. A value of + False does not guarantee that the underlying data is not + copied. + na_value : object, default lib.no_default + Value to be used as the missing value sentinel. + + Returns + ------- + arr : ndarray + """ + if len(self.arrays) == 0: + arr = np.empty(self.shape, dtype=float) + return arr.transpose() if transpose else arr + + # We want to copy when na_value is provided to avoid + # mutating the original object + copy = copy or na_value is not lib.no_default + + if not dtype: + dtype = _interleaved_dtype(self.arrays) + + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + elif isinstance(dtype, PandasDtype): + dtype = dtype.numpy_dtype + elif is_extension_array_dtype(dtype): + dtype = "object" + elif is_dtype_equal(dtype, str): + dtype = "object" + + result = np.empty(self.shape_proper, dtype=dtype) + + for i, arr in enumerate(self.arrays): + arr = arr.astype(dtype, copy=copy) + result[:, i] = arr + + if na_value is not lib.no_default: + result[isna(result)] = na_value + + return result + # return arr.transpose() if transpose else arr + + def get_slice(self, slobj: slice, axis: int = 0) -> "ArrayManager": + axis = self._normalize_axis(axis) + + if axis == 0: + arrays = [arr[slobj] for arr in self.arrays] + elif axis == 1: + arrays = self.arrays[slobj] + + new_axes = list(self._axes) + new_axes[axis] = new_axes[axis][slobj] + + return type(self)(arrays, new_axes, do_integrity_check=False) + + def fast_xs(self, loc: int) -> ArrayLike: + """ + Return the array corresponding to `frame.iloc[loc]`. 
+ + Parameters + ---------- + loc : int + + Returns + ------- + np.ndarray or ExtensionArray + """ + dtype = _interleaved_dtype(self.arrays) + + if isinstance(dtype, SparseDtype): + temp_dtype = dtype.subtype + elif isinstance(dtype, PandasDtype): + temp_dtype = dtype.numpy_dtype + elif is_extension_array_dtype(dtype): + temp_dtype = "object" + elif is_dtype_equal(dtype, str): + temp_dtype = "object" + else: + temp_dtype = dtype + + result = np.array([arr[loc] for arr in self.arrays], dtype=temp_dtype) + if isinstance(dtype, ExtensionDtype): + result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) + return result + + def iget(self, i: int) -> "SingleBlockManager": + """ + Return the data as a SingleBlockManager. + """ + from pandas.core.internals.managers import SingleBlockManager + + values = self.arrays[i] + block = make_block(values, placement=slice(0, len(values)), ndim=1) + + return SingleBlockManager(block, self._axes[0]) + + def iget_values(self, i: int) -> ArrayLike: + """ + Return the data for column i as the values (ndarray or ExtensionArray). + """ + return self.arrays[i] + + def idelete(self, indexer): + """ + Delete selected locations in-place (new block and array, same BlockManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] + self._axes = [self._axes[0], self._axes[1][to_keep]] + + def iset(self, loc: Union[int, slice, np.ndarray], value): + """ + Set new item in-place. Does not consolidate. Adds new Block if not + contained in the current set of items + """ + if lib.is_integer(loc): + # TODO normalize array -> this should in theory not be needed? 
+ value = extract_array(value, extract_numpy=True) + if isinstance(value, np.ndarray) and value.ndim == 2: + value = value[0, :] + + assert isinstance(value, (np.ndarray, ExtensionArray)) + # value = np.asarray(value) + # assert isinstance(value, np.ndarray) + assert len(value) == len(self._axes[0]) + self.arrays[loc] = value + return + + # TODO + raise Exception + + def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False): + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : array_like + allow_duplicates: bool + If False, trying to insert non-unique item will raise + + """ + if not allow_duplicates and item in self.items: + # Should this be a different kind of error?? + raise ValueError(f"cannot insert {item}, already exists") + + if not isinstance(loc, int): + raise TypeError("loc must be int") + + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + value = extract_array(value, extract_numpy=True) + if value.ndim == 2: + value = value[0, :] + # TODO self.arrays can be empty + # assert len(value) == len(self.arrays[0]) + + # TODO is this copy needed? 
+ arrays = self.arrays.copy() + arrays.insert(loc, value) + + self.arrays = arrays + self._axes[1] = new_axis + + def reindex_indexer( + self: T, + new_axis, + indexer, + axis: int, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + # ignored keywords + consolidate: bool = True, + only_slice: bool = False, + ) -> T: + axis = self._normalize_axis(axis) + return self._reindex_indexer( + new_axis, indexer, axis, fill_value, allow_dups, copy + ) + + def _reindex_indexer( + self: T, + new_axis, + indexer, + axis: int, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + ) -> T: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray of int64 or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool, default True + + + pandas-indexer with -1's only. + """ + if indexer is None: + if new_axis is self._axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result._axes = list(self._axes) + result._axes[axis] = new_axis + return result + + # some axes don't allow reindexing with dups + if not allow_dups: + self._axes[axis]._can_reindex(indexer) + + # if axis >= self.ndim: + # raise IndexError("Requested axis not found in manager") + + if axis == 1: + new_arrays = [] + for i in indexer: + if i == -1: + arr = self._make_na_array(fill_value=fill_value) + else: + arr = self.arrays[i] + new_arrays.append(arr) + + else: + new_arrays = [ + algos.take( + arr, + indexer, + allow_fill=True, + fill_value=fill_value, + # if fill_value is not None else blk.fill_value + ) + for arr in self.arrays + ] + + new_axes = list(self._axes) + new_axes[axis] = new_axis + + return type(self)(new_arrays, new_axes) + + def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True): + """ + Take items along any axis. 
+ """ + axis = self._normalize_axis(axis) + + indexer = ( + np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64") + if isinstance(indexer, slice) + else np.asanyarray(indexer, dtype="int64") + ) + + n = self.shape_proper[axis] + if convert: + indexer = maybe_convert_indices(indexer, n) + + if verify: + if ((indexer == -1) | (indexer >= n)).any(): + raise Exception("Indices must be nonzero and less than the axis length") + + new_labels = self._axes[axis].take(indexer) + return self._reindex_indexer( + new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True + ) + + def _make_na_array(self, fill_value=None): + if fill_value is None: + fill_value = np.nan + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + values = np.empty(self.shape_proper[0], dtype=dtype) + values.fill(fill_value) + return values + + def equals(self, other: object) -> bool: + # TODO + raise NotImplementedError + + def unstack(self, unstacker, fill_value) -> "ArrayManager": + """ + Return a BlockManager with all blocks unstacked.. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. + + Returns + ------- + unstacked : BlockManager + """ + indexer, _ = unstacker._indexer_and_to_sort + new_indexer = np.full(unstacker.mask.shape, -1) + new_indexer[unstacker.mask] = indexer + new_indexer2D = new_indexer.reshape(*unstacker.full_shape) + + new_arrays = [] + for arr in self.arrays: + for i in range(unstacker.full_shape[1]): + new_arr = algos.take( + arr, new_indexer2D[:, i], allow_fill=True, fill_value=fill_value + ) + new_arrays.append(new_arr) + + new_index = unstacker.new_index + new_columns = unstacker.get_new_columns(self._axes[1]) + new_axes = [new_index, new_columns] + + return type(self)(new_arrays, new_axes, do_integrity_check=False) + + # TODO + # equals + # to_dict + # quantile + + +def _interleaved_dtype(blocks) -> Optional[DtypeObj]: + """ + Find the common dtype for `blocks`. 
+ + Parameters + ---------- + blocks : List[Block] + + Returns + ------- + dtype : np.dtype, ExtensionDtype, or None + None is returned when `blocks` is empty. + """ + if not len(blocks): + return None + + return find_common_type([b.dtype for b in blocks]) diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py new file mode 100644 index 0000000000000..2295e3f2c41b2 --- /dev/null +++ b/pandas/core/internals/base.py @@ -0,0 +1,72 @@ +""" +Base class for the internal managers. Both BlockManager and ArrayManager +inherit from this class. +""" +from typing import List, TypeVar + +from pandas.errors import AbstractMethodError + +from pandas.core.base import PandasObject +from pandas.core.indexes.api import Index, ensure_index + +T = TypeVar("T", bound="DataManager") + + +class DataManager(PandasObject): + + # TODO share more methods/attributes + + axes: List[Index] + + @property + def items(self) -> Index: + raise AbstractMethodError(self) + + def __len__(self) -> int: + return len(self.items) + + @property + def ndim(self) -> int: + return len(self.axes) + + def reindex_indexer( + self: T, + new_axis, + indexer, + axis: int, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + consolidate: bool = True, + only_slice: bool = False, + ) -> T: + raise AbstractMethodError(self) + + def reindex_axis( + self, + new_index, + axis: int, + method=None, + limit=None, + fill_value=None, + copy: bool = True, + consolidate: bool = True, + only_slice: bool = False, + ): + """ + Conform data manager to new index. 
+ """ + new_index = ensure_index(new_index) + new_index, indexer = self.axes[axis].reindex( + new_index, method=method, limit=limit + ) + + return self.reindex_indexer( + new_index, + indexer, + axis=axis, + fill_value=fill_value, + copy=copy, + consolidate=consolidate, + only_slice=only_slice, + ) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index f97077954f8bf..32b6f9d64dd8d 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -1,11 +1,12 @@ from collections import defaultdict import copy +import itertools from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Tuple, cast import numpy as np from pandas._libs import NaT, internals as libinternals -from pandas._typing import ArrayLike, DtypeObj, Shape +from pandas._typing import ArrayLike, DtypeObj, Manager, Shape from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import maybe_promote @@ -25,6 +26,7 @@ import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray, ExtensionArray +from pandas.core.internals.array_manager import ArrayManager from pandas.core.internals.blocks import make_block from pandas.core.internals.managers import BlockManager @@ -35,7 +37,7 @@ def concatenate_block_managers( mgrs_indexers, axes: List["Index"], concat_axis: int, copy: bool -) -> BlockManager: +) -> Manager: """ Concatenate block managers into one. 
@@ -50,6 +52,21 @@ def concatenate_block_managers( ------- BlockManager """ + if isinstance(mgrs_indexers[0][0], ArrayManager): + + if concat_axis == 1: + # TODO for now only fastpath without indexers + mgrs = [t[0] for t in mgrs_indexers] + arrays = [ + concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))], axis=0) + for j in range(len(mgrs[0].arrays)) + ] + return ArrayManager(arrays, [axes[1], axes[0]]) + elif concat_axis == 0: + mgrs = [t[0] for t in mgrs_indexers] + arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) + return ArrayManager(arrays, [axes[1], axes[0]]) + concat_plans = [ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers ] diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 5161cf7038fe8..57a87e1e283d9 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -19,7 +19,7 @@ import numpy.ma as ma from pandas._libs import lib -from pandas._typing import Axis, DtypeObj, Scalar +from pandas._typing import Axis, DtypeObj, Manager, Scalar from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, @@ -149,6 +149,33 @@ def masked_rec_array_to_mgr( return mgr +def mgr_to_mgr(mgr, typ: str): + """ + Convert to specific type of Manager. Does not copy if the type is already + correct. Does not guarantee a copy otherwise. 
+ """ + from pandas.core.internals import ArrayManager, BlockManager + + new_mgr: Manager + + if typ == "block": + if isinstance(mgr, BlockManager): + new_mgr = mgr + else: + new_mgr = arrays_to_mgr( + mgr.arrays, mgr.axes[0], mgr.axes[1], mgr.axes[0], dtype=None + ) + elif typ == "array": + if isinstance(mgr, ArrayManager): + new_mgr = mgr + else: + arrays = [mgr.iget_values(i).copy() for i in range(len(mgr.axes[0]))] + new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{type}'") + return new_mgr + + # --------------------------------------------------------------------- # DataFrame Constructor Interface diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index fd503280eeafb..cc5576719ff43 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -40,10 +40,10 @@ import pandas.core.algorithms as algos from pandas.core.arrays.sparse import SparseDtype -from pandas.core.base import PandasObject from pandas.core.construction import extract_array from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import Index, ensure_index +from pandas.core.internals.base import DataManager from pandas.core.internals.blocks import ( Block, CategoricalBlock, @@ -62,7 +62,7 @@ T = TypeVar("T", bound="BlockManager") -class BlockManager(PandasObject): +class BlockManager(DataManager): """ Core internal data structure to implement DataFrame, Series, etc. @@ -1229,35 +1229,6 @@ def insert(self, loc: int, item: Hashable, value, allow_duplicates: bool = False stacklevel=5, ) - def reindex_axis( - self, - new_index, - axis: int, - method=None, - limit=None, - fill_value=None, - copy: bool = True, - consolidate: bool = True, - only_slice: bool = False, - ): - """ - Conform block manager to new index. 
- """ - new_index = ensure_index(new_index) - new_index, indexer = self.axes[axis].reindex( - new_index, method=method, limit=limit - ) - - return self.reindex_indexer( - new_index, - indexer, - axis=axis, - fill_value=fill_value, - copy=copy, - consolidate=consolidate, - only_slice=only_slice, - ) - def reindex_indexer( self: T, new_axis, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index d1275590306ef..f1d0af60e1c7f 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -21,6 +21,7 @@ Tuple, Type, Union, + cast, ) import warnings @@ -67,6 +68,7 @@ from pandas.core.computation.pytables import PyTablesExpr, maybe_expression from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index +from pandas.core.internals import BlockManager from pandas.io.common import stringify_path from pandas.io.formats.printing import adjoin, pprint_thing @@ -3983,19 +3985,21 @@ def _get_blocks_and_items( def get_blk_items(mgr): return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks] - blocks: List["Block"] = list(frame._mgr.blocks) - blk_items: List[Index] = get_blk_items(frame._mgr) + mgr = frame._mgr + mgr = cast(BlockManager, mgr) + blocks: List["Block"] = list(mgr.blocks) + blk_items: List[Index] = get_blk_items(mgr) if len(data_columns): axis, axis_labels = new_non_index_axes[0] new_labels = Index(axis_labels).difference(Index(data_columns)) mgr = frame.reindex(new_labels, axis=axis)._mgr - blocks = list(mgr.blocks) + blocks = list(mgr.blocks) # type: ignore[union-attr] blk_items = get_blk_items(mgr) for c in data_columns: mgr = frame.reindex([c], axis=axis)._mgr - blocks.extend(mgr.blocks) + blocks.extend(mgr.blocks) # type: ignore[union-attr] blk_items.extend(get_blk_items(mgr)) # reorder the blocks in the same order as the existing table if we can diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index 356dc800d9662..36c875b8abe6f 100644 --- 
a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -1,10 +1,15 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range, timedelta_range import pandas._testing as tm +# TODO td.skip_array_manager_not_yet_implemented +# appending with reindexing not yet working + class TestDataFrameAppend: def test_append_multiindex(self, multiindex_dataframe_random_data, frame_or_series): @@ -32,6 +37,7 @@ def test_append_empty_list(self): tm.assert_frame_equal(result, expected) assert result is not df # .append() should return a new object + @td.skip_array_manager_not_yet_implemented def test_append_series_dict(self): df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) @@ -72,6 +78,7 @@ def test_append_series_dict(self): expected = df.append(df[-1:], ignore_index=True) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented def test_append_list_of_series_dicts(self): df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) @@ -90,6 +97,7 @@ def test_append_list_of_series_dicts(self): expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented def test_append_missing_cols(self): # GH22252 # exercise the conditional branch in append method where the data @@ -134,6 +142,7 @@ def test_append_empty_dataframe(self): expected = df1.copy() tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented def test_append_dtypes(self): # GH 5754 @@ -193,6 +202,7 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): expected = Series(Timestamp(timestamp, tz=tz), name=0) tm.assert_series_equal(result, expected) + @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize( "data, dtype", [ diff --git 
a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index 3c65551aafd0f..a4da77548b920 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -3,6 +3,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( Categorical, @@ -90,6 +92,7 @@ def test_astype_mixed_type(self, mixed_type_frame): casted = mn.astype("O") _check_cast(casted, "object") + @td.skip_array_manager_not_yet_implemented def test_astype_with_exclude_string(self, float_frame): df = float_frame.copy() expected = float_frame.astype(int) @@ -124,6 +127,7 @@ def test_astype_with_view_mixed_float(self, mixed_float_frame): casted = tf.astype(np.int64) casted = tf.astype(np.float32) # noqa + @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("dtype", [np.int32, np.int64]) @pytest.mark.parametrize("val", [np.nan, np.inf]) def test_astype_cast_nan_inf_int(self, val, dtype): @@ -382,6 +386,7 @@ def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit): tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) def test_astype_to_datetime_unit(self, unit): # tests all units from datetime origination @@ -406,6 +411,7 @@ def test_astype_to_timedelta_unit_ns(self, unit): tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"]) def test_astype_to_timedelta_unit(self, unit): # coerce to float @@ -429,6 +435,7 @@ def test_astype_to_incorrect_datetimelike(self, unit): msg = ( fr"cannot astype a datetimelike from \[datetime64\[ns\]\] to " fr"\[timedelta64\[{unit}\]\]" + fr"|(Cannot cast DatetimeArray to dtype timedelta64\[{unit}\])" ) with pytest.raises(TypeError, match=msg): df.astype(other) @@ -436,11 +443,13 @@ def 
test_astype_to_incorrect_datetimelike(self, unit): msg = ( fr"cannot astype a timedelta from \[timedelta64\[ns\]\] to " fr"\[datetime64\[{unit}\]\]" + fr"|(Cannot cast TimedeltaArray to dtype datetime64\[{unit}\])" ) df = DataFrame(np.array([[1, 2, 3]], dtype=other)) with pytest.raises(TypeError, match=msg): df.astype(dtype) + @td.skip_array_manager_not_yet_implemented def test_astype_arg_for_errors(self): # GH#14878 @@ -567,6 +576,7 @@ def test_astype_empty_dtype_dict(self): tm.assert_frame_equal(result, df) assert result is not df + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) ignore keyword @pytest.mark.parametrize( "df", [ diff --git a/pandas/tests/frame/methods/test_count.py b/pandas/tests/frame/methods/test_count.py index d738c7139093c..1727a76c191ee 100644 --- a/pandas/tests/frame/methods/test_count.py +++ b/pandas/tests/frame/methods/test_count.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import DataFrame, Index, Series import pandas._testing as tm @@ -103,6 +105,7 @@ def test_count_index_with_nan(self): ) tm.assert_frame_equal(res, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_count_level( self, multiindex_year_month_day_dataframe_random_data, diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py index 6cea5abcac6d0..f8d729a215ba8 100644 --- a/pandas/tests/frame/methods/test_cov_corr.py +++ b/pandas/tests/frame/methods/test_cov_corr.py @@ -191,14 +191,15 @@ def test_corr_nullable_integer(self, nullable_column, other_column, method): expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"]) tm.assert_frame_equal(result, expected) - def test_corr_item_cache(self): + def test_corr_item_cache(self, using_array_manager): # Check that corr does not lead to incorrect entries in item_cache df = DataFrame({"A": range(10)}) df["B"] = range(10)[::-1] ser = df["A"] # 
populate item_cache - assert len(df._mgr.blocks) == 2 + if not using_array_manager: + assert len(df._mgr.blocks) == 2 _ = df.corr() diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index 148263bad0eb0..1de270fc72fb2 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -1,10 +1,15 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import Categorical, DataFrame, Series, Timestamp, date_range import pandas._testing as tm +# TODO(ArrayManager) quantile is needed for describe() +pytestmark = td.skip_array_manager_not_yet_implemented + class TestDataFrameDescribe: def test_describe_bool_in_mixed_frame(self): diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 58e1bd146191f..bc2b7a4655b8e 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -5,6 +5,7 @@ from pandas.compat import is_numpy_dev from pandas.errors import PerformanceWarning +import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp @@ -156,6 +157,7 @@ def test_drop(self): assert return_value is None tm.assert_frame_equal(df, expected) + @td.skip_array_manager_not_yet_implemented def test_drop_multiindex_not_lexsorted(self): # GH#11640 diff --git a/pandas/tests/frame/methods/test_equals.py b/pandas/tests/frame/methods/test_equals.py index de2509ed91be2..dc45c9eb97ae4 100644 --- a/pandas/tests/frame/methods/test_equals.py +++ b/pandas/tests/frame/methods/test_equals.py @@ -1,8 +1,13 @@ import numpy as np +import pandas.util._test_decorators as td + from pandas import DataFrame, date_range import pandas._testing as tm +# TODO(ArrayManager) implement equals +pytestmark = td.skip_array_manager_not_yet_implemented + class TestEquals: def test_dataframe_not_equal(self): diff --git 
a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py index bd0901387eeed..be80dd49ff1fb 100644 --- a/pandas/tests/frame/methods/test_explode.py +++ b/pandas/tests/frame/methods/test_explode.py @@ -1,9 +1,14 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm +# TODO(ArrayManager) concat with reindexing +pytestmark = td.skip_array_manager_not_yet_implemented + def test_error(): df = pd.DataFrame( diff --git a/pandas/tests/frame/methods/test_fillna.py b/pandas/tests/frame/methods/test_fillna.py index b427611099be3..58016be82c405 100644 --- a/pandas/tests/frame/methods/test_fillna.py +++ b/pandas/tests/frame/methods/test_fillna.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import ( Categorical, DataFrame, @@ -230,6 +232,7 @@ def test_fillna_categorical_nan(self): df = DataFrame({"a": Categorical(idx)}) tm.assert_frame_equal(df.fillna(value=NaT), df) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) implement downcast def test_fillna_downcast(self): # GH#15277 # infer int64 from float64 @@ -244,6 +247,7 @@ def test_fillna_downcast(self): expected = DataFrame({"a": [1, 0]}) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) object upcasting def test_fillna_dtype_conversion(self): # make sure that fillna on an empty frame works df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5]) @@ -268,6 +272,7 @@ def test_fillna_dtype_conversion(self): result = df.fillna(v) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_invalid_test def test_fillna_datetime_columns(self): # GH#7095 df = DataFrame( @@ -335,13 +340,13 @@ def test_frame_pad_backfill_limit(self): result = df[:2].reindex(index, method="pad", limit=5) expected = df[:2].reindex(index).fillna(method="pad") - expected.values[-3:] = np.nan + 
expected.iloc[-3:] = np.nan tm.assert_frame_equal(result, expected) result = df[-2:].reindex(index, method="backfill", limit=5) expected = df[-2:].reindex(index).fillna(method="backfill") - expected.values[:3] = np.nan + expected.iloc[:3] = np.nan tm.assert_frame_equal(result, expected) def test_frame_fillna_limit(self): @@ -352,14 +357,14 @@ def test_frame_fillna_limit(self): result = result.fillna(method="pad", limit=5) expected = df[:2].reindex(index).fillna(method="pad") - expected.values[-3:] = np.nan + expected.iloc[-3:] = np.nan tm.assert_frame_equal(result, expected) result = df[-2:].reindex(index) result = result.fillna(method="backfill", limit=5) expected = df[-2:].reindex(index).fillna(method="backfill") - expected.values[:3] = np.nan + expected.iloc[:3] = np.nan tm.assert_frame_equal(result, expected) def test_fillna_skip_certain_blocks(self): diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 6b86a13fcf1b9..2477ad79d8a2c 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -324,6 +324,7 @@ def test_interp_string_axis(self, axis_name, axis_number): expected = df.interpolate(method="linear", axis=axis_number) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) support axis=1 @pytest.mark.parametrize("method", ["ffill", "bfill", "pad"]) def test_interp_fillna_methods(self, axis, method): # GH 12918 diff --git a/pandas/tests/frame/methods/test_is_homogeneous_dtype.py b/pandas/tests/frame/methods/test_is_homogeneous_dtype.py index 0fca4e988b775..126c78a657c58 100644 --- a/pandas/tests/frame/methods/test_is_homogeneous_dtype.py +++ b/pandas/tests/frame/methods/test_is_homogeneous_dtype.py @@ -1,8 +1,13 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import Categorical, DataFrame +# _is_homogeneous_type always returns True for 
ArrayManager +pytestmark = td.skip_array_manager_invalid_test + @pytest.mark.parametrize( "data, expected", diff --git a/pandas/tests/frame/methods/test_join.py b/pandas/tests/frame/methods/test_join.py index eba92cc71a6d0..42694dc3ff37c 100644 --- a/pandas/tests/frame/methods/test_join.py +++ b/pandas/tests/frame/methods/test_join.py @@ -3,10 +3,15 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame, Index, MultiIndex, date_range, period_range import pandas._testing as tm +# TODO(ArrayManager) concat with reindexing +pytestmark = td.skip_array_manager_not_yet_implemented + @pytest.fixture def frame_with_period_index(): diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 6ddba8b5e7064..3f7f2e51add96 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -1,10 +1,14 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame, Series, Timestamp import pandas._testing as tm +pytestmark = td.skip_array_manager_not_yet_implemented + class TestDataFrameQuantile: @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_rank.py b/pandas/tests/frame/methods/test_rank.py index 4255c1cb5e65f..5b66f58b8f069 100644 --- a/pandas/tests/frame/methods/test_rank.py +++ b/pandas/tests/frame/methods/test_rank.py @@ -238,6 +238,7 @@ def test_rank_methods_frame(self): expected = DataFrame(sprank, columns=cols).astype("float64") tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize("dtype", ["O", "f8", "i8"]) def test_rank_descending(self, method, dtype): diff --git a/pandas/tests/frame/methods/test_reorder_levels.py b/pandas/tests/frame/methods/test_reorder_levels.py index 6bfbf089a6108..451fc9a5cf717 100644 --- a/pandas/tests/frame/methods/test_reorder_levels.py 
+++ b/pandas/tests/frame/methods/test_reorder_levels.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import DataFrame, MultiIndex import pandas._testing as tm @@ -47,6 +49,7 @@ def test_reorder_levels(self, frame_or_series): result = obj.reorder_levels(["L0", "L0", "L0"]) tm.assert_equal(result, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_reorder_levels_swaplevel_equivalence( self, multiindex_year_month_day_dataframe_random_data ): diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 00d4a4277a42f..e43eb3fb47b7e 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -4,6 +4,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import pandas as pd @@ -518,6 +520,7 @@ def test_reset_index_delevel_infer_dtype(self): assert is_integer_dtype(deleveled["prm1"]) assert is_float_dtype(deleveled["prm2"]) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_reset_index_with_drop( self, multiindex_year_month_day_dataframe_random_data ): @@ -616,6 +619,7 @@ def test_reset_index_empty_frame_with_datetime64_multiindex(): tm.assert_frame_equal(result, expected) +@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): # https://github.com/pandas-dev/pandas/issues/35657 df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": pd.to_datetime("2020-01-01")}) diff --git a/pandas/tests/frame/methods/test_select_dtypes.py b/pandas/tests/frame/methods/test_select_dtypes.py index f2dbe4a799a17..434df5ccccaf7 100644 --- a/pandas/tests/frame/methods/test_select_dtypes.py +++ b/pandas/tests/frame/methods/test_select_dtypes.py @@ -42,6 +42,9 @@ def 
__len__(self) -> int: def __getitem__(self, item): pass + def copy(self): + return self + class TestSelectDtypes: def test_select_dtypes_include_using_list_like(self): diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 40b3f1e89c015..aefc407d0c432 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import CategoricalIndex, DataFrame, Index, Series, date_range, offsets import pandas._testing as tm @@ -145,12 +147,13 @@ def test_shift_duplicate_columns(self): tm.assert_frame_equal(shifted[0], shifted[1]) tm.assert_frame_equal(shifted[0], shifted[2]) - def test_shift_axis1_multiple_blocks(self): + def test_shift_axis1_multiple_blocks(self, using_array_manager): # GH#35488 df1 = DataFrame(np.random.randint(1000, size=(5, 3))) df2 = DataFrame(np.random.randint(1000, size=(5, 2))) df3 = pd.concat([df1, df2], axis=1) - assert len(df3._mgr.blocks) == 2 + if not using_array_manager: + assert len(df3._mgr.blocks) == 2 result = df3.shift(2, axis=1) @@ -163,7 +166,8 @@ def test_shift_axis1_multiple_blocks(self): # Case with periods < 0 # rebuild df3 because `take` call above consolidated df3 = pd.concat([df1, df2], axis=1) - assert len(df3._mgr.blocks) == 2 + if not using_array_manager: + assert len(df3._mgr.blocks) == 2 result = df3.shift(-2, axis=1) expected = df3.take([2, 3, 4, -1, -1], axis=1) @@ -272,6 +276,7 @@ def test_datetime_frame_shift_with_freq_error(self, datetime_frame): with pytest.raises(ValueError, match=msg): no_freq.shift(freq="infer") + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) axis=1 support def test_shift_dt64values_int_fill_deprecated(self): # GH#31971 ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]) diff --git a/pandas/tests/frame/methods/test_sort_index.py 
b/pandas/tests/frame/methods/test_sort_index.py index 3be6a8453420e..221296bfd6d76 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( CategoricalDtype, @@ -371,6 +373,7 @@ def test_sort_index_multiindex(self, level): result = df.sort_index(level=level, sort_remaining=False) tm.assert_frame_equal(result, expected) + @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) groupby def test_sort_index_intervalindex(self): # this is a de-facto sort via unstack # confirming that we sort in the order of the bins diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index 987848ec697d1..cd3286fa38056 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -566,12 +566,13 @@ def test_sort_values_nat_na_position_default(self): result = expected.sort_values(["A", "date"]) tm.assert_frame_equal(result, expected) - def test_sort_values_item_cache(self): + def test_sort_values_item_cache(self, using_array_manager): # previous behavior incorrect retained an invalid _item_cache entry df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"]) df["D"] = df["A"] * 2 ser = df["A"] - assert len(df._mgr.blocks) == 2 + if not using_array_manager: + assert len(df._mgr.blocks) == 2 df.sort_values(by="A") ser.values[0] = 99 diff --git a/pandas/tests/frame/methods/test_to_dict_of_blocks.py b/pandas/tests/frame/methods/test_to_dict_of_blocks.py index 0257a5d43170f..8de47cb17d7d3 100644 --- a/pandas/tests/frame/methods/test_to_dict_of_blocks.py +++ b/pandas/tests/frame/methods/test_to_dict_of_blocks.py @@ -1,9 +1,13 @@ import numpy as np +import pandas.util._test_decorators as td + from pandas import DataFrame, MultiIndex import pandas._testing as tm from pandas.core.arrays 
import PandasArray +pytestmark = td.skip_array_manager_invalid_test + class TestToDictOfBlocks: def test_copy_blocks(self, float_frame): diff --git a/pandas/tests/frame/methods/test_to_numpy.py b/pandas/tests/frame/methods/test_to_numpy.py index 3d69c004db6bb..0682989294457 100644 --- a/pandas/tests/frame/methods/test_to_numpy.py +++ b/pandas/tests/frame/methods/test_to_numpy.py @@ -1,5 +1,7 @@ import numpy as np +import pandas.util._test_decorators as td + from pandas import DataFrame, Timestamp import pandas._testing as tm @@ -17,6 +19,7 @@ def test_to_numpy_dtype(self): result = df.to_numpy(dtype="int64") tm.assert_numpy_array_equal(result, expected) + @td.skip_array_manager_invalid_test def test_to_numpy_copy(self): arr = np.random.randn(4, 3) df = DataFrame(arr) diff --git a/pandas/tests/frame/methods/test_transpose.py b/pandas/tests/frame/methods/test_transpose.py index 8635168f1eb03..548842e653a63 100644 --- a/pandas/tests/frame/methods/test_transpose.py +++ b/pandas/tests/frame/methods/test_transpose.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import DataFrame, date_range import pandas._testing as tm @@ -79,6 +81,7 @@ def test_transpose_float(self, float_frame): for col, s in mixed_T.items(): assert s.dtype == np.object_ + @td.skip_array_manager_invalid_test def test_transpose_get_view(self, float_frame): dft = float_frame.T dft.values[:, 5:10] = 5 diff --git a/pandas/tests/frame/methods/test_values.py b/pandas/tests/frame/methods/test_values.py index fb0c5d31f692b..5426e4368722e 100644 --- a/pandas/tests/frame/methods/test_values.py +++ b/pandas/tests/frame/methods/test_values.py @@ -1,11 +1,14 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import DataFrame, NaT, Series, Timestamp, date_range, period_range import pandas._testing as tm class TestDataFrameValues: + @td.skip_array_manager_invalid_test def test_values(self, float_frame): 
float_frame.values[:, 0] = 5.0 assert (float_frame.values[:, 0] == 5).all() diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 22eb642ed8512..afc25c48beb5f 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -972,7 +972,7 @@ def test_align_frame(self): result = ts + ts[::2] expected = ts + ts - expected.values[1::2] = np.nan + expected.iloc[1::2] = np.nan tm.assert_frame_equal(result, expected) half = ts[::2] diff --git a/pandas/tests/internals/test_managers.py b/pandas/tests/internals/test_managers.py new file mode 100644 index 0000000000000..333455875904a --- /dev/null +++ b/pandas/tests/internals/test_managers.py @@ -0,0 +1,40 @@ +""" +Testing interaction between the different managers (BlockManager, ArrayManager) +""" +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +import pandas._testing as tm +from pandas.core.internals import ArrayManager, BlockManager + + +def test_dataframe_creation(): + + with pd.option_context("mode.data_manager", "block"): + df_block = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}) + assert isinstance(df_block._mgr, BlockManager) + + with pd.option_context("mode.data_manager", "array"): + df_array = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}) + assert isinstance(df_array._mgr, ArrayManager) + + # also ensure both are seen as equal + tm.assert_frame_equal(df_block, df_array) + + # conversion from one manager to the other + result = df_block._as_manager("block") + assert isinstance(result._mgr, BlockManager) + result = df_block._as_manager("array") + assert isinstance(result._mgr, ArrayManager) + tm.assert_frame_equal(result, df_block) + assert all( + array_equivalent(left, right) + for left, right in zip(result._mgr.arrays, df_array._mgr.arrays) + ) + + result = df_array._as_manager("array") + assert isinstance(result._mgr, ArrayManager) + result = 
df_array._as_manager("block") + assert isinstance(result._mgr, BlockManager) + tm.assert_frame_equal(result, df_array) + assert len(result._mgr.blocks) == 2 diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index f0d5ef19c4468..2339e21288bb5 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -3,6 +3,8 @@ import pandas._config.config as cf +import pandas.util._test_decorators as td + import pandas as pd import pandas.io.formats.format as fmt @@ -119,6 +121,7 @@ def test_ambiguous_width(self): assert adjoined == expected +@td.skip_array_manager_not_yet_implemented class TestTableSchemaRepr: @classmethod def setup_class(cls): diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 5faca6bd89dad..6ead81db1fab0 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -5,6 +5,8 @@ import pandas as pd import pandas._testing as tm +pytestmark = td.skip_array_manager_not_yet_implemented + def test_compression_roundtrip(compression): df = pd.DataFrame( diff --git a/pandas/tests/io/json/test_deprecated_kwargs.py b/pandas/tests/io/json/test_deprecated_kwargs.py index 79245bc9d34a8..7367aaefb1c1e 100644 --- a/pandas/tests/io/json/test_deprecated_kwargs.py +++ b/pandas/tests/io/json/test_deprecated_kwargs.py @@ -2,11 +2,15 @@ Tests for the deprecated keyword arguments for `read_json`. 
""" +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm from pandas.io.json import read_json +pytestmark = td.skip_array_manager_not_yet_implemented + def test_deprecated_kwargs(): df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2]) diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 215d663e68d8f..e25964f556e4e 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -6,6 +6,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, PeriodDtype import pandas as pd @@ -20,6 +22,8 @@ set_default_names, ) +pytestmark = td.skip_array_manager_not_yet_implemented + class TestBuildSchema: def setup_method(self, method): diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index b232c827f5ece..d7fc1257d8396 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ -3,11 +3,15 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import DataFrame, Index, Series, json_normalize import pandas._testing as tm from pandas.io.json._normalize import nested_to_record +pytestmark = td.skip_array_manager_not_yet_implemented + @pytest.fixture def deep_nested(): diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index dba3cb4db3ab8..c3ada52eba5aa 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -15,6 +15,9 @@ from pandas import DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json import pandas._testing as tm +pytestmark = td.skip_array_manager_not_yet_implemented + + _seriesd = tm.getSeriesData() _frame = DataFrame(_seriesd) diff --git a/pandas/tests/io/json/test_readlines.py 
b/pandas/tests/io/json/test_readlines.py index 099d99507e136..2484c12f42600 100644 --- a/pandas/tests/io/json/test_readlines.py +++ b/pandas/tests/io/json/test_readlines.py @@ -3,12 +3,16 @@ import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame, read_json import pandas._testing as tm from pandas.io.json._json import JsonReader +pytestmark = td.skip_array_manager_not_yet_implemented + @pytest.fixture def lines_json_df(): diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 74adb397d91f4..dff506809ee4f 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -16,10 +16,13 @@ import pandas._libs.json as ujson from pandas._libs.tslib import Timestamp from pandas.compat import IS64, is_platform_windows +import pandas.util._test_decorators as td from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, Timedelta, date_range import pandas._testing as tm +pytestmark = td.skip_array_manager_not_yet_implemented + def _clean_dict(d): """ diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py index 71bb6584889aa..72e8b4aea5ede 100644 --- a/pandas/tests/io/pytables/test_complex.py +++ b/pandas/tests/io/pytables/test_complex.py @@ -3,6 +3,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm @@ -10,6 +12,9 @@ from pandas.io.pytables import read_hdf +# TODO(ArrayManager) HDFStore relies on accessing the blocks +pytestmark = td.skip_array_manager_not_yet_implemented + def test_complex_fixed(setup_path): df = DataFrame( diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index 3f0fd6e7483f8..131711a32d114 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -55,6 +55,10 @@ from pandas.io.pytables import 
TableIterator # isort:skip +# TODO(ArrayManager) HDFStore relies on accessing the blocks +pytestmark = td.skip_array_manager_not_yet_implemented + + _default_compressor = "blosc" ignore_natural_naming_warning = pytest.mark.filterwarnings( "ignore:object name:tables.exceptions.NaturalNameWarning" diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index 9ee44b58d6ced..a106a579d7e52 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ b/pandas/tests/io/pytables/test_timezones.py @@ -14,6 +14,9 @@ ensure_clean_store, ) +# TODO(ArrayManager) HDFStore relies on accessing the blocks +pytestmark = td.skip_array_manager_not_yet_implemented + def _compare_with_tz(a, b): tm.assert_frame_equal(a, b) diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 725c14f410357..d31bee9aca135 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -272,7 +272,9 @@ def test_read_fspath_all(self, reader, module, path, datapath): ("to_excel", {"engine": "xlwt"}, "xlwt"), ("to_feather", {}, "pyarrow"), ("to_html", {}, "os"), - ("to_json", {}, "os"), + pytest.param( + "to_json", {}, "os", marks=td.skip_array_manager_not_yet_implemented + ), ("to_latex", {}, "os"), ("to_pickle", {}, "os"), ("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"), diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 158504082e657..76bc188afdd1f 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -8,11 +8,15 @@ import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm import pandas.io.common as icom +pytestmark = td.skip_array_manager_not_yet_implemented + @pytest.mark.parametrize( "obj", diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index b1038b6d28083..d9575a6ad81e5 100644 --- a/pandas/tests/io/test_fsspec.py +++ 
b/pandas/tests/io/test_fsspec.py @@ -247,6 +247,7 @@ def test_pickle_options(fsspectest): tm.assert_frame_equal(df, out) +@td.skip_array_manager_not_yet_implemented def test_json_options(fsspectest): df = DataFrame({"a": [0]}) df.to_json("testmem://afile", storage_options={"test": "json_write"}) diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 24944281419c3..035460185fa81 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -12,6 +12,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_categorical_dtype import pandas as pd @@ -29,6 +31,9 @@ read_stata, ) +# TODO(ArrayManager) the stata code relies on BlockManager internals (eg blknos) +pytestmark = td.skip_array_manager_not_yet_implemented + @pytest.fixture() def mixed_frame(): diff --git a/pandas/tests/io/test_user_agent.py b/pandas/tests/io/test_user_agent.py index 32399c7de7a68..fd3ca3919d416 100644 --- a/pandas/tests/io/test_user_agent.py +++ b/pandas/tests/io/test_user_agent.py @@ -8,6 +8,8 @@ import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm @@ -180,13 +182,25 @@ def do_GET(self): "responder, read_method, port, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, 34259, None), - (JSONUserAgentResponder, pd.read_json, 34260, None), + pytest.param( + JSONUserAgentResponder, + pd.read_json, + 34260, + None, + marks=td.skip_array_manager_not_yet_implemented, + ), (ParquetPyArrowUserAgentResponder, pd.read_parquet, 34268, "pyarrow"), (ParquetFastParquetUserAgentResponder, pd.read_parquet, 34273, "fastparquet"), (PickleUserAgentResponder, pd.read_pickle, 34271, None), (StataUserAgentResponder, pd.read_stata, 34272, None), (GzippedCSVUserAgentResponder, pd.read_csv, 34261, None), - (GzippedJSONUserAgentResponder, pd.read_json, 34262, None), + pytest.param( + GzippedJSONUserAgentResponder, + pd.read_json, + 34262, + None, + 
marks=td.skip_array_manager_not_yet_implemented, + ), ], ) def test_server_and_default_headers(responder, read_method, port, parquet_engine): @@ -212,13 +226,25 @@ def test_server_and_default_headers(responder, read_method, port, parquet_engine "responder, read_method, port, parquet_engine", [ (CSVUserAgentResponder, pd.read_csv, 34263, None), - (JSONUserAgentResponder, pd.read_json, 34264, None), + pytest.param( + JSONUserAgentResponder, + pd.read_json, + 34264, + None, + marks=td.skip_array_manager_not_yet_implemented, + ), (ParquetPyArrowUserAgentResponder, pd.read_parquet, 34270, "pyarrow"), (ParquetFastParquetUserAgentResponder, pd.read_parquet, 34275, "fastparquet"), (PickleUserAgentResponder, pd.read_pickle, 34273, None), (StataUserAgentResponder, pd.read_stata, 34274, None), (GzippedCSVUserAgentResponder, pd.read_csv, 34265, None), - (GzippedJSONUserAgentResponder, pd.read_json, 34266, None), + pytest.param( + GzippedJSONUserAgentResponder, + pd.read_json, + 34266, + None, + marks=td.skip_array_manager_not_yet_implemented, + ), ], ) def test_server_and_custom_headers(responder, read_method, port, parquet_engine): diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py index a15dc0751aa7d..e479e5c1416db 100644 --- a/pandas/tests/series/methods/test_describe.py +++ b/pandas/tests/series/methods/test_describe.py @@ -1,8 +1,13 @@ import numpy as np +import pandas.util._test_decorators as td + from pandas import Period, Series, Timedelta, Timestamp, date_range import pandas._testing as tm +# TODO(ArrayManager) quantile is needed for describe() +pytestmark = td.skip_array_manager_not_yet_implemented + class TestSeriesDescribe: def test_describe(self): diff --git a/pandas/tests/series/methods/test_quantile.py b/pandas/tests/series/methods/test_quantile.py index 1d3e91d07afe3..5771d8e2b8a47 100644 --- a/pandas/tests/series/methods/test_quantile.py +++ b/pandas/tests/series/methods/test_quantile.py @@ -1,6 +1,8 @@ 
import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_integer import pandas as pd @@ -8,6 +10,8 @@ import pandas._testing as tm from pandas.core.indexes.datetimes import Timestamp +pytestmark = td.skip_array_manager_not_yet_implemented + class TestSeriesQuantile: def test_quantile(self, datetime_series): diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 209a4233fc3b7..95ef2f6c00fe8 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -274,3 +274,18 @@ def async_mark(): async_mark = pytest.mark.skip(reason="Missing dependency pytest-asyncio") return async_mark + + +# Note: we are using a string as condition (and not for example +# `get_option("mode.data_manager") == "array"`) because this needs to be +# evaluated at test time (otherwise this boolean condition gets evaluated +# at import time, when the pd.options.mode.data_manager has not yet been set) + +skip_array_manager_not_yet_implemented = pytest.mark.skipif( + "config.getvalue('--array-manager')", reason="JSON C code relies on Blocks" +) + +skip_array_manager_invalid_test = pytest.mark.skipif( + "config.getvalue('--array-manager')", + reason="Test that relies on BlockManager internals or specific behaviour", +)
Related to the discussion in https://github.com/pandas-dev/pandas/issues/10556, and following up on the mailing list discussion *"A case for a simplified (non-consolidating) BlockManager with 1D blocks"* ([archive](https://mail.python.org/pipermail/pandas-dev/2020-May/001219.html)). This branch experiments with an *"array manager"*, storing a list of 1D arrays instead of blocks. The idea is that this `ArrayManager` could optionally be used instead of `BlockManager`. If we ensure the "DataManager" has a clear interface for the rest of pandas (and thus parts outside of the internals don't rely on details like block layout, xref https://github.com/pandas-dev/pandas/issues/34669), this should be possible without many changes outside of /core/internals. Some notes on this experiment: - This is not a complete POC, not every aspect and behaviour of the BlockManager has already been replicated, and there are still places in pandas that rely on the blocks being present, so lots of tests are still failing (although changes in behaviour are also desired). That said, a *lot* of the basic operations do work. Two illustrations of this: - An updated version of the notebook I showed in the mailing list discussion as well: with a certain setup, comparing a set of operations between block vs array manager: https://nbviewer.jupyter.org/gist/jorisvandenbossche/f917d4301d21069e2be2e3b7c7aa4d07 - I ran the arithmetic.py benchmark file, comparing against master, see below for the results. - For now, I focused on an ArrayManager storing a list of *numpy arrays*. Of course we need to expand that to support ExtensionArrays as well (or ExtensionArrays only?), but the reason I limited to numpy arrays for now: besides making it a bit simpler to experiment with, this also gives a fairer comparison with the consolidated BlockManager (because it focuses on the numpy array being 1D vs 2D, and doesn't mix in performance/implementation differences of numpy array vs ExtensionArray).
- Personally, I think this looks promising. Many of the methods are a *lot* simpler than the BlockManager equivalent (although not every aspect is implemented yet, that's correct). And for the case I showed in the notebook, performance also looks good. For the benchmark suite I ran, there are obviously slowdowns for the "wide dataframe" benchmarks. There is still a lot of work needed to make this fully working with the rest of pandas, though ;) - Given the early proof of concept stage, detailed code feedback is not yet needed, but I would find it very useful to discuss the following aspects: - High-level feedback on the approach: does the approach of the two subclasses look interesting? The approach of the ArrayManager itself storing a list of arrays? ... - What to do with Series, which now is a SingleBlockManager inheriting from BlockManager (should we also have a "SingleArrayManager"?) - *If* we find this interesting, how can we go from here? How do we decide on this? (what aspects already need to work, how fast does it need to be?) I don't think getting a fully complete implementation passing all tests is possible in a single PR. Are we fine with merging something partial in master and continuing from there? Or a shared feature branch in upstream? ... <details> <summary>Benchmark results for asv_bench/arithmetic.py</summary> As an example, I ran `asv continuous -f 1.1 upstream/master HEAD -b arithmetic`. The benchmarks with a slowdown bigger than a factor 2 can basically be brought back to two cases: - Benchmarks for "wide" dataframes (eg `FrameWithFrameWide` using a case with n_cols > n_rows) - Benchmarks from the `IntFrameWithScalar` class: from a quick profile, it seems that the usage of numexpr is the cause, and disabling this seems to reduce the slowdown to a factor 2.
The numexpr code (and checking if it should be used etc) apparently has a high overhead per call, which I assume is something that can be solved (moving those checks a level higher up, so we don't need to repeat it for each column) ``` before after ratio [b45327f5] [047f9091] <master> ! 40.6±6ms failed n/a arithmetic.Ops.time_frame_multi_and(False, 'default') ! 32.7±2ms failed n/a arithmetic.Ops.time_frame_multi_and(False, 1) ! 26.5±1ms failed n/a arithmetic.Ops.time_frame_multi_and(True, 'default') ! 37.7±2ms failed n/a arithmetic.Ops.time_frame_multi_and(True, 1) + 1.06±0.3ms 93.5±7ms 88.57 arithmetic.FrameWithFrameWide.time_op_same_blocks(<built-in function gt>) + 1.51±0.2ms 80.6±3ms 53.34 arithmetic.FrameWithFrameWide.time_op_same_blocks(<built-in function add>) + 1.22±0.08ms 55.1±5ms 45.19 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('le') + 1.30±0.07ms 55.6±20ms 42.83 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('ne') + 2.12±0.4ms 90.1±4ms 42.47 arithmetic.FrameWithFrameWide.time_op_different_blocks(<built-in function gt>) + 1.17±0.04ms 49.4±4ms 42.38 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('gt') + 1.28±0.07ms 52.9±3ms 41.28 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('lt') + 1.29±0.2ms 52.5±0.6ms 40.63 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('ge') + 1.44±0.02ms 56.8±7ms 39.56 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('eq') + 2.08±0.3ms 78.9±10ms 37.90 arithmetic.Ops2.time_frame_float_mod + 2.34±0.1ms 78.3±4ms 33.51 arithmetic.FrameWithFrameWide.time_op_different_blocks(<built-in function add>) + 1.66±0.2ms 46.6±1ms 28.00 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('mul') + 1.78±0.2ms 48.2±5ms 27.02 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('truediv') + 1.14±0.04ms 26.8±4ms 23.49 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 
3.0, <built-in function le>) + 1.83±0.2ms 42.9±1ms 23.39 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('add') + 1.94±0.3ms 45.1±4ms 23.29 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('sub') + 1.23±0.07ms 23.0±3ms 18.65 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function ge>) + 1.33±0.08ms 22.8±1ms 17.14 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function eq>) + 1.03±0.05ms 17.6±2ms 17.13 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function ge>) + 1.65±0.5ms 28.1±7ms 17.00 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function eq>) + 1.21±0.05ms 20.1±3ms 16.67 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function gt>) + 1.18±0.03ms 19.4±0.9ms 16.54 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function eq>) + 1.08±0.07ms 17.8±1ms 16.53 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function lt>) + 1.22±0.05ms 20.0±2ms 16.41 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function gt>) + 1.30±0.06ms 21.2±3ms 16.28 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function ne>) + 1.15±0.06ms 18.6±3ms 16.18 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function lt>) + 1.42±0.1ms 22.6±1ms 15.96 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function lt>) + 1.11±0.01ms 17.6±0.4ms 15.85 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function ne>) + 5.30±0.8ms 81.7±20ms 15.40 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('lt') + 1.37±0.2ms 20.7±3ms 
15.09 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function gt>) + 1.22±0.05ms 18.0±6ms 14.72 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function ge>) + 1.28±0.1ms 18.6±3ms 14.55 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function gt>) + 1.17±0.08ms 17.0±3ms 14.54 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function gt>) + 1.22±0.1ms 17.6±0.8ms 14.44 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function eq>) + 1.35±0.1ms 19.4±2ms 14.35 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function le>) + 1.35±0.1ms 19.2±4ms 14.21 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function ge>) + 4.36±0.3ms 61.8±8ms 14.17 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('le') + 1.31±0.1ms 18.5±2ms 14.09 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function lt>) + 4.48±0.5ms 62.9±5ms 14.06 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('ge') + 1.15±0.1ms 16.1±1ms 14.01 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function ne>) + 1.33±0.1ms 18.6±2ms 14.00 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function ge>) + 4.37±0.4ms 58.9±2ms 13.48 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('ne') + 1.22±0.2ms 16.2±3ms 13.25 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function le>) + 1.25±0.1ms 16.5±1ms 13.13 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function le>) + 1.44±0.2ms 18.6±4ms 12.90 
arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function ge>) + 1.75±0.3ms 22.3±2ms 12.74 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function eq>) + 1.42±0.3ms 18.0±7ms 12.68 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function gt>) + 1.36±0.1ms 17.2±1ms 12.67 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function ne>) + 440±30μs 5.57±0.1ms 12.65 arithmetic.Ops2.time_frame_series_dot + 1.63±0.2ms 20.6±2ms 12.65 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function lt>) + 1.35±0.07ms 17.0±3ms 12.58 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function le>) + 1.34±0.2ms 16.7±1ms 12.46 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function eq>) + 1.50±0.1ms 18.6±5ms 12.43 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function ge>) + 1.35±0.07ms 16.8±1ms 12.42 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function ge>) + 1.35±0.1ms 16.7±2ms 12.37 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function le>) + 1.55±0.3ms 18.9±2ms 12.20 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function le>) + 1.67±0.3ms 20.3±5ms 12.17 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function ne>) + 1.55±0.2ms 18.5±0.7ms 11.94 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function le>) + 5.05±0.5ms 59.1±3ms 11.70 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('gt') + 1.51±0.2ms 17.6±2ms 11.66 
arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function lt>) + 1.33±0.08ms 15.3±1ms 11.50 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function ne>) + 4.47±0.1ms 51.2±1ms 11.45 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('eq') + 1.35±0.1ms 15.4±2ms 11.45 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function lt>) + 1.76±0.5ms 19.8±2ms 11.28 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function lt>) + 1.55±0.09ms 16.8±0.3ms 10.86 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function ne>) + 1.71±0.1ms 18.2±2ms 10.58 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function eq>) + 1.51±0.2ms 15.9±3ms 10.54 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function eq>) + 1.53±0.2ms 15.6±0.3ms 10.19 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function ne>) + 1.95±0.2ms 19.7±5ms 10.08 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function gt>) + 2.22±0.08ms 21.6±4ms 9.73 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function add>) + 1.77±0.08ms 16.7±1ms 9.48 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function gt>) + 2.19±0.1ms 19.9±2ms 9.08 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function mul>) + 1.91±0.04ms 17.0±2ms 8.88 arithmetic.Ops.time_frame_comparison(True, 'default') + 2.18±0.1ms 19.0±1ms 8.73 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function add>) + 2.23±0.08ms 19.1±1ms 8.59 
arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function sub>) + 2.24±0.07ms 19.0±3ms 8.47 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function mul>) + 2.34±0.06ms 19.5±2ms 8.31 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function truediv>) + 2.52±0.2ms 20.3±6ms 8.06 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function truediv>) + 2.39±0.2ms 19.2±2ms 8.05 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function truediv>) + 3.07±0.4ms 24.4±5ms 7.94 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function mod>) + 2.24±0.1ms 17.5±2ms 7.85 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function add>) + 2.24±0.2ms 17.4±0.7ms 7.79 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function sub>) + 2.33±0.1ms 18.0±2ms 7.73 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function mul>) + 2.15±0.1ms 16.4±4ms 7.60 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function sub>) + 2.10±0.05ms 15.9±2ms 7.57 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function add>) + 2.27±0.1ms 16.8±1ms 7.39 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function add>) + 3.59±0.1ms 26.1±5ms 7.27 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function mod>) + 2.32±0.1ms 16.8±3ms 7.25 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function sub>) + 2.36±0.08ms 17.1±0.7ms 7.23 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 
'numpy.float64'>, 4, <built-in function truediv>) + 2.42±0.2ms 17.4±2ms 7.17 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function sub>) + 2.31±0.09ms 16.4±0.9ms 7.11 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function add>) + 7.34±0.9ms 52.2±2ms 7.10 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('add') + 2.32±0.1ms 16.4±0.9ms 7.07 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function add>) + 2.25±0.2ms 15.8±2ms 7.03 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function sub>) + 2.51±0.5ms 17.3±2ms 6.91 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function add>) + 2.43±0.1ms 16.7±0.8ms 6.84 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function mul>) + 2.24±0.1ms 15.2±2ms 6.81 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function mul>) + 7.81±1ms 52.9±4ms 6.78 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('sub') + 2.48±0.2ms 16.4±2ms 6.62 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function mul>) + 6.82±1ms 44.4±0.7ms 6.51 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('mul') + 2.25±0.05ms 14.6±0.8ms 6.48 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function sub>) + 3.14±0.7ms 19.8±2ms 6.30 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function mod>) + 2.57±0.2ms 15.9±2ms 6.19 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function sub>) + 2.57±0.1ms 15.8±2ms 6.16 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function 
truediv>) + 7.70±1ms 47.2±3ms 6.13 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('truediv') + 3.02±0.1ms 18.4±3ms 6.08 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function mod>) + 2.79±0.2ms 16.8±0.8ms 6.04 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function truediv>) + 3.16±0.3ms 19.1±0.7ms 6.04 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function mod>) + 2.51±0.2ms 14.9±0.5ms 5.92 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function mul>) + 2.71±0.1ms 15.9±0.8ms 5.86 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function mul>) + 2.72±0.3ms 15.9±1ms 5.83 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function truediv>) + 11.9±1ms 64.0±5ms 5.39 arithmetic.Ops2.time_frame_int_mod + 3.59±0.4ms 19.1±5ms 5.33 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 2, <built-in function mod>) + 6.23±0.4ms 32.7±6ms 5.25 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 2, <built-in function mod>) + 3.28±0.2ms 17.2±2ms 5.23 arithmetic.Ops.time_frame_add(True, 'default') + 23.7±6ms 112±7ms 4.70 arithmetic.FrameWithFrameWide.time_op_same_blocks(<built-in function floordiv>) + 3.51±0.4ms 16.5±0.6ms 4.70 arithmetic.Ops.time_frame_mult(True, 'default') + 3.61±2ms 16.3±1ms 4.52 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function truediv>) + 45.8±4ms 194±20ms 4.25 arithmetic.FrameWithFrameWide.time_op_different_blocks(<built-in function floordiv>) + 5.64±0.6ms 21.9±1ms 3.89 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 4, <built-in function mod>) + 3.13±0.1ms 11.4±0.5ms 3.63 arithmetic.Ops.time_frame_comparison(True, 1) + 12.2±0.8ms 
42.5±4ms 3.47 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 5.0, <built-in function pow>) + 4.03±0.7ms 11.2±0.3ms 2.79 arithmetic.Ops.time_frame_add(True, 1) + 53.0±6ms 143±10ms 2.69 arithmetic.Ops2.time_frame_float_floor_by_zero + 4.11±0.2ms 11.1±1ms 2.69 arithmetic.Ops.time_frame_mult(True, 1) + 54.9±4ms 125±9ms 2.28 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('floordiv') + 25.0±0.6ms 55.9±5ms 2.24 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 4, <built-in function pow>) + 2.42±0.2ms 5.21±0.6ms 2.16 arithmetic.Ops.time_frame_comparison(False, 'default') + 16.2±1ms 31.9±3ms 1.97 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.int64'>, 3.0, <built-in function pow>) + 30.9±3ms 58.1±10ms 1.88 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 5.0, <built-in function pow>) + 3.36±0.3ms 5.76±0.4ms 1.71 arithmetic.Ops.time_frame_add(False, 'default') + 3.10±0.3ms 5.03±0.3ms 1.62 arithmetic.Ops.time_frame_comparison(False, 1) + 30.5±3ms 49.2±9ms 1.61 arithmetic.IntFrameWithScalar.time_frame_op_with_scalar(<class 'numpy.float64'>, 3.0, <built-in function pow>) + 3.42±0.3ms 5.51±0.4ms 1.61 arithmetic.Ops.time_frame_mult(False, 1) + 3.52±0.2ms 5.63±0.1ms 1.60 arithmetic.Ops.time_frame_add(False, 1) + 3.60±0.2ms 5.74±0.5ms 1.59 arithmetic.Ops.time_frame_mult(False, 'default') + 57.9±1ms 89.7±6ms 1.55 arithmetic.Ops2.time_frame_float_div + 32.1±0.5ms 48.7±2ms 1.52 arithmetic.Ops2.time_frame_dot + 2.96±0.06ms 4.32±0.4ms 1.46 arithmetic.DateInferOps.time_add_timedeltas + 65.9±2ms 93.8±1ms 1.42 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis1('pow') + 106±2ms 132±3ms 1.25 arithmetic.MixedFrameWithSeriesAxis.time_frame_op_with_series_axis0('pow') + 1.33±0.01ms 1.64±0.2ms 1.24 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<YearEnd: month=12>) + 7.09±0.2ms 8.49±0.5ms 1.20 
arithmetic.DateInferOps.time_subtract_datetimes + 1.13±0ms 1.33±0.09ms 1.18 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<YearBegin: month=1>) + 1.25±0.02ms 1.47±0.1ms 1.18 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<SemiMonthEnd: day_of_month=15>) + 2.52±0.04ms 2.97±0.2ms 1.18 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<BusinessDay>) + 1.16±0.01ms 1.32±0.06ms 1.13 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<QuarterBegin: startingMonth=3>) - 1.67±0.2ms 1.42±0.02ms 0.85 arithmetic.OffsetArrayArithmetic.time_add_dti_offset(<MonthEnd>) - 282±20μs 230±5μs 0.81 arithmetic.NumericInferOps.time_subtract(<class 'numpy.int8'>) - 4.36±0.2ms 3.54±0.3ms 0.81 arithmetic.NumericInferOps.time_modulo(<class 'numpy.uint16'>) - 1.29±0.1ms 1.03±0.06ms 0.80 arithmetic.NumericInferOps.time_multiply(<class 'numpy.int64'>) - 1.77±0.09ms 1.39±0.03ms 0.79 arithmetic.OffsetArrayArithmetic.time_add_dti_offset(<SemiMonthBegin: day_of_month=15>) - 1.54±0.2ms 1.13±0.02ms 0.74 arithmetic.NumericInferOps.time_divide(<class 'numpy.int8'>) - 301±40μs 221±4μs 0.73 arithmetic.OffsetArrayArithmetic.time_add_series_offset(<Day>) - 3.85±0.5ms 2.58±0.2ms 0.67 arithmetic.OffsetArrayArithmetic.time_add_dti_offset(<BusinessDay>) SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE DECREASED. ``` </details>
https://api.github.com/repos/pandas-dev/pandas/pulls/36010
2020-08-31T14:17:54Z
2021-01-13T13:23:14Z
2021-01-13T13:23:14Z
2021-01-13T14:10:37Z
Backport PR #35936: (REGR: Fix inplace updates on column to set correct values)
diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index a87e06678faad..b0d375a52f8ac 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) +- Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 4c3805f812bb0..c4a866edba490 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1046,6 +1046,7 @@ def iset(self, loc: Union[int, slice, np.ndarray], value): Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ + value = extract_array(value, extract_numpy=True) # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 78000c0252375..11dc3069ee08e 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -354,6 +354,12 @@ def test_fillna_frame(self, data_missing): # Non-scalar "scalar" values. 
super().test_fillna_frame(data_missing) + @pytest.mark.skip("Invalid test") + def test_fillna_fill_other(self, data): + # inplace update doesn't work correctly with patched extension arrays + # extract_array returns PandasArray, while dtype is a numpy dtype + super().test_fillna_fill_other(data_missing) + class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): @pytest.mark.skip("Incorrect parent test") diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index c9fec3215d57f..b8183eb9f4185 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -626,3 +626,17 @@ def test_add_column_with_pandas_array(self): assert type(df["c"]._mgr.blocks[0]) == ObjectBlock assert type(df2["c"]._mgr.blocks[0]) == ObjectBlock tm.assert_frame_equal(df, df2) + + +def test_update_inplace_sets_valid_block_values(): + # https://github.com/pandas-dev/pandas/issues/33457 + df = pd.DataFrame({"a": pd.Series([1, 2, None], dtype="category")}) + + # inplace update of a single column + df["a"].fillna(1, inplace=True) + + # check we havent put a Series into any block.values + assert isinstance(df._mgr.blocks[0].values, pd.Categorical) + + # smoketest for OP bug from GH#35731 + assert df.isnull().sum().sum() == 0
xref #35936
https://api.github.com/repos/pandas-dev/pandas/pulls/36009
2020-08-31T13:08:37Z
2020-09-01T14:16:35Z
2020-09-01T14:16:35Z
2020-09-01T14:16:51Z
TYP: check_untyped_defs core.internals.concat
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 99a586f056b12..88839d2211f81 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -1,10 +1,11 @@ from collections import defaultdict import copy -from typing import List +from typing import Dict, List import numpy as np from pandas._libs import NaT, internals as libinternals +from pandas._typing import DtypeObj from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import maybe_promote @@ -100,10 +101,10 @@ def _get_mgr_concatenation_plan(mgr, indexers): """ # Calculate post-reindex shape , save for item axis which will be separate # for each block anyway. - mgr_shape = list(mgr.shape) + mgr_shape_list = list(mgr.shape) for ax, indexer in indexers.items(): - mgr_shape[ax] = len(indexer) - mgr_shape = tuple(mgr_shape) + mgr_shape_list[ax] = len(indexer) + mgr_shape = tuple(mgr_shape_list) if 0 in indexers: ax0_indexer = indexers.pop(0) @@ -126,9 +127,9 @@ def _get_mgr_concatenation_plan(mgr, indexers): join_unit_indexers = indexers.copy() - shape = list(mgr_shape) - shape[0] = len(placements) - shape = tuple(shape) + shape_list = list(mgr_shape) + shape_list[0] = len(placements) + shape = tuple(shape_list) if blkno == -1: unit = JoinUnit(None, shape) @@ -374,8 +375,8 @@ def _get_empty_dtype_and_na(join_units): else: dtypes[i] = unit.dtype - upcast_classes = defaultdict(list) - null_upcast_classes = defaultdict(list) + upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) + null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) for dtype, unit in zip(dtypes, join_units): if dtype is None: continue diff --git a/setup.cfg b/setup.cfg index 2ba22e5aad3c7..c10624d60aaff 100644 --- a/setup.cfg +++ b/setup.cfg @@ -184,9 +184,6 @@ check_untyped_defs=False [mypy-pandas.core.internals.blocks] check_untyped_defs=False -[mypy-pandas.core.internals.concat] -check_untyped_defs=False - 
[mypy-pandas.core.internals.construction] check_untyped_defs=False
pandas\core\internals\concat.py:106: error: Incompatible types in assignment (expression has type "Tuple[Any, ...]", variable has type "List[Any]") [assignment] pandas\core\internals\concat.py:131: error: Incompatible types in assignment (expression has type "Tuple[Any, ...]", variable has type "List[Any]") [assignment] pandas\core\internals\concat.py:377: error: Need type annotation for 'upcast_classes' [var-annotated] pandas\core\internals\concat.py:378: error: Need type annotation for 'null_upcast_classes' [var-annotated]
https://api.github.com/repos/pandas-dev/pandas/pulls/36008
2020-08-31T12:33:43Z
2020-08-31T20:26:35Z
2020-08-31T20:26:35Z
2020-09-01T09:16:11Z
TYP: misc typing cleanup in core/indexes/multi.py
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index b29c27982f087..f66b009e6d505 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -19,7 +19,7 @@ from pandas._libs import algos as libalgos, index as libindex, lib from pandas._libs.hashtable import duplicated_int64 -from pandas._typing import AnyArrayLike, Scalar +from pandas._typing import AnyArrayLike, Label, Scalar from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, doc @@ -449,7 +449,12 @@ def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiInde ) @classmethod - def from_tuples(cls, tuples, sortorder=None, names=None): + def from_tuples( + cls, + tuples, + sortorder: Optional[int] = None, + names: Optional[Sequence[Label]] = None, + ): """ Convert list of tuples to MultiIndex. @@ -490,6 +495,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None): elif is_iterator(tuples): tuples = list(tuples) + arrays: List[Sequence[Label]] if len(tuples) == 0: if names is None: raise TypeError("Cannot infer number of levels from empty list") @@ -700,8 +706,13 @@ def levels(self): return FrozenList(result) def _set_levels( - self, levels, level=None, copy=False, validate=True, verify_integrity=False - ): + self, + levels, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: # This is NOT part of the levels property because it should be # externally not allowed to set levels. 
User beware if you change # _levels directly @@ -719,10 +730,10 @@ def _set_levels( ) else: level_numbers = [self._get_level_number(lev) for lev in level] - new_levels = list(self._levels) + new_levels_list = list(self._levels) for lev_num, lev in zip(level_numbers, levels): - new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy() - new_levels = FrozenList(new_levels) + new_levels_list[lev_num] = ensure_index(lev, copy=copy)._shallow_copy() + new_levels = FrozenList(new_levels_list) if verify_integrity: new_codes = self._verify_integrity(levels=new_levels) @@ -875,8 +886,13 @@ def codes(self): return self._codes def _set_codes( - self, codes, level=None, copy=False, validate=True, verify_integrity=False - ): + self, + codes, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: if validate: if level is None and len(codes) != self.nlevels: raise ValueError("Length of codes must match number of levels") @@ -890,11 +906,13 @@ def _set_codes( ) else: level_numbers = [self._get_level_number(lev) for lev in level] - new_codes = list(self._codes) + new_codes_list = list(self._codes) for lev_num, level_codes in zip(level_numbers, codes): lev = self.levels[lev_num] - new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy) - new_codes = FrozenList(new_codes) + new_codes_list[lev_num] = _coerce_indexer_frozen( + level_codes, lev, copy=copy + ) + new_codes = FrozenList(new_codes_list) if verify_integrity: new_codes = self._verify_integrity(codes=new_codes) @@ -2435,7 +2453,7 @@ def _get_partial_string_timestamp_match_key(self, key): if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing: # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) - key = tuple([key] + [slice(None)] * (len(self.levels) - 1)) + key = (key,) + (slice(None),) * (len(self.levels) - 1) if isinstance(key, tuple): # Convert (..., '2016-01-01', ...) 
in tuple to @@ -3086,7 +3104,7 @@ def _update_indexer(idxr, indexer=indexer): elif is_list_like(k): # a collection of labels to include from this level (these # are or'd) - indexers = None + indexers: Optional[Int64Index] = None for x in k: try: idxrs = _convert_to_indexer(
pandas\core\indexes\multi.py:496: error: Need type annotation for 'arrays' [var-annotated] pandas\core\indexes\multi.py:722: error: Incompatible types in assignment (expression has type "List[Any]", variable has type "FrozenList") [assignment] pandas\core\indexes\multi.py:893: error: Incompatible types in assignment (expression has type "List[Any]", variable has type "FrozenList") [assignment] pandas\core\indexes\multi.py:2438: error: List item 0 has incompatible type "slice"; expected "str" [list-item] pandas\core\indexes\multi.py:3095: error: Unsupported left operand type for | ("None") [operator]
https://api.github.com/repos/pandas-dev/pandas/pulls/36007
2020-08-31T11:52:25Z
2020-09-01T16:26:46Z
2020-09-01T16:26:46Z
2020-09-01T16:33:31Z
TYP: misc typing cleanup for core/computation/expressions.py
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 05a5538a88772..a9c0cb0571446 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -6,6 +6,7 @@ """ import operator +from typing import List, Set import warnings import numpy as np @@ -21,7 +22,7 @@ import numexpr as ne _TEST_MODE = None -_TEST_RESULT = None +_TEST_RESULT: List[bool] = list() _USE_NUMEXPR = _NUMEXPR_INSTALLED _evaluate = None _where = None @@ -75,7 +76,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: # check for dtype compatibility - dtypes = set() + dtypes: Set[str] = set() for o in [a, b]: # Series implements dtypes, check for dimension count as well if hasattr(o, "dtypes") and o.ndim > 1: @@ -247,25 +248,28 @@ def where(cond, a, b, use_numexpr=True): return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) -def set_test_mode(v=True): +def set_test_mode(v: bool = True) -> None: """ - Keeps track of whether numexpr was used. Stores an additional ``True`` - for every successful use of evaluate with numexpr since the last - ``get_test_result`` + Keeps track of whether numexpr was used. + + Stores an additional ``True`` for every successful use of evaluate with + numexpr since the last ``get_test_result``. """ global _TEST_MODE, _TEST_RESULT _TEST_MODE = v _TEST_RESULT = [] -def _store_test_result(used_numexpr): +def _store_test_result(used_numexpr: bool) -> None: global _TEST_RESULT if used_numexpr: _TEST_RESULT.append(used_numexpr) -def get_test_result(): - """get test result and reset test_results""" +def get_test_result() -> List[bool]: + """ + Get test result and reset test_results. + """ global _TEST_RESULT res = _TEST_RESULT _TEST_RESULT = []
pandas\core\computation\expressions.py:78: error: Need type annotation for 'dtypes' (hint: "dtypes: Set[<type>] = ...") [var-annotated] pandas\core\computation\expressions.py:258: error: Need type annotation for '_TEST_RESULT' (hint: "_TEST_RESULT: List[<type>] = ...") [var-annotated]
https://api.github.com/repos/pandas-dev/pandas/pulls/36005
2020-08-31T11:07:47Z
2020-08-31T20:45:17Z
2020-08-31T20:45:17Z
2020-09-01T09:19:38Z
BUG: Can't restore index from parquet with offset-specified timezone #35997
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 21d54e2514f8b..a4597e61971a1 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -392,6 +392,7 @@ I/O - Bug in :meth:`read_csv` with ``engine='python'`` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`) - Removed ``private_key`` and ``verbose`` from :func:`read_gbq` as they are no longer supported in ``pandas-gbq`` (:issue:`34654`, :issue:`30200`) - Bumped minimum pytables version to 3.5.1 to avoid a ``ValueError`` in :meth:`read_hdf` (:issue:`24839`) +- Bug in :meth:`read_parquet` with fixed offset timezones. String representation of timezones was not recognized (:issue:`35997`, :issue:`36004`) Plotting ^^^^^^^^ diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index b82291a71057e..3deabc57ec522 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,4 +1,4 @@ -from datetime import timezone +from datetime import timedelta, timezone from cpython.datetime cimport datetime, timedelta, tzinfo @@ -102,6 +102,14 @@ cpdef inline tzinfo maybe_get_tz(object tz): # On Python 3 on Windows, the filename is not always set correctly. 
if isinstance(tz, _dateutil_tzfile) and '.tar.gz' in tz._filename: tz._filename = zone + elif tz[0] in {'-', '+'}: + hours = int(tz[0:3]) + minutes = int(tz[0] + tz[4:6]) + tz = timezone(timedelta(hours=hours, minutes=minutes)) + elif tz[0:4] in {'UTC-', 'UTC+'}: + hours = int(tz[3:6]) + minutes = int(tz[3] + tz[7:9]) + tz = timezone(timedelta(hours=hours, minutes=minutes)) else: tz = pytz.timezone(tz) elif is_integer_object(tz): diff --git a/pandas/conftest.py b/pandas/conftest.py index 5ac5e3670f69f..998c45d82fb4d 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -857,6 +857,10 @@ def iris(datapath): "Asia/Tokyo", "dateutil/US/Pacific", "dateutil/Asia/Singapore", + "+01:15", + "-02:15", + "UTC+01:15", + "UTC-02:15", tzutc(), tzlocal(), FixedOffset(300), diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index f7b25f8c0eeac..9114edc19315f 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -125,6 +125,21 @@ def df_full(): ) +@pytest.fixture( + params=[ + datetime.datetime.now(datetime.timezone.utc), + datetime.datetime.now(datetime.timezone.min), + datetime.datetime.now(datetime.timezone.max), + datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"), + datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"), + ] +) +def timezone_aware_date_list(request): + return request.param + + def check_round_trip( df, engine=None, @@ -134,6 +149,7 @@ def check_round_trip( expected=None, check_names=True, check_like=False, + check_dtype=True, repeat=2, ): """Verify parquet serializer and deserializer produce the same results. 
@@ -175,7 +191,11 @@ def compare(repeat): actual = read_parquet(path, **read_kwargs) tm.assert_frame_equal( - expected, actual, check_names=check_names, check_like=check_like + expected, + actual, + check_names=check_names, + check_like=check_like, + check_dtype=check_dtype, ) if path is None: @@ -739,6 +759,21 @@ def test_timestamp_nanoseconds(self, pa): df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)}) check_round_trip(df, pa, write_kwargs={"version": "2.0"}) + def test_timezone_aware_index(self, pa, timezone_aware_date_list): + idx = 5 * [timezone_aware_date_list] + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + # see gh-36004 + # compare time(zone) values only, skip their class: + # pyarrow always creates fixed offset timezones using pytz.FixedOffset() + # even if it was datetime.timezone() originally + # + # technically they are the same: + # they both implement datetime.tzinfo + # they both wrap datetime.timedelta() + # this use-case sets the resolution to 1 minute + check_round_trip(df, pa, check_dtype=False) + @td.skip_if_no("pyarrow", min_version="0.17") def test_filter_row_groups(self, pa): # https://github.com/pandas-dev/pandas/issues/26551 @@ -877,3 +912,12 @@ def test_empty_dataframe(self, fp): expected = df.copy() expected.index.name = "index" check_round_trip(df, fp, expected=expected) + + def test_timezone_aware_index(self, fp, timezone_aware_date_list): + idx = 5 * [timezone_aware_date_list] + + df = pd.DataFrame(index=idx, data={"index_as_col": idx}) + + expected = df.copy() + expected.index.name = "index" + check_round_trip(df, fp, expected=expected) diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py index 81b41f567976d..e49f511fe3cc4 100644 --- a/pandas/tests/tslibs/test_timezones.py +++ b/pandas/tests/tslibs/test_timezones.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta, timezone import dateutil.tz import pytest @@ -118,3 
+118,25 @@ def test_maybe_get_tz_invalid_types(): msg = "<class 'pandas._libs.tslibs.timestamps.Timestamp'>" with pytest.raises(TypeError, match=msg): timezones.maybe_get_tz(Timestamp.now("UTC")) + + +def test_maybe_get_tz_offset_only(): + # see gh-36004 + + # timezone.utc + tz = timezones.maybe_get_tz(timezone.utc) + assert tz == timezone(timedelta(hours=0, minutes=0)) + + # without UTC+- prefix + tz = timezones.maybe_get_tz("+01:15") + assert tz == timezone(timedelta(hours=1, minutes=15)) + + tz = timezones.maybe_get_tz("-01:15") + assert tz == timezone(-timedelta(hours=1, minutes=15)) + + # with UTC+- prefix + tz = timezones.maybe_get_tz("UTC+02:45") + assert tz == timezone(timedelta(hours=2, minutes=45)) + + tz = timezones.maybe_get_tz("UTC-02:45") + assert tz == timezone(-timedelta(hours=2, minutes=45))
- [x] closes #35997 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/36004
2020-08-31T08:30:23Z
2020-10-07T01:44:44Z
2020-10-07T01:44:44Z
2020-10-07T09:23:21Z