title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Upgrade GitHub Actions versions
diff --git a/.github/workflows/asv-bot.yml b/.github/workflows/asv-bot.yml index f3946aeb84a63..18e13a5ff51c9 100644 --- a/.github/workflows/asv-bot.yml +++ b/.github/workflows/asv-bot.yml @@ -29,19 +29,19 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ~/conda_pkgs_dir key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }} # Although asv sets up its own env, deps are still needed # during discovery process - - uses: conda-incubator/setup-miniconda@v2 + - uses: conda-incubator/setup-miniconda@v2.1.1 with: activate-environment: pandas-dev channel-priority: strict @@ -65,7 +65,7 @@ jobs: echo 'EOF' >> $GITHUB_ENV echo "REGEX=$REGEX" >> $GITHUB_ENV - - uses: actions/github-script@v5 + - uses: actions/github-script@v6 env: BENCH_OUTPUT: ${{env.BENCH_OUTPUT}} REGEX: ${{env.REGEX}} diff --git a/.github/workflows/autoupdate-pre-commit-config.yml b/.github/workflows/autoupdate-pre-commit-config.yml index 3696cba8cf2e6..d2eac234ca361 100644 --- a/.github/workflows/autoupdate-pre-commit-config.yml +++ b/.github/workflows/autoupdate-pre-commit-config.yml @@ -12,9 +12,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 - name: Cache multiple paths - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: | ~/.cache/pre-commit diff --git a/.github/workflows/code-checks.yml b/.github/workflows/code-checks.yml index f32fed3b3ee68..9f582edc56523 100644 --- a/.github/workflows/code-checks.yml +++ b/.github/workflows/code-checks.yml @@ -24,10 +24,10 @@ jobs: cancel-in-progress: true steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.9.7' @@ -48,17 +48,17 @@ jobs: steps: - name: Checkout - uses: 
actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ~/conda_pkgs_dir key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }} - - uses: conda-incubator/setup-miniconda@v2 + - uses: conda-incubator/setup-miniconda@v2.1.1 with: mamba-version: "*" channels: conda-forge @@ -68,7 +68,7 @@ jobs: use-only-tar-bz2: true - name: Install node.js (for pyright) - uses: actions/setup-node@v2 + uses: actions/setup-node@v3 with: node-version: "16" @@ -114,17 +114,17 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: ~/conda_pkgs_dir key: ${{ runner.os }}-conda-${{ hashFiles('${{ env.ENV_FILE }}') }} - - uses: conda-incubator/setup-miniconda@v2 + - uses: conda-incubator/setup-miniconda@v2.1.1 with: mamba-version: "*" channels: conda-forge @@ -151,7 +151,7 @@ jobs: if: ${{ steps.build.outcome == 'success' }} - name: Publish benchmarks artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: Benchmarks log path: asv_bench/benchmarks.log @@ -174,7 +174,7 @@ jobs: run: docker image prune -f - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 diff --git a/.github/workflows/comment_bot.yml b/.github/workflows/comment_bot.yml index 8f610fd5781ef..3824e015e8336 100644 --- a/.github/workflows/comment_bot.yml +++ b/.github/workflows/comment_bot.yml @@ -12,18 +12,18 @@ jobs: if: startsWith(github.event.comment.body, '@github-actions pre-commit') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: r-lib/actions/pr-fetch@v2 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Cache multiple paths - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: | ~/.cache/pre-commit ~/.cache/pip key: pre-commit-dispatched-${{ 
runner.os }}-build - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v3 with: python-version: 3.8 - name: Install-pre-commit diff --git a/.github/workflows/docbuild-and-upload.yml b/.github/workflows/docbuild-and-upload.yml index 4cce75779d750..bba9f62a0eca6 100644 --- a/.github/workflows/docbuild-and-upload.yml +++ b/.github/workflows/docbuild-and-upload.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -65,7 +65,7 @@ jobs: run: mv doc/build/html web/build/docs - name: Save website as an artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: website path: web/build diff --git a/.github/workflows/posix.yml b/.github/workflows/posix.yml index bc8791afc69f7..182c5c2629349 100644 --- a/.github/workflows/posix.yml +++ b/.github/workflows/posix.yml @@ -121,12 +121,12 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: Cache conda - uses: actions/cache@v2 + uses: actions/cache@v3 env: CACHE_NUMBER: 0 with: @@ -138,7 +138,7 @@ jobs: # xsel for clipboard tests run: sudo apt-get update && sudo apt-get install -y libc6-dev-i386 xsel ${{ env.EXTRA_APT }} - - uses: conda-incubator/setup-miniconda@v2 + - uses: conda-incubator/setup-miniconda@v2.1.1 with: mamba-version: "*" channels: conda-forge @@ -153,7 +153,7 @@ jobs: if: ${{ matrix.pyarrow_version }} - name: Setup PyPy - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "pypy-3.8" if: ${{ env.IS_PYPY == 'true' }} @@ -178,7 +178,7 @@ jobs: run: pushd /tmp && python -c "import pandas; pandas.show_versions();" && popd - name: Publish test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: Test results path: test-data.xml diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index c287827206336..36cd26504956e 100644 --- 
a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -45,12 +45,12 @@ jobs: cancel-in-progress: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python Dev Version - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.11-dev' @@ -79,7 +79,7 @@ jobs: ci/run_tests.sh - name: Publish test results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: Test results path: test-data.xml diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 431710a49a7dd..bcfe0e69029f6 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -32,12 +32,12 @@ jobs: cancel-in-progress: true steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} @@ -60,7 +60,7 @@ jobs: name: ${{matrix.python-version}}-sdist.gz path: dist/*.gz - - uses: conda-incubator/setup-miniconda@v2 + - uses: conda-incubator/setup-miniconda@v2.1.1 with: activate-environment: pandas-sdist channels: conda-forge
Part of https://github.com/pandas-dev/pandas/pull/46493#issuecomment-1079541210 - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46540
2022-03-28T18:08:47Z
2022-03-28T23:13:32Z
2022-03-28T23:13:31Z
2022-03-30T20:09:43Z
REF: Create StorageExtensionDtype
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index f3db5598e306c..21b5dc625956e 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -1,9 +1,6 @@ from __future__ import annotations -from typing import ( - TYPE_CHECKING, - Any, -) +from typing import TYPE_CHECKING import numpy as np @@ -24,6 +21,7 @@ from pandas.core.dtypes.base import ( ExtensionDtype, + StorageExtensionDtype, register_extension_dtype, ) from pandas.core.dtypes.common import ( @@ -55,7 +53,7 @@ @register_extension_dtype -class StringDtype(ExtensionDtype): +class StringDtype(StorageExtensionDtype): """ Extension dtype for string data. @@ -67,7 +65,7 @@ class StringDtype(ExtensionDtype): parts of the API may change without warning. In particular, StringDtype.na_value may change to no longer be - ``numpy.nan``. + ``pd.NA``. Parameters ---------- @@ -141,7 +139,6 @@ def construct_from_string(cls, string): ----- TypeError If the string is not a valid option. - """ if not isinstance(string, str): raise TypeError( @@ -156,15 +153,6 @@ def construct_from_string(cls, string): else: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") - def __eq__(self, other: Any) -> bool: - if isinstance(other, str) and other == "string": - return True - return super().__eq__(other) - - def __hash__(self) -> int: - # custom __eq__ so have to override __hash__ - return super().__hash__() - # https://github.com/pandas-dev/pandas/issues/36126 # error: Signature of "construct_array_type" incompatible with supertype # "ExtensionDtype" @@ -185,12 +173,6 @@ def construct_array_type( # type: ignore[override] else: return ArrowStringArray - def __repr__(self): - return f"string[{self.storage}]" - - def __str__(self): - return self.name - def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BaseStringArray: diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index eb5d1ccc5ed84..9762b779477e4 100644 --- 
a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -1,7 +1,6 @@ """ Extend pandas with custom array types. """ - from __future__ import annotations from typing import ( @@ -14,6 +13,7 @@ import numpy as np +from pandas._libs import missing as libmissing from pandas._libs.hashtable import object_hash from pandas._typing import ( DtypeObj, @@ -391,6 +391,32 @@ def _can_hold_na(self) -> bool: return True +class StorageExtensionDtype(ExtensionDtype): + """ExtensionDtype that may be backed by more than one implementation.""" + + name: str + na_value = libmissing.NA + _metadata = ("storage",) + + def __init__(self, storage=None) -> None: + self.storage = storage + + def __repr__(self): + return f"{self.name}[{self.storage}]" + + def __str__(self): + return self.name + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.type) and other == self.name: + return True + return super().__eq__(other) + + def __hash__(self) -> int: + # custom __eq__ so have to override __hash__ + return super().__hash__() + + def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: """ Register an ExtensionType with pandas as class decorator.
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Working towards eventually having `int[pyarrow]` for example, generalizing some of the storage concepts of `StringDtype` into `StorageExtensionDtype`
https://api.github.com/repos/pandas-dev/pandas/pulls/46537
2022-03-27T23:42:24Z
2022-03-28T19:38:35Z
2022-03-28T19:38:35Z
2022-05-25T18:19:38Z
CI: xfail geopandas downstream test on Windows due to fiona install
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index c1e7a8ae883ae..83b476fefea46 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -8,6 +8,7 @@ import numpy as np import pytest +from pandas.compat import is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -225,6 +226,13 @@ def test_pandas_datareader(): # importing from pandas, Cython import warning @pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning") +@pytest.mark.xfail( + is_platform_windows(), + raises=ImportError, + reason="ImportError: the 'read_file' function requires the 'fiona' package, " + "but it is not installed or does not import correctly", + strict=False, +) def test_geopandas(): geopandas = import_module("geopandas")
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). Similar to https://github.com/pandas-dev/pandas/pull/46296 but now happening on Windows ``` def _check_fiona(func): if fiona is None: > raise ImportError( f"the {func} requires the 'fiona' package, but it is not installed or does " f"not import correctly.\nImporting fiona resulted in: {fiona_import_error}" ) E ImportError: the 'read_file' function requires the 'fiona' package, but it is not installed or does not import correctly. E Importing fiona resulted in: DLL load failed while importing ogrext: The specified procedure could not be found. ``` cc @jorisvandenbossche can this downstream test be changed to not go through fiona?
https://api.github.com/repos/pandas-dev/pandas/pulls/46536
2022-03-27T22:57:08Z
2022-03-28T12:37:11Z
2022-03-28T12:37:11Z
2022-04-06T13:04:20Z
TST: Add test with large shape to check_below_min_count
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 3e07682d1cdd2..240b9dacce73a 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1090,3 +1090,32 @@ def test_nanops_independent_of_mask_param(operation): median_expected = operation(s) median_result = operation(s, mask=mask) assert median_expected == median_result + + +@pytest.mark.parametrize("min_count", [-1, 0]) +def test_check_below_min_count__negative_or_zero_min_count(min_count): + # GH35227 + result = nanops.check_below_min_count((21, 37), None, min_count) + expected_result = False + assert result == expected_result + + +@pytest.mark.parametrize( + "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] +) +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) +def test_check_below_min_count__positive_min_count(mask, min_count, expected_result): + # GH35227 + shape = (10, 10) + result = nanops.check_below_min_count(shape, mask, min_count) + assert result == expected_result + + +@td.skip_if_windows +@td.skip_if_32bit +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) +def test_check_below_min_count__large_shape(min_count, expected_result): + # GH35227 large shape used to show that the issue is fixed + shape = (2244367, 1253) + result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) + assert result == expected_result
The function in question didn't have any tests in the first place so I thought that I will add them and simply use the problematic tuple for `shape` argument. - [x] closes #35227 - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
https://api.github.com/repos/pandas-dev/pandas/pulls/46534
2022-03-27T20:00:34Z
2022-04-05T23:34:48Z
2022-04-05T23:34:48Z
2022-04-07T19:20:09Z
Revert "TYP: Many typing constructs are invariant"
diff --git a/pandas/_typing.py b/pandas/_typing.py index 2b38b25d6347d..e3b3a4774f558 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -74,14 +74,6 @@ npt: Any = None -# Functions that take Dict/Mapping/List/Sequence/Callable can be tricky to type: -# - keys of Dict and Mapping do not accept sub-classes -# - items of List and Sequence do not accept sub-classes -# - input argument to Callable cannot be sub-classes -# If you want to allow any type and it's sub-classes in the above cases, you can -# use TypeVar("AllowsSubclasses", bound=class) -HashableT = TypeVar("HashableT", bound=Hashable) - # array-like ArrayLike = Union["ExtensionArray", np.ndarray] @@ -113,7 +105,7 @@ NDFrameT = TypeVar("NDFrameT", bound="NDFrame") Axis = Union[str, int] -IndexLabel = Union[Hashable, Sequence[HashableT]] +IndexLabel = Union[Hashable, Sequence[Hashable]] Level = Union[Hashable, int] Shape = Tuple[int, ...] Suffixes = Tuple[Optional[str], Optional[str]] @@ -135,19 +127,19 @@ Dtype = Union["ExtensionDtype", NpDtype] AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"] # DtypeArg specifies all allowable dtypes in a functions its dtype argument -DtypeArg = Union[Dtype, Dict[HashableT, Dtype]] +DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeObj = Union[np.dtype, "ExtensionDtype"] # converters -ConvertersArg = Dict[HashableT, Callable[[Dtype], Dtype]] +ConvertersArg = Dict[Hashable, Callable[[Dtype], Dtype]] # parse_dates ParseDatesArg = Union[ - bool, List[HashableT], List[List[HashableT]], Dict[HashableT, List[Hashable]] + bool, List[Hashable], List[List[Hashable]], Dict[Hashable, List[Hashable]] ] # For functions like rename that convert one label to another -Renamer = Union[Mapping[HashableT, Any], Callable[[HashableT], Hashable]] +Renamer = Union[Mapping[Hashable, Any], Callable[[Hashable], Hashable]] # to maintain type information across generic functions and parametrization T = TypeVar("T") @@ -164,7 +156,7 @@ # types of `func` kwarg for DataFrame.aggregate and 
Series.aggregate AggFuncTypeBase = Union[Callable, str] -AggFuncTypeDict = Dict[HashableT, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] +AggFuncTypeDict = Dict[Hashable, Union[AggFuncTypeBase, List[AggFuncTypeBase]]] AggFuncType = Union[ AggFuncTypeBase, List[AggFuncTypeBase], @@ -268,10 +260,10 @@ def closed(self) -> bool: FormattersType = Union[ List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable] ] -ColspaceType = Mapping[HashableT, Union[str, int]] +ColspaceType = Mapping[Hashable, Union[str, int]] FloatFormatType = Union[str, Callable, "EngFormatter"] ColspaceArgType = Union[ - str, int, Sequence[Union[str, int]], Mapping[HashableT, Union[str, int]] + str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]] ] # Arguments for fillna()
I accidentally pushed to pandas instead of my fork :( Is it possible in github to disallow direct pushes to the repository (only allow PRs)? edit: It seems that this is possible https://stackoverflow.com/a/57685576
https://api.github.com/repos/pandas-dev/pandas/pulls/46530
2022-03-27T18:26:03Z
2022-03-27T20:12:38Z
2022-03-27T20:12:38Z
2022-04-02T01:15:07Z
STYLE: fix PDF026 issues
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aeb49c2b1a545..0a2f3f8f2506d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,7 +50,7 @@ repos: - flake8==4.0.1 - flake8-comprehensions==3.7.0 - flake8-bugbear==21.3.2 - - pandas-dev-flaker==0.4.0 + - pandas-dev-flaker==0.5.0 - repo: https://github.com/PyCQA/isort rev: 5.10.1 hooks: diff --git a/environment.yml b/environment.yml index a424100eda21a..187f666938aeb 100644 --- a/environment.yml +++ b/environment.yml @@ -33,7 +33,7 @@ dependencies: - gitpython # obtain contributors from git for whatsnew - gitdb - numpydoc - - pandas-dev-flaker=0.4.0 + - pandas-dev-flaker=0.5.0 - pydata-sphinx-theme - pytest-cython - sphinx diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0013ddf73cddc..caa08c67cbfab 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1555,14 +1555,10 @@ def __matmul__(self, other: Series) -> Series: ... @overload - def __matmul__( - self, other: AnyArrayLike | DataFrame | Series - ) -> DataFrame | Series: + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... - def __matmul__( - self, other: AnyArrayLike | DataFrame | Series - ) -> DataFrame | Series: + def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ diff --git a/requirements-dev.txt b/requirements-dev.txt index 2746b91986a3c..3ccedcbad1782 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -19,7 +19,7 @@ pyupgrade gitpython gitdb numpydoc -pandas-dev-flaker==0.4.0 +pandas-dev-flaker==0.5.0 pydata-sphinx-theme pytest-cython sphinx
- [x] closes #46528 - [x] closes #42359 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46529
2022-03-27T16:13:46Z
2022-03-29T09:27:29Z
2022-03-29T09:27:28Z
2022-03-29T16:21:27Z
REF: re-use tz_convert_from_utc_single in _localize_tso
diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 11de4e60f202d..4c74959fee60d 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -55,7 +55,9 @@ ) from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.timezones import tz_compare -from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single +from pandas._libs.tslibs.tzconversion import ( + py_tz_convert_from_utc_single as tz_convert_from_utc_single, +) from pandas._libs.tslibs.vectorized import ( dt64arr_to_periodarr, get_resolution, diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index f51f25c2065f2..e4b0c527a4cac 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -72,8 +72,9 @@ from pandas._libs.tslibs.nattype cimport ( ) from pandas._libs.tslibs.tzconversion cimport ( bisect_right_i8, - infer_datetuil_fold, + infer_dateutil_fold, localize_tzinfo_api, + tz_convert_from_utc_single, tz_localize_to_utc_single, ) @@ -531,7 +532,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts, if typ == 'dateutil': tdata = <int64_t*>cnp.PyArray_DATA(trans) pos = bisect_right_i8(tdata, obj.value, trans.shape[0]) - 1 - obj.fold = infer_datetuil_fold(obj.value, trans, deltas, pos) + obj.fold = infer_dateutil_fold(obj.value, trans, deltas, pos) # Keep the converter same as PyDateTime's dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, @@ -683,7 +684,7 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz): int64_t[::1] deltas int64_t local_val int64_t* tdata - Py_ssize_t pos, ntrans + Py_ssize_t pos, ntrans, outpos = -1 str typ assert obj.tzinfo is None @@ -692,35 +693,12 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz): pass elif obj.value == NPY_NAT: pass - elif is_tzlocal(tz): - local_val = obj.value + localize_tzinfo_api(obj.value, tz, &obj.fold) - dt64_to_dtstruct(local_val, &obj.dts) else: - # Adjust 
datetime64 timestamp, recompute datetimestruct - trans, deltas, typ = get_dst_info(tz) - ntrans = trans.shape[0] - - if typ == "pytz": - # i.e. treat_tz_as_pytz(tz) - tdata = <int64_t*>cnp.PyArray_DATA(trans) - pos = bisect_right_i8(tdata, obj.value, ntrans) - 1 - local_val = obj.value + deltas[pos] - - # find right representation of dst etc in pytz timezone - tz = tz._tzinfos[tz._transition_info[pos]] - elif typ == "dateutil": - # i.e. treat_tz_as_dateutil(tz) - tdata = <int64_t*>cnp.PyArray_DATA(trans) - pos = bisect_right_i8(tdata, obj.value, ntrans) - 1 - local_val = obj.value + deltas[pos] + local_val = tz_convert_from_utc_single(obj.value, tz, &obj.fold, &outpos) - # dateutil supports fold, so we infer fold from value - obj.fold = infer_datetuil_fold(obj.value, trans, deltas, pos) - else: - # All other cases have len(deltas) == 1. As of 2018-07-17 - # (and 2022-03-07), all test cases that get here have - # is_fixed_offset(tz). - local_val = obj.value + deltas[0] + if outpos != -1: + # infer we went through a pytz path + tz = tz._tzinfos[tz._transition_info[outpos]] dt64_to_dtstruct(local_val, &obj.dts) diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 74aab9f297379..ce7541fe1e74e 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -8,14 +8,16 @@ from numpy cimport ( cdef int64_t localize_tzinfo_api( int64_t utc_val, tzinfo tz, bint* fold=* ) except? -1 -cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz) +cdef int64_t tz_convert_from_utc_single( + int64_t utc_val, tzinfo tz, bint* fold=?, Py_ssize_t* outpos=? +) except? -1 cdef int64_t tz_localize_to_utc_single( int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=* ) except? 
-1 cdef Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n) -cdef bint infer_datetuil_fold( +cdef bint infer_dateutil_fold( int64_t value, const int64_t[::1] trans, const int64_t[::1] deltas, diff --git a/pandas/_libs/tslibs/tzconversion.pyi b/pandas/_libs/tslibs/tzconversion.pyi index e1a0263cf59ef..5e513eefdca15 100644 --- a/pandas/_libs/tslibs/tzconversion.pyi +++ b/pandas/_libs/tslibs/tzconversion.pyi @@ -12,7 +12,9 @@ def tz_convert_from_utc( vals: npt.NDArray[np.int64], # const int64_t[:] tz: tzinfo, ) -> npt.NDArray[np.int64]: ... -def tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... + +# py_tz_convert_from_utc_single exposed for testing +def py_tz_convert_from_utc_single(val: np.int64, tz: tzinfo) -> np.int64: ... def tz_localize_to_utc( vals: npt.NDArray[np.int64], tz: tzinfo | None, diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index a63a27b8194de..afcfe94a695bb 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -444,7 +444,18 @@ cdef int64_t localize_tzinfo_api( return _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False, fold=fold) -cpdef int64_t tz_convert_from_utc_single(int64_t utc_val, tzinfo tz): +def py_tz_convert_from_utc_single(int64_t utc_val, tzinfo tz): + # The 'bint* fold=NULL' in tz_convert_from_utc_single means we cannot + # make it cdef, so this is version exposed for testing from python. + return tz_convert_from_utc_single(utc_val, tz) + + +cdef int64_t tz_convert_from_utc_single( + int64_t utc_val, + tzinfo tz, + bint* fold=NULL, + Py_ssize_t* outpos=NULL, +) except? 
-1: """ Convert the val (in i8) from UTC to tz @@ -454,6 +465,8 @@ cpdef int64_t tz_convert_from_utc_single(int64_t utc_val, tzinfo tz): ---------- utc_val : int64 tz : tzinfo + fold : bint*, default NULL + outpos : Py_ssize_t*, default NULL Returns ------- @@ -473,15 +486,31 @@ cpdef int64_t tz_convert_from_utc_single(int64_t utc_val, tzinfo tz): return utc_val elif is_tzlocal(tz): return utc_val + _tz_localize_using_tzinfo_api(utc_val, tz, to_utc=False) - elif is_fixed_offset(tz): - _, deltas, _ = get_dst_info(tz) - delta = deltas[0] - return utc_val + delta else: - trans, deltas, _ = get_dst_info(tz) + trans, deltas, typ = get_dst_info(tz) tdata = <int64_t*>cnp.PyArray_DATA(trans) - pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1 - return utc_val + deltas[pos] + + if typ == "dateutil": + pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1 + + if fold is not NULL: + fold[0] = infer_dateutil_fold(utc_val, trans, deltas, pos) + return utc_val + deltas[pos] + + elif typ == "pytz": + pos = bisect_right_i8(tdata, utc_val, trans.shape[0]) - 1 + + # We need to get 'pos' back to the caller so it can pick the + # correct "standardized" tzinfo objecg. + if outpos is not NULL: + outpos[0] = pos + return utc_val + deltas[pos] + + else: + # All other cases have len(deltas) == 1. As of 2018-07-17 + # (and 2022-03-07), all test cases that get here have + # is_fixed_offset(tz). + return utc_val + deltas[0] def tz_convert_from_utc(const int64_t[:] vals, tzinfo tz): @@ -635,7 +664,7 @@ cdef int64_t _tz_localize_using_tzinfo_api( # NB: relies on dateutil internals, subject to change. 
-cdef bint infer_datetuil_fold( +cdef bint infer_dateutil_fold( int64_t value, const int64_t[::1] trans, const int64_t[::1] deltas, diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index d0864ae8e1b7b..a790b2617783f 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -21,7 +21,7 @@ def _compare_utc_to_local(tz_didx): def f(x): - return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz) + return tzconversion.py_tz_convert_from_utc_single(x, tz_didx.tz) result = tzconversion.tz_convert_from_utc(tz_didx.asi8, tz_didx.tz) expected = np.vectorize(f)(tz_didx.asi8)
Largely sits on top of #46516 The observation is that what _localize_tso is doing is similar to what tz_convert_from_utc_single is doing if we could just get a couple more pieces of information back from the latter call. Getting `fold` back is easy to do by passing via pointer (which we do elsewhere). Getting `new_tz` back is uglier but its the best we got. (im assuming that returning a tuple`(int64_t, bint, Py_ssize_t)` would incur a perf penalty but ive bothered the cython folks enough already recently) May be able to get some extra de-duplication in _create_tsobject_tz_using_offset and/or ints_to_pydatetime. But I'm skittish perf-wise. #46516 should definitely be merged. This I won't be offended if reviewers ask for %timeits out the wazoo. At some point, someone with just the right amount of adderall drip should figure out the optimal way to check for `typ=="dateutil" etc.
https://api.github.com/repos/pandas-dev/pandas/pulls/46525
2022-03-27T04:02:05Z
2022-03-29T22:41:10Z
2022-03-29T22:41:10Z
2022-03-29T22:55:19Z
Add a new material in Community tutorials page
diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index a4c555ac227e6..8febc3adb9666 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -75,6 +75,16 @@ Excel charts with pandas, vincent and xlsxwriter * `Using Pandas and XlsxWriter to create Excel charts <https://pandas-xlsxwriter-charts.readthedocs.io/>`_ +Joyful pandas +------------- + +A tutorial written in Chinese by Yuanhao Geng. It covers the basic operations +for NumPy and pandas, 4 main data manipulation methods (including indexing, groupby, reshaping +and concatenation) and 4 main data types (including missing data, string data, categorical +data and time series data). At the end of each chapter, corresponding exercises are posted. +All the datasets and related materials can be found in the GitHub repository +`datawhalechina/joyful-pandas <https://github.com/datawhalechina/joyful-pandas>`_. + Video tutorials ---------------
Currently, [Joyful pandas](https://github.com/datawhalechina/joyful-pandas) is one of the popular tutorials for pandas on GitHub, which ranks after guipsamora/pandas_exercises and Julia Evans' pandas cookbook by stars. This tutorial is based on pandas with a newer version and includes most of the essential parts in pandas. The webpage for the tutorial follows pandas's sphinx template. I believe this can be helpful for the community. Thanks.
https://api.github.com/repos/pandas-dev/pandas/pulls/46523
2022-03-26T17:09:32Z
2022-03-27T00:20:06Z
2022-03-27T00:20:06Z
2022-03-27T02:02:47Z
ENH: consistency of input args for boundaries - Interval
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 7046c8862b0d7..b42729476c818 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -268,7 +268,9 @@ def setup(self, bins): self.datetime_series = pd.Series( np.random.randint(N, size=N), dtype="datetime64[ns]" ) - self.interval_bins = pd.IntervalIndex.from_breaks(np.linspace(0, N, bins)) + self.interval_bins = pd.IntervalIndex.from_breaks( + np.linspace(0, N, bins), "right" + ) def time_cut_int(self, bins): pd.cut(self.int_series, bins) diff --git a/doc/redirects.csv b/doc/redirects.csv index 9b8a5a73dedff..173e670e30f0e 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -741,11 +741,11 @@ generated/pandas.Index.values,../reference/api/pandas.Index.values generated/pandas.Index.view,../reference/api/pandas.Index.view generated/pandas.Index.where,../reference/api/pandas.Index.where generated/pandas.infer_freq,../reference/api/pandas.infer_freq -generated/pandas.Interval.closed,../reference/api/pandas.Interval.closed +generated/pandas.Interval.inclusive,../reference/api/pandas.Interval.inclusive generated/pandas.Interval.closed_left,../reference/api/pandas.Interval.closed_left generated/pandas.Interval.closed_right,../reference/api/pandas.Interval.closed_right generated/pandas.Interval,../reference/api/pandas.Interval -generated/pandas.IntervalIndex.closed,../reference/api/pandas.IntervalIndex.closed +generated/pandas.IntervalIndex.inclusive,../reference/api/pandas.IntervalIndex.inclusive generated/pandas.IntervalIndex.contains,../reference/api/pandas.IntervalIndex.contains generated/pandas.IntervalIndex.from_arrays,../reference/api/pandas.IntervalIndex.from_arrays generated/pandas.IntervalIndex.from_breaks,../reference/api/pandas.IntervalIndex.from_breaks diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index 1b8e0fdb856b5..fed0d2c5f7827 100644 --- a/doc/source/reference/arrays.rst +++ 
b/doc/source/reference/arrays.rst @@ -303,7 +303,7 @@ Properties .. autosummary:: :toctree: api/ - Interval.closed + Interval.inclusive Interval.closed_left Interval.closed_right Interval.is_empty @@ -340,7 +340,7 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`. arrays.IntervalArray.left arrays.IntervalArray.right - arrays.IntervalArray.closed + arrays.IntervalArray.inclusive arrays.IntervalArray.mid arrays.IntervalArray.length arrays.IntervalArray.is_empty diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index ddfef14036ef3..89a9a0a92ef08 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -242,7 +242,7 @@ IntervalIndex components IntervalIndex.left IntervalIndex.right IntervalIndex.mid - IntervalIndex.closed + IntervalIndex.inclusive IntervalIndex.length IntervalIndex.values IntervalIndex.is_empty diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index 3081c6f7c6a08..aaff76261b3ad 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -1020,7 +1020,7 @@ Trying to select an ``Interval`` that is not exactly contained in the ``Interval In [7]: df.loc[pd.Interval(0.5, 2.5)] --------------------------------------------------------------------------- - KeyError: Interval(0.5, 2.5, closed='right') + KeyError: Interval(0.5, 2.5, inclusive='right') Selecting all ``Intervals`` that overlap a given ``Interval`` can be performed using the :meth:`~IntervalIndex.overlaps` method to create a boolean indexer. diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index faf4b1ac44d5b..a23c977e94b65 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -448,7 +448,7 @@ Selecting via a specific interval: .. 
ipython:: python - df.loc[pd.Interval(1.5, 3.0)] + df.loc[pd.Interval(1.5, 3.0, "right")] Selecting via a scalar value that is contained *in* the intervals. diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index e4dd6fa091d80..4f04d5a0ee69d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -584,18 +584,18 @@ this would previously return ``True`` for any ``Interval`` overlapping an ``Inte .. code-block:: python - In [4]: pd.Interval(1, 2, closed='neither') in ii + In [4]: pd.Interval(1, 2, inclusive='neither') in ii Out[4]: True - In [5]: pd.Interval(-10, 10, closed='both') in ii + In [5]: pd.Interval(-10, 10, inclusive='both') in ii Out[5]: True *New behavior*: .. ipython:: python - pd.Interval(1, 2, closed='neither') in ii - pd.Interval(-10, 10, closed='both') in ii + pd.Interval(1, 2, inclusive='neither') in ii + pd.Interval(-10, 10, inclusive='both') in ii The :meth:`~IntervalIndex.get_loc` method now only returns locations for exact matches to ``Interval`` queries, as opposed to the previous behavior of returning locations for overlapping matches. A ``KeyError`` will be raised if an exact match is not found. @@ -619,7 +619,7 @@ returning locations for overlapping matches. A ``KeyError`` will be raised if a In [7]: ii.get_loc(pd.Interval(2, 6)) --------------------------------------------------------------------------- - KeyError: Interval(2, 6, closed='right') + KeyError: Interval(2, 6, inclusive='right') Likewise, :meth:`~IntervalIndex.get_indexer` and :meth:`~IntervalIndex.get_indexer_non_unique` will also only return locations for exact matches to ``Interval`` queries, with ``-1`` denoting that an exact match was not found. 
@@ -680,11 +680,11 @@ Similarly, a ``KeyError`` will be raised for non-exact matches instead of return In [6]: s[pd.Interval(2, 3)] --------------------------------------------------------------------------- - KeyError: Interval(2, 3, closed='right') + KeyError: Interval(2, 3, inclusive='right') In [7]: s.loc[pd.Interval(2, 3)] --------------------------------------------------------------------------- - KeyError: Interval(2, 3, closed='right') + KeyError: Interval(2, 3, inclusive='right') The :meth:`~IntervalIndex.overlaps` method can be used to create a boolean indexer that replicates the previous behavior of returning overlapping matches. diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index eb08034bb92eb..ac9f8b02c7acb 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -669,7 +669,12 @@ Other Deprecations - Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`) - Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`) - Deprecated indexing on a timezone-naive :class:`DatetimeIndex` using a string representing a timezone-aware datetime (:issue:`46903`, :issue:`36148`) -- +- Deprecated the ``closed`` argument in :class:`Interval` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) +- Deprecated the ``closed`` argument in :class:`IntervalIndex` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) +- Deprecated the ``closed`` argument in :class:`IntervalDtype` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) +- Deprecated the ``closed`` argument in :class:`IntervalArray` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) +- Deprecated the 
``closed`` argument in :class:`IntervalTree` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) +- Deprecated the ``closed`` argument in :class:`ArrowIntervalType` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi index 9b727b6278792..d177e597478d9 100644 --- a/pandas/_libs/interval.pyi +++ b/pandas/_libs/interval.pyi @@ -11,6 +11,7 @@ from typing import ( import numpy as np import numpy.typing as npt +from pandas._libs import lib from pandas._typing import ( IntervalClosedType, Timedelta, @@ -54,19 +55,24 @@ class IntervalMixin: def is_empty(self) -> bool: ... def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... +def _warning_interval( + inclusive, closed +) -> tuple[IntervalClosedType, lib.NoDefault]: ... + class Interval(IntervalMixin, Generic[_OrderableT]): @property def left(self: Interval[_OrderableT]) -> _OrderableT: ... @property def right(self: Interval[_OrderableT]) -> _OrderableT: ... @property - def closed(self) -> IntervalClosedType: ... + def inclusive(self) -> IntervalClosedType: ... mid: _MidDescriptor length: _LengthDescriptor def __init__( self, left: _OrderableT, right: _OrderableT, + inclusive: IntervalClosedType = ..., closed: IntervalClosedType = ..., ) -> None: ... def __hash__(self) -> int: ... @@ -157,7 +163,7 @@ class IntervalTree(IntervalMixin): self, left: np.ndarray, right: np.ndarray, - closed: IntervalClosedType = ..., + inclusive: IntervalClosedType = ..., leaf_size: int = ..., ) -> None: ...
@property diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 44c50e64147f4..178836ff1548b 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -40,7 +40,9 @@ from numpy cimport ( cnp.import_array() +import warnings +from pandas._libs import lib from pandas._libs cimport util from pandas._libs.hashtable cimport Int64Vector from pandas._libs.tslibs.timedeltas cimport _Timedelta @@ -52,7 +54,7 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) +VALID_CLOSED = frozenset(['both', 'neither', 'left', 'right']) cdef class IntervalMixin: @@ -69,7 +71,7 @@ cdef class IntervalMixin: bool True if the Interval is closed on the left-side. """ - return self.closed in ('left', 'both') + return self.inclusive in ('left', 'both') @property def closed_right(self): @@ -83,7 +85,7 @@ cdef class IntervalMixin: bool True if the Interval is closed on the left-side. """ - return self.closed in ('right', 'both') + return self.inclusive in ('right', 'both') @property def open_left(self): @@ -150,43 +152,43 @@ cdef class IntervalMixin: -------- An :class:`Interval` that contains points is not empty: - >>> pd.Interval(0, 1, closed='right').is_empty + >>> pd.Interval(0, 1, inclusive='right').is_empty False An ``Interval`` that does not contain any points is empty: - >>> pd.Interval(0, 0, closed='right').is_empty + >>> pd.Interval(0, 0, inclusive='right').is_empty True - >>> pd.Interval(0, 0, closed='left').is_empty + >>> pd.Interval(0, 0, inclusive='left').is_empty True - >>> pd.Interval(0, 0, closed='neither').is_empty + >>> pd.Interval(0, 0, inclusive='neither').is_empty True An ``Interval`` that contains a single point is not empty: - >>> pd.Interval(0, 0, closed='both').is_empty + >>> pd.Interval(0, 0, inclusive='both').is_empty False An :class:`~arrays.IntervalArray` or :class:`IntervalIndex` returns a boolean ``ndarray`` positionally indicating if an ``Interval`` is 
empty: - >>> ivs = [pd.Interval(0, 0, closed='neither'), - ... pd.Interval(1, 2, closed='neither')] + >>> ivs = [pd.Interval(0, 0, inclusive='neither'), + ... pd.Interval(1, 2, inclusive='neither')] >>> pd.arrays.IntervalArray(ivs).is_empty array([ True, False]) Missing values are not considered empty: - >>> ivs = [pd.Interval(0, 0, closed='neither'), np.nan] + >>> ivs = [pd.Interval(0, 0, inclusive='neither'), np.nan] >>> pd.IntervalIndex(ivs).is_empty array([ True, False]) """ - return (self.right == self.left) & (self.closed != 'both') + return (self.right == self.left) & (self.inclusive != 'both') def _check_closed_matches(self, other, name='other'): """ - Check if the closed attribute of `other` matches. + Check if the inclusive attribute of `other` matches. Note that 'left' and 'right' are considered different from 'both'. @@ -201,16 +203,42 @@ cdef class IntervalMixin: ValueError When `other` is not closed exactly the same as self. """ - if self.closed != other.closed: - raise ValueError(f"'{name}.closed' is {repr(other.closed)}, " - f"expected {repr(self.closed)}.") + if self.inclusive != other.inclusive: + raise ValueError(f"'{name}.inclusive' is {repr(other.inclusive)}, " + f"expected {repr(self.inclusive)}.") cdef bint _interval_like(other): return (hasattr(other, 'left') and hasattr(other, 'right') - and hasattr(other, 'closed')) + and hasattr(other, 'inclusive')) +def _warning_interval(inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default): + """ + warning in interval class for variable inclusive and closed + """ + if inclusive is not None and closed != lib.no_default: + raise ValueError( + "Deprecated argument `closed` cannot be passed " + "if argument `inclusive` is not None" + ) + elif closed != lib.no_default: + warnings.warn( + "Argument `closed` is deprecated in favor of `inclusive`.", + FutureWarning, + stacklevel=2, + ) + if closed is None: + inclusive = "both" + elif closed in ("both", "neither", "left", "right"): + 
inclusive = closed + else: + raise ValueError( + "Argument `closed` has to be either " + "'both', 'neither', 'left' or 'right'" + ) + + return inclusive, closed cdef class Interval(IntervalMixin): """ @@ -226,6 +254,14 @@ cdef class Interval(IntervalMixin): Whether the interval is closed on the left-side, right-side, both or neither. See the Notes for more detailed explanation. + .. deprecated:: 1.5.0 + + inclusive : {'both', 'neither', 'left', 'right'}, default 'both' + Whether the interval is closed on the left-side, right-side, both or + neither. See the Notes for more detailed explanation. + + .. versionadded:: 1.5.0 + See Also -------- IntervalIndex : An Index of Interval objects that are all closed on the @@ -243,28 +279,28 @@ cdef class Interval(IntervalMixin): A closed interval (in mathematics denoted by square brackets) contains its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the - conditions ``0 <= x <= 5``. This is what ``closed='both'`` stands for. + conditions ``0 <= x <= 5``. This is what ``inclusive='both'`` stands for. An open interval (in mathematics denoted by parentheses) does not contain its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the - conditions ``0 < x < 5``. This is what ``closed='neither'`` stands for. + conditions ``0 < x < 5``. This is what ``inclusive='neither'`` stands for. Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is - described by ``0 <= x < 5`` (``closed='left'``) and ``(0, 5]`` is - described by ``0 < x <= 5`` (``closed='right'``). + described by ``0 <= x < 5`` (``inclusive='left'``) and ``(0, 5]`` is + described by ``0 < x <= 5`` (``inclusive='right'``).
Examples -------- It is possible to build Intervals of different types, like numeric ones: - >>> iv = pd.Interval(left=0, right=5) + >>> iv = pd.Interval(left=0, right=5, inclusive='right') >>> iv - Interval(0, 5, closed='right') + Interval(0, 5, inclusive='right') You can check if an element belongs to it >>> 2.5 in iv True - You can test the bounds (``closed='right'``, so ``0 < x <= 5``): + You can test the bounds (``inclusive='right'``, so ``0 < x <= 5``): >>> 0 in iv False @@ -284,16 +320,16 @@ cdef class Interval(IntervalMixin): >>> shifted_iv = iv + 3 >>> shifted_iv - Interval(3, 8, closed='right') + Interval(3, 8, inclusive='right') >>> extended_iv = iv * 10.0 >>> extended_iv - Interval(0.0, 50.0, closed='right') + Interval(0.0, 50.0, inclusive='right') To create a time interval you can use Timestamps as the bounds >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'), ... pd.Timestamp('2018-01-01 00:00:00'), - ... closed='left') + ... inclusive='left') >>> pd.Timestamp('2017-01-01 00:00') in year_2017 True >>> year_2017.length @@ -312,21 +348,26 @@ cdef class Interval(IntervalMixin): Right bound for the interval. """ - cdef readonly str closed + cdef readonly str inclusive """ Whether the interval is closed on the left-side, right-side, both or neither. 
""" - def __init__(self, left, right, str closed='right'): + def __init__(self, left, right, inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default): # note: it is faster to just do these checks than to use a special # constructor (__cinit__/__new__) to avoid them self._validate_endpoint(left) self._validate_endpoint(right) - if closed not in VALID_CLOSED: - raise ValueError(f"invalid option for 'closed': {closed}") + inclusive, closed = _warning_interval(inclusive, closed) + + if inclusive is None: + inclusive = "both" + + if inclusive not in VALID_CLOSED: + raise ValueError(f"invalid option for 'inclusive': {inclusive}") if not left <= right: raise ValueError("left side of interval must be <= right side") if (isinstance(left, _Timestamp) and @@ -336,7 +377,7 @@ cdef class Interval(IntervalMixin): f"{repr(left.tzinfo)}' and {repr(right.tzinfo)}") self.left = left self.right = right - self.closed = closed + self.inclusive = inclusive def _validate_endpoint(self, endpoint): # GH 23013 @@ -346,7 +387,7 @@ cdef class Interval(IntervalMixin): "are allowed when constructing an Interval.") def __hash__(self): - return hash((self.left, self.right, self.closed)) + return hash((self.left, self.right, self.inclusive)) def __contains__(self, key) -> bool: if _interval_like(key): @@ -356,8 +397,8 @@ cdef class Interval(IntervalMixin): def __richcmp__(self, other, op: int): if isinstance(other, Interval): - self_tuple = (self.left, self.right, self.closed) - other_tuple = (other.left, other.right, other.closed) + self_tuple = (self.left, self.right, self.inclusive) + other_tuple = (other.left, other.right, other.inclusive) return PyObject_RichCompare(self_tuple, other_tuple, op) elif util.is_array(other): return np.array( @@ -368,7 +409,7 @@ cdef class Interval(IntervalMixin): return NotImplemented def __reduce__(self): - args = (self.left, self.right, self.closed) + args = (self.left, self.right, self.inclusive) return (type(self), args) def 
_repr_base(self): @@ -386,7 +427,7 @@ cdef class Interval(IntervalMixin): left, right = self._repr_base() name = type(self).__name__ - repr_str = f'{name}({repr(left)}, {repr(right)}, closed={repr(self.closed)})' + repr_str = f'{name}({repr(left)}, {repr(right)}, inclusive={repr(self.inclusive)})' return repr_str def __str__(self) -> str: @@ -402,7 +443,7 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(y) or is_timedelta64_object(y) ): - return Interval(self.left + y, self.right + y, closed=self.closed) + return Interval(self.left + y, self.right + y, inclusive=self.inclusive) elif ( # __radd__ pattern # TODO(cython3): remove this @@ -413,7 +454,7 @@ cdef class Interval(IntervalMixin): or is_timedelta64_object(self) ) ): - return Interval(y.left + self, y.right + self, closed=y.closed) + return Interval(y.left + self, y.right + self, inclusive=y.inclusive) return NotImplemented def __radd__(self, other): @@ -422,7 +463,7 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(other) or is_timedelta64_object(other) ): - return Interval(self.left + other, self.right + other, closed=self.closed) + return Interval(self.left + other, self.right + other, inclusive=self.inclusive) return NotImplemented def __sub__(self, y): @@ -431,32 +472,33 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(y) or is_timedelta64_object(y) ): - return Interval(self.left - y, self.right - y, closed=self.closed) + return Interval(self.left - y, self.right - y, inclusive=self.inclusive) return NotImplemented def __mul__(self, y): if isinstance(y, numbers.Number): - return Interval(self.left * y, self.right * y, closed=self.closed) + return Interval(self.left * y, self.right * y, inclusive=self.inclusive) elif isinstance(y, Interval) and isinstance(self, numbers.Number): # __radd__ semantics # TODO(cython3): remove this - return Interval(y.left * self, y.right * self, closed=y.closed) + return Interval(y.left * self, y.right * self, inclusive=y.inclusive) + return NotImplemented 
def __rmul__(self, other): if isinstance(other, numbers.Number): - return Interval(self.left * other, self.right * other, closed=self.closed) + return Interval(self.left * other, self.right * other, inclusive=self.inclusive) return NotImplemented def __truediv__(self, y): if isinstance(y, numbers.Number): - return Interval(self.left / y, self.right / y, closed=self.closed) + return Interval(self.left / y, self.right / y, inclusive=self.inclusive) return NotImplemented def __floordiv__(self, y): if isinstance(y, numbers.Number): return Interval( - self.left // y, self.right // y, closed=self.closed) + self.left // y, self.right // y, inclusive=self.inclusive) return NotImplemented def overlaps(self, other): @@ -494,14 +536,14 @@ cdef class Interval(IntervalMixin): Intervals that share closed endpoints overlap: - >>> i4 = pd.Interval(0, 1, closed='both') - >>> i5 = pd.Interval(1, 2, closed='both') + >>> i4 = pd.Interval(0, 1, inclusive='both') + >>> i5 = pd.Interval(1, 2, inclusive='both') >>> i4.overlaps(i5) True Intervals that only have an open endpoint in common do not overlap: - >>> i6 = pd.Interval(1, 2, closed='neither') + >>> i6 = pd.Interval(1, 2, inclusive='neither') >>> i4.overlaps(i6) False """ @@ -537,10 +579,10 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): tuple of left : ndarray right : ndarray - closed: str + inclusive: str """ cdef: - object closed = None, interval + object inclusive = None, interval Py_ssize_t i, n = len(intervals) ndarray left, right bint seen_closed = False @@ -563,13 +605,13 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): right[i] = interval.right if not seen_closed: seen_closed = True - closed = interval.closed - elif closed != interval.closed: - closed = None + inclusive = interval.inclusive + elif inclusive != interval.inclusive: + inclusive = None if validate_closed: raise ValueError("intervals must all be closed on the same side") - return left, right, 
closed + return left, right, inclusive include "intervaltree.pxi" diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index 547fcc0b8aa07..51db5f1e76c99 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -3,9 +3,13 @@ Template for intervaltree WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in """ +import warnings +from pandas._libs import lib from pandas._libs.algos import is_monotonic +from pandas._libs.interval import _warning_interval + ctypedef fused int_scalar_t: int64_t float64_t @@ -34,11 +38,11 @@ cdef class IntervalTree(IntervalMixin): ndarray left, right IntervalNode root object dtype - str closed + str inclusive object _is_overlapping, _left_sorter, _right_sorter Py_ssize_t _na_count - def __init__(self, left, right, closed='right', leaf_size=100): + def __init__(self, left, right, inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default, leaf_size=100): """ Parameters ---------- @@ -48,13 +52,27 @@ cdef class IntervalTree(IntervalMixin): closed : {'left', 'right', 'both', 'neither'}, optional Whether the intervals are closed on the left-side, right-side, both or neither. Defaults to 'right'. + + .. deprecated:: 1.5.0 + + inclusive : {"both", "neither", "left", "right"}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. Defaults to 'right'. + + .. versionadded:: 1.5.0 + leaf_size : int, optional Parameter that controls when the tree switches from creating nodes to brute-force search. Tune this parameter to optimize query performance. 
""" - if closed not in ['left', 'right', 'both', 'neither']: - raise ValueError("invalid option for 'closed': %s" % closed) + inclusive, closed = _warning_interval(inclusive, closed) + + if inclusive is None: + inclusive = "both" + + if inclusive not in ['left', 'right', 'both', 'neither']: + raise ValueError("invalid option for 'inclusive': %s" % inclusive) left = np.asarray(left) right = np.asarray(right) @@ -64,7 +82,7 @@ cdef class IntervalTree(IntervalMixin): indices = np.arange(len(left), dtype='int64') - self.closed = closed + self.inclusive = inclusive # GH 23352: ensure no nan in nodes mask = ~np.isnan(self.left) @@ -73,7 +91,7 @@ cdef class IntervalTree(IntervalMixin): self.right = self.right[mask] indices = indices[mask] - node_cls = NODE_CLASSES[str(self.dtype), closed] + node_cls = NODE_CLASSES[str(self.dtype), inclusive] self.root = node_cls(self.left, self.right, indices, leaf_size) @property @@ -102,7 +120,7 @@ cdef class IntervalTree(IntervalMixin): return self._is_overlapping # <= when both sides closed since endpoints can overlap - op = le if self.closed == 'both' else lt + op = le if self.inclusive == 'both' else lt # overlap if start of current interval < end of previous interval # (current and previous in terms of sorted order by left/start side) @@ -180,9 +198,9 @@ cdef class IntervalTree(IntervalMixin): missing.to_array().astype('intp')) def __repr__(self) -> str: - return ('<IntervalTree[{dtype},{closed}]: ' + return ('<IntervalTree[{dtype},{inclusive}]: ' '{n_elements} elements>'.format( - dtype=self.dtype, closed=self.closed, + dtype=self.dtype, inclusive=self.inclusive, n_elements=self.root.n_elements)) # compat with IndexEngine interface @@ -251,7 +269,7 @@ cdef class IntervalNode: nodes = [] for dtype in ['float64', 'int64', 'uint64']: - for closed, cmp_left, cmp_right in [ + for inclusive, cmp_left, cmp_right in [ ('left', '<=', '<'), ('right', '<', '<='), ('both', '<=', '<='), @@ -265,7 +283,7 @@ for dtype in ['float64', 'int64', 
'uint64']: elif dtype.startswith('float'): fused_prefix = '' nodes.append((dtype, dtype.title(), - closed, closed.title(), + inclusive, inclusive.title(), cmp_left, cmp_right, cmp_left_converse, diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4e245d1bd8693..2136c410ef4a0 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2142,7 +2142,7 @@ cpdef bint is_interval_array(ndarray values): """ cdef: Py_ssize_t i, n = len(values) - str closed = None + str inclusive = None bint numeric = False bint dt64 = False bint td64 = False @@ -2155,15 +2155,15 @@ cpdef bint is_interval_array(ndarray values): val = values[i] if is_interval(val): - if closed is None: - closed = val.closed + if inclusive is None: + inclusive = val.inclusive numeric = ( util.is_float_object(val.left) or util.is_integer_object(val.left) ) td64 = is_timedelta(val.left) dt64 = PyDateTime_Check(val.left) - elif val.closed != closed: + elif val.inclusive != inclusive: # mismatched closedness return False elif numeric: @@ -2186,7 +2186,7 @@ cpdef bint is_interval_array(ndarray values): else: return False - if closed is None: + if inclusive is None: # we saw all-NAs, no actual Intervals return False return True diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index efbe9995525d7..d04dc76c01f7e 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -606,7 +606,7 @@ def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray") assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs) - assert_attr_equal("closed", left, right, obj=obj) + assert_attr_equal("inclusive", left, right, obj=obj) def assert_period_array_equal(left, right, obj="PeriodArray"): diff --git a/pandas/conftest.py b/pandas/conftest.py index 73ced327ac4a9..dfe8c5f1778d3 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -606,7 +606,7 @@ def 
_create_mi_with_dt64tz_level(): "bool-object": tm.makeBoolIndex(10).astype(object), "bool-dtype": Index(np.random.randn(10) < 0), "categorical": tm.makeCategoricalIndex(100), - "interval": tm.makeIntervalIndex(100), + "interval": tm.makeIntervalIndex(100, inclusive="right"), "empty": Index([]), "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), @@ -934,8 +934,14 @@ def rand_series_with_duplicate_datetimeindex(): # ---------------------------------------------------------------- @pytest.fixture( params=[ - (Interval(left=0, right=5), IntervalDtype("int64", "right")), - (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), + ( + Interval(left=0, right=5, inclusive="right"), + IntervalDtype("int64", inclusive="right"), + ), + ( + Interval(left=0.1, right=0.5, inclusive="right"), + IntervalDtype("float64", inclusive="right"), + ), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index ca7ec0ef2ebaf..e0f242e2ced5d 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -6,6 +6,8 @@ import numpy as np import pyarrow +from pandas._libs import lib +from pandas._libs.interval import _warning_interval from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level @@ -103,11 +105,17 @@ def to_pandas_dtype(self): class ArrowIntervalType(pyarrow.ExtensionType): - def __init__(self, subtype, closed) -> None: + def __init__( + self, + subtype, + inclusive: str | None = None, + closed: None | lib.NoDefault = lib.no_default, + ) -> None: # attributes need to be set first before calling # super init (as that calls serialize) - assert closed in VALID_CLOSED - self._closed = closed + inclusive, closed = _warning_interval(inclusive, closed) + assert inclusive in VALID_CLOSED 
+ self._closed = inclusive if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) self._subtype = subtype @@ -120,37 +128,37 @@ def subtype(self): return self._subtype @property - def closed(self): + def inclusive(self): return self._closed def __arrow_ext_serialize__(self): - metadata = {"subtype": str(self.subtype), "closed": self.closed} + metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive} return json.dumps(metadata).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): metadata = json.loads(serialized.decode()) subtype = pyarrow.type_for_alias(metadata["subtype"]) - closed = metadata["closed"] - return ArrowIntervalType(subtype, closed) + inclusive = metadata["inclusive"] + return ArrowIntervalType(subtype, inclusive) def __eq__(self, other): if isinstance(other, pyarrow.BaseExtensionType): return ( type(self) == type(other) and self.subtype == other.subtype - and self.closed == other.closed + and self.inclusive == other.inclusive ) else: return NotImplemented def __hash__(self): - return hash((str(self), str(self.subtype), self.closed)) + return hash((str(self), str(self.subtype), self.inclusive)) def to_pandas_dtype(self): import pandas as pd - return pd.IntervalDtype(self.subtype.to_pandas_dtype(), self.closed) + return pd.IntervalDtype(self.subtype.to_pandas_dtype(), self.inclusive) # register the type with a dummy instance diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4c81fe8b61a1f..eecf1dff4dd48 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -24,6 +24,7 @@ VALID_CLOSED, Interval, IntervalMixin, + _warning_interval, intervals_to_interval_bounds, ) from pandas._libs.missing import NA @@ -122,7 +123,7 @@ data : array-like (1-dimensional) Array-like containing Interval objects from which to build the %(klass)s. 
-closed : {'left', 'right', 'both', 'neither'}, default 'right' +inclusive : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. dtype : dtype or None, default None @@ -137,7 +138,7 @@ ---------- left right -closed +inclusive mid length is_empty @@ -189,7 +190,8 @@ A new ``IntervalArray`` can be constructed directly from an array-like of ``Interval`` objects: - >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> pd.arrays.IntervalArray([pd.Interval(0, 1, "right"), + ... pd.Interval(1, 5, "right")]) <IntervalArray> [(0, 1], (1, 5]] Length: 2, dtype: interval[int64, right] @@ -217,18 +219,20 @@ class IntervalArray(IntervalMixin, ExtensionArray): def __new__( cls: type[IntervalArrayT], data, - closed=None, + inclusive: str | None = None, + closed: None | lib.NoDefault = lib.no_default, dtype: Dtype | None = None, copy: bool = False, verify_integrity: bool = True, ): + inclusive, closed = _warning_interval(inclusive, closed) data = extract_array(data, extract_numpy=True) if isinstance(data, cls): left = data._left right = data._right - closed = closed or data.closed + inclusive = inclusive or data.inclusive else: # don't allow scalars @@ -242,17 +246,17 @@ def __new__( # might need to convert empty or purely na data data = _maybe_convert_platform_interval(data) left, right, infer_closed = intervals_to_interval_bounds( - data, validate_closed=closed is None + data, validate_closed=inclusive is None ) if left.dtype == object: left = lib.maybe_convert_objects(left) right = lib.maybe_convert_objects(right) - closed = closed or infer_closed + inclusive = inclusive or infer_closed return cls._simple_new( left, right, - closed, + inclusive=inclusive, copy=copy, dtype=dtype, verify_integrity=verify_integrity, @@ -263,17 +267,21 @@ def _simple_new( cls: type[IntervalArrayT], left, right, - closed: IntervalClosedType | None = None, + inclusive=None, + closed: None | 
lib.NoDefault = lib.no_default, copy: bool = False, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> IntervalArrayT: result = IntervalMixin.__new__(cls) - if closed is None and isinstance(dtype, IntervalDtype): - closed = dtype.closed + inclusive, closed = _warning_interval(inclusive, closed) + + if inclusive is None and isinstance(dtype, IntervalDtype): + inclusive = dtype.inclusive + + inclusive = inclusive or "both" - closed = closed or "right" left = ensure_index(left, copy=copy) right = ensure_index(right, copy=copy) @@ -288,12 +296,11 @@ def _simple_new( else: msg = f"dtype must be an IntervalDtype, got {dtype}" raise TypeError(msg) - - if dtype.closed is None: + if dtype.inclusive is None: # possibly loading an old pickle - dtype = IntervalDtype(dtype.subtype, closed) - elif closed != dtype.closed: - raise ValueError("closed keyword does not match dtype.closed") + dtype = IntervalDtype(dtype.subtype, inclusive) + elif inclusive != dtype.inclusive: + raise ValueError("inclusive keyword does not match dtype.inclusive") # coerce dtypes to match if needed if is_float_dtype(left) and is_integer_dtype(right): @@ -336,7 +343,7 @@ def _simple_new( # If these share data, then setitem could corrupt our IA right = right.copy() - dtype = IntervalDtype(left.dtype, closed=closed) + dtype = IntervalDtype(left.dtype, inclusive=inclusive) result._dtype = dtype result._left = left @@ -364,7 +371,7 @@ def _from_factorized( # a new IA from an (empty) object-dtype array, so turn it into the # correct dtype. values = values.astype(original.dtype.subtype) - return cls(values, closed=original.closed) + return cls(values, inclusive=original.inclusive) _interval_shared_docs["from_breaks"] = textwrap.dedent( """ @@ -374,7 +381,7 @@ def _from_factorized( ---------- breaks : array-like (1-dimensional) Left and right bounds for each interval. 
- closed : {'left', 'right', 'both', 'neither'}, default 'right' + inclusive : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. copy : bool, default False @@ -405,7 +412,7 @@ def _from_factorized( """\ Examples -------- - >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3]) + >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], "right") <IntervalArray> [(0, 1], (1, 2], (2, 3]] Length: 3, dtype: interval[int64, right] @@ -416,13 +423,15 @@ def _from_factorized( def from_breaks( cls: type[IntervalArrayT], breaks, - closed: IntervalClosedType | None = "right", + inclusive="both", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: breaks = _maybe_convert_platform_interval(breaks) - return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype) + return cls.from_arrays( + breaks[:-1], breaks[1:], inclusive, copy=copy, dtype=dtype + ) _interval_shared_docs["from_arrays"] = textwrap.dedent( """ @@ -434,7 +443,7 @@ def from_breaks( Left bounds for each interval. right : array-like (1-dimensional) Right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' + inclusive : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. 
copy : bool, default False @@ -480,7 +489,7 @@ def from_breaks( "klass": "IntervalArray", "examples": textwrap.dedent( """\ - >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3]) + >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], inclusive="right") <IntervalArray> [(0, 1], (1, 2], (2, 3]] Length: 3, dtype: interval[int64, right] @@ -492,7 +501,7 @@ def from_arrays( cls: type[IntervalArrayT], left, right, - closed: IntervalClosedType | None = "right", + inclusive="both", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -500,7 +509,12 @@ def from_arrays( right = _maybe_convert_platform_interval(right) return cls._simple_new( - left, right, closed, copy=copy, dtype=dtype, verify_integrity=True + left, + right, + inclusive=inclusive, + copy=copy, + dtype=dtype, + verify_integrity=True, ) _interval_shared_docs["from_tuples"] = textwrap.dedent( @@ -511,7 +525,7 @@ def from_arrays( ---------- data : array-like (1-dimensional) Array of tuples. - closed : {'left', 'right', 'both', 'neither'}, default 'right' + inclusive : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. 
copy : bool, default False @@ -544,7 +558,7 @@ def from_arrays( """\ Examples -------- - >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) + >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)], inclusive="right") <IntervalArray> [(0, 1], (1, 2]] Length: 2, dtype: interval[int64, right] @@ -555,7 +569,7 @@ def from_arrays( def from_tuples( cls: type[IntervalArrayT], data, - closed="right", + inclusive="both", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: @@ -582,7 +596,7 @@ def from_tuples( left.append(lhs) right.append(rhs) - return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) + return cls.from_arrays(left, right, inclusive, copy=False, dtype=dtype) def _validate(self): """ @@ -590,13 +604,13 @@ def _validate(self): Checks that - * closed is valid + * inclusive is valid * left and right match lengths * left and right have the same missing values * left is always below right """ - if self.closed not in VALID_CLOSED: - msg = f"invalid option for 'closed': {self.closed}" + if self.inclusive not in VALID_CLOSED: + msg = f"invalid option for 'inclusive': {self.inclusive}" raise ValueError(msg) if len(self._left) != len(self._right): msg = "left and right must have the same length" @@ -624,7 +638,9 @@ def _shallow_copy(self: IntervalArrayT, left, right) -> IntervalArrayT: right : Index Values to be used for the right-side of the intervals. 
""" - return self._simple_new(left, right, closed=self.closed, verify_integrity=False) + return self._simple_new( + left, right, inclusive=self.inclusive, verify_integrity=False + ) # --------------------------------------------------------------------- # Descriptive @@ -670,7 +686,7 @@ def __getitem__( # scalar if is_scalar(left) and isna(left): return self._fill_value - return Interval(left, right, self.closed) + return Interval(left, right, inclusive=self.inclusive) if np.ndim(left) > 1: # GH#30588 multi-dimensional indexer disallowed raise ValueError("multi-dimensional indexing not allowed") @@ -711,7 +727,7 @@ def _cmp_method(self, other, op): # extract intervals if we have interval categories with matching closed if is_interval_dtype(other_dtype): - if self.closed != other.categories.closed: + if self.inclusive != other.categories.inclusive: return invalid_comparison(self, other, op) other = other.categories.take( @@ -720,7 +736,7 @@ def _cmp_method(self, other, op): # interval-like -> need same closed and matching endpoints if is_interval_dtype(other_dtype): - if self.closed != other.closed: + if self.inclusive != other.inclusive: return invalid_comparison(self, other, op) elif not isinstance(other, Interval): other = type(self)(other) @@ -936,7 +952,7 @@ def equals(self, other) -> bool: return False return bool( - self.closed == other.closed + self.inclusive == other.inclusive and self.left.equals(other.left) and self.right.equals(other.right) ) @@ -956,14 +972,14 @@ def _concat_same_type( ------- IntervalArray """ - closed_set = {interval.closed for interval in to_concat} - if len(closed_set) != 1: + inclusive_set = {interval.inclusive for interval in to_concat} + if len(inclusive_set) != 1: raise ValueError("Intervals must all be closed on the same side.") - closed = closed_set.pop() + inclusive = inclusive_set.pop() left = np.concatenate([interval.left for interval in to_concat]) right = np.concatenate([interval.right for interval in to_concat]) - return 
cls._simple_new(left, right, closed=closed, copy=False) + return cls._simple_new(left, right, inclusive=inclusive, copy=False) def copy(self: IntervalArrayT) -> IntervalArrayT: """ @@ -975,9 +991,9 @@ def copy(self: IntervalArrayT) -> IntervalArrayT: """ left = self._left.copy() right = self._right.copy() - closed = self.closed + inclusive = self.inclusive # TODO: Could skip verify_integrity here. - return type(self).from_arrays(left, right, closed=closed) + return type(self).from_arrays(left, right, inclusive=inclusive) def isna(self) -> np.ndarray: return isna(self._left) @@ -999,7 +1015,7 @@ def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray: from pandas import Index fill_value = Index(self._left, copy=False)._na_value - empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) + empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1), "right") else: empty = self._from_sequence([fill_value] * empty_len) @@ -1129,7 +1145,7 @@ def _validate_setitem_value(self, value): value_left, value_right = value, value elif isinstance(value, Interval): - # scalar interval + # scalar self._check_closed_matches(value, name="value") value_left, value_right = value.left, value.right self.left._validate_fill_value(value_left) @@ -1257,7 +1273,7 @@ def mid(self) -> Index: """ Check elementwise if an Interval overlaps the values in the %(klass)s. - Two intervals overlap if they share a common point, including closed + Two intervals overlap if they share a common point, including inclusive endpoints. Intervals that only have an open endpoint in common do not overlap. 
@@ -1281,14 +1297,14 @@ def mid(self) -> Index: >>> intervals.overlaps(pd.Interval(0.5, 1.5)) array([ True, True, False]) - Intervals that share closed endpoints overlap: + Intervals that share inclusive endpoints overlap: - >>> intervals.overlaps(pd.Interval(1, 3, closed='left')) + >>> intervals.overlaps(pd.Interval(1, 3, inclusive='left')) array([ True, True, True]) Intervals that only have an open endpoint in common do not overlap: - >>> intervals.overlaps(pd.Interval(1, 2, closed='right')) + >>> intervals.overlaps(pd.Interval(1, 2, inclusive='right')) array([False, True, False]) """ ) @@ -1300,7 +1316,7 @@ def mid(self) -> Index: "examples": textwrap.dedent( """\ >>> data = [(0, 1), (1, 3), (2, 4)] - >>> intervals = pd.arrays.IntervalArray.from_tuples(data) + >>> intervals = pd.arrays.IntervalArray.from_tuples(data, "right") >>> intervals <IntervalArray> [(0, 1], (1, 3], (2, 4]] @@ -1328,12 +1344,12 @@ def overlaps(self, other): # --------------------------------------------------------------------- @property - def closed(self) -> IntervalClosedType: + def inclusive(self) -> IntervalClosedType: """ Whether the intervals are closed on the left-side, right-side, both or neither. """ - return self.dtype.closed + return self.dtype.inclusive _interval_shared_docs["set_closed"] = textwrap.dedent( """ @@ -1342,7 +1358,7 @@ def closed(self) -> IntervalClosedType: Parameters ---------- - closed : {'left', 'right', 'both', 'neither'} + inclusive : {'left', 'right', 'both', 'neither'} Whether the intervals are closed on the left-side, right-side, both or neither. 
@@ -1362,7 +1378,7 @@ def closed(self) -> IntervalClosedType: """\ Examples -------- - >>> index = pd.arrays.IntervalArray.from_breaks(range(4)) + >>> index = pd.arrays.IntervalArray.from_breaks(range(4), "right") >>> index <IntervalArray> [(0, 1], (1, 2], (2, 3]] @@ -1375,13 +1391,18 @@ def closed(self) -> IntervalClosedType: ), } ) - def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArrayT: - if closed not in VALID_CLOSED: - msg = f"invalid option for 'closed': {closed}" + def set_closed( + self: IntervalArrayT, inclusive: IntervalClosedType + ) -> IntervalArrayT: + if inclusive not in VALID_CLOSED: + msg = f"invalid option for 'inclusive': {inclusive}" raise ValueError(msg) return type(self)._simple_new( - left=self._left, right=self._right, closed=closed, verify_integrity=False + left=self._left, + right=self._right, + inclusive=inclusive, + verify_integrity=False, ) _interval_shared_docs[ @@ -1403,15 +1424,15 @@ def is_non_overlapping_monotonic(self) -> bool: # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) 
# we already require left <= right - # strict inequality for closed == 'both'; equality implies overlapping + # strict inequality for inclusive == 'both'; equality implies overlapping # at a point when both sides of intervals are included - if self.closed == "both": + if self.inclusive == "both": return bool( (self._right[:-1] < self._left[1:]).all() or (self._left[:-1] > self._right[1:]).all() ) - # non-strict inequality when closed != 'both'; at least one side is + # non-strict inequality when inclusive != 'both'; at least one side is # not included in the intervals, so equality does not imply overlapping return bool( (self._right[:-1] <= self._left[1:]).all() @@ -1429,14 +1450,14 @@ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: left = self._left right = self._right mask = self.isna() - closed = self.closed + inclusive = self.inclusive result = np.empty(len(left), dtype=object) for i in range(len(left)): if mask[i]: result[i] = np.nan else: - result[i] = Interval(left[i], right[i], closed) + result[i] = Interval(left[i], right[i], inclusive=inclusive) return result def __arrow_array__(self, type=None): @@ -1454,7 +1475,7 @@ def __arrow_array__(self, type=None): f"Conversion to arrow with subtype '{self.dtype.subtype}' " "is not supported" ) from err - interval_type = ArrowIntervalType(subtype, self.closed) + interval_type = ArrowIntervalType(subtype, self.inclusive) storage_array = pyarrow.StructArray.from_arrays( [ pyarrow.array(self._left, type=subtype, from_pandas=True), @@ -1477,12 +1498,13 @@ def __arrow_array__(self, type=None): if type.equals(interval_type.storage_type): return storage_array elif isinstance(type, ArrowIntervalType): - # ensure we have the same subtype and closed attributes + # ensure we have the same subtype and inclusive attributes if not type.equals(interval_type): raise TypeError( "Not supported to convert IntervalArray to type with " f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) " - f"and 'closed' 
({self.closed} vs {type.closed}) attributes" + f"and 'inclusive' ({self.inclusive} vs {type.inclusive}) " + f"attributes" ) else: raise TypeError( @@ -1610,7 +1632,8 @@ def repeat( "klass": "IntervalArray", "examples": textwrap.dedent( """\ - >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)]) + >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)] + ... , "right") >>> intervals <IntervalArray> [(0, 1], (1, 3], (2, 4]] @@ -1633,7 +1656,7 @@ def isin(self, values) -> np.ndarray: values = extract_array(values, extract_numpy=True) if is_interval_dtype(values.dtype): - if self.closed != values.closed: + if self.inclusive != values.inclusive: # not comparable -> no overlap return np.zeros(self.shape, dtype=bool) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 88a92ea1455d0..c6a4effac7a37 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -515,7 +515,7 @@ def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: elif isinstance(dtype, IntervalDtype): # TODO(GH#45349): don't special-case IntervalDtype, allow # overriding instead of returning object below. 
- return IntervalDtype(np.float64, closed=dtype.closed) + return IntervalDtype(np.float64, inclusive=dtype.inclusive) return _dtype_obj elif dtype.kind == "b": return _dtype_obj @@ -834,7 +834,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, dtype = PeriodDtype(freq=val.freq) elif lib.is_interval(val): subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0] - dtype = IntervalDtype(subtype=subtype, closed=val.closed) + dtype = IntervalDtype(subtype=subtype, inclusive=val.inclusive) return dtype, val diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 6776064342db0..a192337daf59b 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -479,7 +479,7 @@ def is_interval_dtype(arr_or_dtype) -> bool: >>> is_interval_dtype([1, 2, 3]) False >>> - >>> interval = pd.Interval(1, 2, closed="right") + >>> interval = pd.Interval(1, 2, inclusive="right") >>> is_interval_dtype(interval) False >>> is_interval_dtype(pd.IntervalIndex([interval])) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 58e91f46dff43..64d46976b54f6 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -14,8 +14,14 @@ import numpy as np import pytz -from pandas._libs import missing as libmissing -from pandas._libs.interval import Interval +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.interval import ( + Interval, + _warning_interval, +) from pandas._libs.properties import cache_readonly from pandas._libs.tslibs import ( BaseOffset, @@ -1040,7 +1046,7 @@ class IntervalDtype(PandasExtensionDtype): Examples -------- - >>> pd.IntervalDtype(subtype='int64', closed='both') + >>> pd.IntervalDtype(subtype='int64', inclusive='both') interval[int64, both] """ @@ -1051,27 +1057,42 @@ class IntervalDtype(PandasExtensionDtype): num = 103 _metadata = ( "subtype", - "closed", + "inclusive", ) _match = re.compile( - 
r"(I|i)nterval\[(?P<subtype>[^,]+)(, (?P<closed>(right|left|both|neither)))?\]" + r"(I|i)nterval\[(?P<subtype>[^,]+)(, (" + r"?P<inclusive>(right|left|both|neither)))?\]" ) _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} - def __new__(cls, subtype=None, closed: str_type | None = None): + def __new__( + cls, + subtype=None, + inclusive: str_type | None = None, + closed: None | lib.NoDefault = lib.no_default, + ): from pandas.core.dtypes.common import ( is_string_dtype, pandas_dtype, ) - if closed is not None and closed not in {"right", "left", "both", "neither"}: - raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'") + inclusive, closed = _warning_interval(inclusive, closed) + + if inclusive is not None and inclusive not in { + "right", + "left", + "both", + "neither", + }: + raise ValueError( + "inclusive must be one of 'right', 'left', 'both', 'neither'" + ) if isinstance(subtype, IntervalDtype): - if closed is not None and closed != subtype.closed: + if inclusive is not None and inclusive != subtype.inclusive: raise ValueError( - "dtype.closed and 'closed' do not match. " - "Try IntervalDtype(dtype.subtype, closed) instead." + "dtype.inclusive and 'inclusive' do not match. " + "Try IntervalDtype(dtype.subtype, inclusive) instead." 
) return subtype elif subtype is None: @@ -1079,7 +1100,7 @@ def __new__(cls, subtype=None, closed: str_type | None = None): # generally for pickle compat u = object.__new__(cls) u._subtype = None - u._closed = closed + u._closed = inclusive return u elif isinstance(subtype, str) and subtype.lower() == "interval": subtype = None @@ -1089,14 +1110,14 @@ def __new__(cls, subtype=None, closed: str_type | None = None): if m is not None: gd = m.groupdict() subtype = gd["subtype"] - if gd.get("closed", None) is not None: - if closed is not None: - if closed != gd["closed"]: + if gd.get("inclusive", None) is not None: + if inclusive is not None: + if inclusive != gd["inclusive"]: raise ValueError( - "'closed' keyword does not match value " + "'inclusive' keyword does not match value " "specified in dtype string" ) - closed = gd["closed"] + inclusive = gd["inclusive"] try: subtype = pandas_dtype(subtype) @@ -1111,13 +1132,13 @@ def __new__(cls, subtype=None, closed: str_type | None = None): ) raise TypeError(msg) - key = str(subtype) + str(closed) + key = str(subtype) + str(inclusive) try: return cls._cache_dtypes[key] except KeyError: u = object.__new__(cls) u._subtype = subtype - u._closed = closed + u._closed = inclusive cls._cache_dtypes[key] = u return u @@ -1134,7 +1155,7 @@ def _can_hold_na(self) -> bool: return True @property - def closed(self): + def inclusive(self): return self._closed @property @@ -1186,10 +1207,10 @@ def type(self): def __str__(self) -> str_type: if self.subtype is None: return "interval" - if self.closed is None: + if self.inclusive is None: # Only partially initialized GH#38394 return f"interval[{self.subtype}]" - return f"interval[{self.subtype}, {self.closed}]" + return f"interval[{self.subtype}, {self.inclusive}]" def __hash__(self) -> int: # make myself hashable @@ -1203,7 +1224,7 @@ def __eq__(self, other: Any) -> bool: elif self.subtype is None or other.subtype is None: # None should match any subtype return True - elif self.closed != 
other.closed: + elif self.inclusive != other.inclusive: return False else: from pandas.core.dtypes.common import is_dtype_equal @@ -1215,9 +1236,8 @@ def __setstate__(self, state): # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._subtype = state["subtype"] - - # backward-compat older pickles won't have "closed" key - self._closed = state.pop("closed", None) + # backward-compat older pickles won't have "inclusive" key + self._closed = state.pop("inclusive", None) @classmethod def is_dtype(cls, dtype: object) -> bool: @@ -1259,14 +1279,14 @@ def __from_arrow__( arr = arr.storage left = np.asarray(arr.field("left"), dtype=self.subtype) right = np.asarray(arr.field("right"), dtype=self.subtype) - iarr = IntervalArray.from_arrays(left, right, closed=self.closed) + iarr = IntervalArray.from_arrays(left, right, inclusive=self.inclusive) results.append(iarr) if not results: return IntervalArray.from_arrays( np.array([], dtype=self.subtype), np.array([], dtype=self.subtype), - closed=self.closed, + inclusive=self.inclusive, ) return IntervalArray._concat_same_type(results) @@ -1274,8 +1294,8 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if not all(isinstance(x, IntervalDtype) for x in dtypes): return None - closed = cast("IntervalDtype", dtypes[0]).closed - if not all(cast("IntervalDtype", x).closed == closed for x in dtypes): + inclusive = cast("IntervalDtype", dtypes[0]).inclusive + if not all(cast("IntervalDtype", x).inclusive == inclusive for x in dtypes): return np.dtype(object) from pandas.core.dtypes.cast import find_common_type @@ -1283,7 +1303,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes]) if common == object: return np.dtype(object) - return IntervalDtype(common, closed=closed) + return IntervalDtype(common, inclusive=inclusive) class 
PandasDtype(ExtensionDtype): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index a89b52e0950f2..11e2da47c5738 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -11,7 +11,6 @@ Hashable, Literal, ) -import warnings import numpy as np @@ -20,6 +19,7 @@ Interval, IntervalMixin, IntervalTree, + _warning_interval, ) from pandas._libs.tslibs import ( BaseOffset, @@ -189,12 +189,12 @@ def _new_IntervalIndex(cls, d): ], IntervalArray, ) -@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True) +@inherit_names(["is_non_overlapping_monotonic", "inclusive"], IntervalArray, cache=True) class IntervalIndex(ExtensionIndex): _typ = "intervalindex" # annotate properties pinned via inherit_names - closed: IntervalClosedType + inclusive: IntervalClosedType is_non_overlapping_monotonic: bool closed_left: bool closed_right: bool @@ -212,19 +212,22 @@ class IntervalIndex(ExtensionIndex): def __new__( cls, data, - closed=None, + inclusive=None, + closed: None | lib.NoDefault = lib.no_default, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, verify_integrity: bool = True, ) -> IntervalIndex: + inclusive, closed = _warning_interval(inclusive, closed) + name = maybe_extract_name(name, data, cls) with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray( data, - closed=closed, + inclusive=inclusive, copy=copy, dtype=dtype, verify_integrity=verify_integrity, @@ -241,7 +244,7 @@ def __new__( """\ Examples -------- - >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) + >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3], "right") IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval[int64, right]') """ @@ -251,14 +254,20 @@ def __new__( def from_breaks( cls, breaks, - closed: IntervalClosedType | None = "right", + inclusive=None, + closed: None | lib.NoDefault = lib.no_default, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> 
IntervalIndex: + + inclusive, closed = _warning_interval(inclusive, closed) + if inclusive is None: + inclusive = "both" + with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_breaks( - breaks, closed=closed, copy=copy, dtype=dtype + breaks, inclusive=inclusive, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) @@ -271,7 +280,7 @@ def from_breaks( """\ Examples -------- - >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) + >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], "right") IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval[int64, right]') """ @@ -282,14 +291,20 @@ def from_arrays( cls, left, right, - closed: IntervalClosedType = "right", + inclusive=None, + closed: None | lib.NoDefault = lib.no_default, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: + + inclusive, closed = _warning_interval(inclusive, closed) + if inclusive is None: + inclusive = "both" + with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_arrays( - left, right, closed, copy=copy, dtype=dtype + left, right, inclusive, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) @@ -302,7 +317,7 @@ def from_arrays( """\ Examples -------- - >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) + >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)], "right") IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') """ @@ -312,13 +327,21 @@ def from_arrays( def from_tuples( cls, data, - closed: str = "right", + inclusive=None, + closed: None | lib.NoDefault = lib.no_default, name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: + + inclusive, closed = _warning_interval(inclusive, closed) + if inclusive is None: + inclusive = "both" + with rewrite_exception("IntervalArray", cls.__name__): - arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype) + arr = IntervalArray.from_tuples( + data, 
inclusive=inclusive, copy=copy, dtype=dtype + ) return cls._simple_new(arr, name=name) # -------------------------------------------------------------------- @@ -328,7 +351,7 @@ def from_tuples( def _engine(self) -> IntervalTree: # type: ignore[override] left = self._maybe_convert_i8(self.left) right = self._maybe_convert_i8(self.right) - return IntervalTree(left, right, closed=self.closed) + return IntervalTree(left, right, inclusive=self.inclusive) def __contains__(self, key: Any) -> bool: """ @@ -363,7 +386,7 @@ def __reduce__(self): d = { "left": self.left, "right": self.right, - "closed": self.closed, + "inclusive": self.inclusive, "name": self.name, } return _new_IntervalIndex, (type(self), d), None @@ -418,7 +441,7 @@ def is_overlapping(self) -> bool: """ Return True if the IntervalIndex has overlapping intervals, else False. - Two intervals overlap if they share a common point, including closed + Two intervals overlap if they share a common point, including inclusive endpoints. Intervals that only have an open endpoint in common do not overlap. @@ -435,7 +458,7 @@ def is_overlapping(self) -> bool: Examples -------- - >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)]) + >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)], "right") >>> index IntervalIndex([(0, 2], (1, 3], (4, 5]], dtype='interval[int64, right]') @@ -519,7 +542,7 @@ def _maybe_convert_i8(self, key): constructor = Interval if scalar else IntervalIndex.from_arrays # error: "object" not callable return constructor( - left, right, closed=self.closed + left, right, inclusive=self.inclusive ) # type: ignore[operator] if scalar: @@ -600,7 +623,7 @@ def get_loc( Examples -------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) - >>> index = pd.IntervalIndex([i1, i2]) + >>> index = pd.IntervalIndex([i1, i2], "right") >>> index.get_loc(1) 0 @@ -613,20 +636,20 @@ def get_loc( relevant intervals. 
>>> i3 = pd.Interval(0, 2) - >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) + >>> overlapping_index = pd.IntervalIndex([i1, i2, i3], "right") >>> overlapping_index.get_loc(0.5) array([ True, False, True]) Only exact matches will be returned if an interval is provided. - >>> index.get_loc(pd.Interval(0, 1)) + >>> index.get_loc(pd.Interval(0, 1, "right")) 0 """ self._check_indexing_method(method) self._check_indexing_error(key) if isinstance(key, Interval): - if self.closed != key.closed: + if self.inclusive != key.inclusive: raise KeyError(key) mask = (self.left == key.left) & (self.right == key.right) elif is_valid_na_for_dtype(key, self.dtype): @@ -687,7 +710,7 @@ def get_indexer_non_unique( target = ensure_index(target) if not self._should_compare(target) and not self._should_partial_index(target): - # e.g. IntervalIndex with different closed or incompatible subtype + # e.g. IntervalIndex with different inclusive or incompatible subtype # -> no matches return self._get_indexer_non_comparable(target, None, unique=False) @@ -839,7 +862,7 @@ def _intersection(self, other, sort): """ intersection specialized to the case with matching dtypes. 
""" - # For IntervalIndex we also know other.closed == self.closed + # For IntervalIndex we also know other.inclusive == self.inclusive if self.left.is_unique and self.right.is_unique: taken = self._intersection_unique(other) elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1: @@ -1054,27 +1077,8 @@ def interval_range( IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') """ - if inclusive is not None and closed is not lib.no_default: - raise ValueError( - "Deprecated argument `closed` cannot be passed " - "if argument `inclusive` is not None" - ) - elif closed is not lib.no_default: - warnings.warn( - "Argument `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=2, - ) - if closed is None: - inclusive = "both" - elif closed in ("both", "neither", "left", "right"): - inclusive = closed - else: - raise ValueError( - "Argument `closed` has to be either" - "'both', 'neither', 'left' or 'right'" - ) - elif inclusive is None: + inclusive, closed = _warning_interval(inclusive, closed) + if inclusive is None: inclusive = "both" start = maybe_box_datetimelike(start) @@ -1149,4 +1153,4 @@ def interval_range( else: breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) - return IntervalIndex.from_breaks(breaks, name=name, closed=inclusive) + return IntervalIndex.from_breaks(breaks, name=name, inclusive=inclusive) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 71e2a1e36cbbf..3836f3e6540b4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1961,8 +1961,8 @@ def _catch_deprecated_value_error(err: Exception) -> None: # is enforced, stop catching ValueError here altogether if isinstance(err, IncompatibleFrequency): pass - elif "'value.closed' is" in str(err): - # IntervalDtype mismatched 'closed' + elif "'value.inclusive' is" in str(err): + # IntervalDtype mismatched 'inclusive' pass elif "Timezones don't 
match" not in str(err): raise diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 94705790e40bd..00b2b30eb3122 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -231,7 +231,7 @@ def cut( is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. - >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], inclusive="right") >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]] @@ -561,7 +561,7 @@ def _format_labels( bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None ): """based on the dtype, return our labels""" - closed: IntervalLeftRight = "right" if right else "left" + inclusive: IntervalLeftRight = "right" if right else "left" formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] @@ -584,7 +584,7 @@ def _format_labels( # adjust lhs of first interval by precision to account for being right closed breaks[0] = adjust(breaks[0]) - return IntervalIndex.from_breaks(breaks, closed=closed) + return IntervalIndex.from_breaks(breaks, inclusive=inclusive) def _preprocess_for_cut(x): diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 88e3dca62d9e0..99e1ad1767e07 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -62,16 +62,16 @@ def interval_array(left_right_dtypes): return IntervalArray.from_arrays(left, right) -def create_categorical_intervals(left, right, closed="right"): - return Categorical(IntervalIndex.from_arrays(left, right, closed)) +def create_categorical_intervals(left, right, inclusive="right"): + return Categorical(IntervalIndex.from_arrays(left, right, inclusive)) -def create_series_intervals(left, right, closed="right"): - return 
Series(IntervalArray.from_arrays(left, right, closed)) +def create_series_intervals(left, right, inclusive="right"): + return Series(IntervalArray.from_arrays(left, right, inclusive)) -def create_series_categorical_intervals(left, right, closed="right"): - return Series(Categorical(IntervalIndex.from_arrays(left, right, closed))) +def create_series_categorical_intervals(left, right, inclusive="right"): + return Series(Categorical(IntervalIndex.from_arrays(left, right, inclusive))) class TestComparison: @@ -126,8 +126,10 @@ def test_compare_scalar_interval(self, op, interval_array): tm.assert_numpy_array_equal(result, expected) def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed): - interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) - other = Interval(0, 1, closed=other_closed) + interval_array = IntervalArray.from_arrays( + range(2), range(1, 3), inclusive=closed + ) + other = Interval(0, 1, inclusive=other_closed) result = op(interval_array, other) expected = self.elementwise_comparison(op, interval_array, other) @@ -207,8 +209,10 @@ def test_compare_list_like_interval(self, op, interval_array, interval_construct def test_compare_list_like_interval_mixed_closed( self, op, interval_constructor, closed, other_closed ): - interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) - other = interval_constructor(range(2), range(1, 3), closed=other_closed) + interval_array = IntervalArray.from_arrays( + range(2), range(1, 3), inclusive=closed + ) + other = interval_constructor(range(2), range(1, 3), inclusive=other_closed) result = op(interval_array, other) expected = self.elementwise_comparison(op, interval_array, other) diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index eaf86f5d521ae..28ffd5b4caf98 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -55,7 +55,7 @@ def 
test_is_empty(self, constructor, left, right, closed): # GH27219 tuples = [(left, left), (left, right), np.nan] expected = np.array([closed != "both", False, False]) - result = constructor.from_tuples(tuples, closed=closed).is_empty + result = constructor.from_tuples(tuples, inclusive=closed).is_empty tm.assert_numpy_array_equal(result, expected) @@ -63,23 +63,23 @@ class TestMethods: @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) def test_set_closed(self, closed, new_closed): # GH 21670 - array = IntervalArray.from_breaks(range(10), closed=closed) + array = IntervalArray.from_breaks(range(10), inclusive=closed) result = array.set_closed(new_closed) - expected = IntervalArray.from_breaks(range(10), closed=new_closed) + expected = IntervalArray.from_breaks(range(10), inclusive=new_closed) tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize( "other", [ - Interval(0, 1, closed="right"), - IntervalArray.from_breaks([1, 2, 3, 4], closed="right"), + Interval(0, 1, inclusive="right"), + IntervalArray.from_breaks([1, 2, 3, 4], inclusive="right"), ], ) def test_where_raises(self, other): # GH#45768 The IntervalArray methods raises; the Series method coerces - ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left")) + ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], inclusive="left")) mask = np.array([True, False, True]) - match = "'value.closed' is 'right', expected 'left'." + match = "'value.inclusive' is 'right', expected 'left'." 
with pytest.raises(ValueError, match=match): ser.array._where(mask, other) @@ -89,15 +89,15 @@ def test_where_raises(self, other): def test_shift(self): # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502 - a = IntervalArray.from_breaks([1, 2, 3]) + a = IntervalArray.from_breaks([1, 2, 3], "right") result = a.shift() # int -> float - expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)]) + expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)], "right") tm.assert_interval_array_equal(result, expected) def test_shift_datetime(self): # GH#31502, GH#31504 - a = IntervalArray.from_breaks(date_range("2000", periods=4)) + a = IntervalArray.from_breaks(date_range("2000", periods=4), "right") result = a.shift(2) expected = a.take([-1, -1, 0], allow_fill=True) tm.assert_interval_array_equal(result, expected) @@ -135,11 +135,11 @@ def test_set_na(self, left_right_dtypes): tm.assert_extension_array_equal(result, expected) def test_setitem_mismatched_closed(self): - arr = IntervalArray.from_breaks(range(4)) + arr = IntervalArray.from_breaks(range(4), "right") orig = arr.copy() other = arr.set_closed("both") - msg = "'value.closed' is 'both', expected 'right'" + msg = "'value.inclusive' is 'both', expected 'right'" with pytest.raises(ValueError, match=msg): arr[0] = other[0] with pytest.raises(ValueError, match=msg): @@ -156,13 +156,13 @@ def test_setitem_mismatched_closed(self): arr[:] = other[::-1].astype("category") # empty list should be no-op - arr[:0] = [] + arr[:0] = IntervalArray.from_breaks([], "right") tm.assert_interval_array_equal(arr, orig) def test_repr(): # GH 25022 - arr = IntervalArray.from_tuples([(0, 1), (1, 2)]) + arr = IntervalArray.from_tuples([(0, 1), (1, 2)], "right") result = repr(arr) expected = ( "<IntervalArray>\n" @@ -254,7 +254,7 @@ def test_arrow_extension_type(): p2 = ArrowIntervalType(pa.int64(), "left") p3 = ArrowIntervalType(pa.int64(), "right") - assert p1.closed == "left" + assert p1.inclusive 
== "left" assert p1 == p2 assert not p1 == p3 assert hash(p1) == hash(p2) @@ -271,7 +271,7 @@ def test_arrow_array(): result = pa.array(intervals) assert isinstance(result.type, ArrowIntervalType) - assert result.type.closed == intervals.closed + assert result.type.inclusive == intervals.inclusive assert result.type.subtype == pa.int64() assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64")) assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64")) @@ -302,7 +302,7 @@ def test_arrow_array_missing(): result = pa.array(arr) assert isinstance(result.type, ArrowIntervalType) - assert result.type.closed == arr.closed + assert result.type.inclusive == arr.inclusive assert result.type.subtype == pa.float64() # fields have missing values (not NaN) @@ -386,13 +386,60 @@ def test_from_arrow_from_raw_struct_array(): import pyarrow as pa arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}]) - dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither") + dtype = pd.IntervalDtype(np.dtype("int64"), inclusive="neither") result = dtype.__from_arrow__(arr) expected = IntervalArray.from_breaks( - np.array([0, 1, 2], dtype="int64"), closed="neither" + np.array([0, 1, 2], dtype="int64"), inclusive="neither" ) tm.assert_extension_array_equal(result, expected) result = dtype.__from_arrow__(pa.chunked_array([arr])) tm.assert_extension_array_equal(result, expected) + + +def test_interval_error_and_warning(): + # GH 40245 + msg = ( + "Deprecated argument `closed` cannot " + "be passed if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + Interval(0, 1, closed="both", inclusive="both") + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + Interval(0, 1, closed="both") + + +def test_interval_array_error_and_warning(): + # GH 40245 + msg = ( + "Deprecated argument `closed` cannot " + "be passed 
if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + IntervalArray([Interval(0, 1), Interval(1, 5)], closed="both", inclusive="both") + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + IntervalArray([Interval(0, 1), Interval(1, 5)], closed="both") + + +@pyarrow_skip +def test_arrow_interval_type_error_and_warning(): + # GH 40245 + import pyarrow as pa + + from pandas.core.arrays.arrow._arrow_utils import ArrowIntervalType + + msg = ( + "Deprecated argument `closed` cannot " + "be passed if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + ArrowIntervalType(pa.int64(), closed="both", inclusive="both") + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): + ArrowIntervalType(pa.int64(), closed="both") diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 3c8d12556bf1c..bf7dca9e1d5a0 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -133,9 +133,9 @@ ), # Interval ( - [pd.Interval(1, 2), pd.Interval(3, 4)], + [pd.Interval(1, 2, "right"), pd.Interval(3, 4, "right")], "interval", - IntervalArray.from_tuples([(1, 2), (3, 4)]), + IntervalArray.from_tuples([(1, 2), (3, 4)], "right"), ), # Sparse ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")), @@ -206,7 +206,10 @@ def test_array_copy(): period_array(["2000", "2001"], freq="D"), ), # interval - ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])), + ( + [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right")], + IntervalArray.from_breaks([0, 1, 2], "right"), + ), # datetime ( [pd.Timestamp("2000"), pd.Timestamp("2001")], @@ -296,7 +299,7 @@ def test_array_inference(data, expected): # mix of frequencies [pd.Period("2000", "D"), pd.Period("2001", 
"A")], # mix of closed - [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")], + [pd.Interval(0, 1, "left"), pd.Interval(1, 2, "right")], # Mix of timezones [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], # Mix of tz-aware and tz-naive diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 599aaae4d3527..3adaddf89cf30 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -290,8 +290,10 @@ def test_array_multiindex_raises(): ), (pd.array([0, np.nan], dtype="Int64"), np.array([0, pd.NA], dtype=object)), ( - IntervalArray.from_breaks([0, 1, 2]), - np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object), + IntervalArray.from_breaks([0, 1, 2], "right"), + np.array( + [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right")], dtype=object + ), ), (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)), # tz-naive datetime diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index c46f1b036dbee..55a6cc48ebfc8 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -133,10 +133,10 @@ def test_value_counts_bins(index_or_series): s1 = Series([1, 1, 2, 3]) res1 = s1.value_counts(bins=1) - exp1 = Series({Interval(0.997, 3.0): 4}) + exp1 = Series({Interval(0.997, 3.0, "right"): 4}) tm.assert_series_equal(res1, exp1) res1n = s1.value_counts(bins=1, normalize=True) - exp1n = Series({Interval(0.997, 3.0): 1.0}) + exp1n = Series({Interval(0.997, 3.0, "right"): 1.0}) tm.assert_series_equal(res1n, exp1n) if isinstance(s1, Index): @@ -149,12 +149,12 @@ def test_value_counts_bins(index_or_series): # these return the same res4 = s1.value_counts(bins=4, dropna=True) - intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0], "right") exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) 
tm.assert_series_equal(res4, exp4) res4 = s1.value_counts(bins=4, dropna=False) - intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0], "right") exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4, exp4) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index a32b37fbdd71b..c5d0567b6dfc0 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -269,7 +269,7 @@ def test_is_interval_dtype(): assert com.is_interval_dtype(IntervalDtype()) - interval = pd.Interval(1, 2, closed="right") + interval = pd.Interval(1, 2, inclusive="right") assert not com.is_interval_dtype(interval) assert com.is_interval_dtype(pd.IntervalIndex([interval])) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index f077317e7ebbe..b7de8016f8fac 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -568,10 +568,19 @@ def test_hash_vs_equality(self, dtype): "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")] ) def test_construction(self, subtype): - i = IntervalDtype(subtype, closed="right") + i = IntervalDtype(subtype, inclusive="right") assert i.subtype == np.dtype("int64") assert is_interval_dtype(i) + @pytest.mark.parametrize( + "subtype", ["interval[int64, right]", "Interval[int64, right]"] + ) + def test_construction_string_regex(self, subtype): + i = IntervalDtype(subtype=subtype) + assert i.subtype == np.dtype("int64") + assert i.inclusive == "right" + assert is_interval_dtype(i) + @pytest.mark.parametrize( "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")] ) @@ -579,10 +588,10 @@ def test_construction_allows_closed_none(self, subtype): # GH#38394 dtype = IntervalDtype(subtype) - assert dtype.closed is None + assert dtype.inclusive is None def test_closed_mismatch(self): - msg = 
"'closed' keyword does not match value specified in dtype string" + msg = "'inclusive' keyword does not match value specified in dtype string" with pytest.raises(ValueError, match=msg): IntervalDtype("interval[int64, left]", "right") @@ -624,12 +633,12 @@ def test_closed_must_match(self): # GH#37933 dtype = IntervalDtype(np.float64, "left") - msg = "dtype.closed and 'closed' do not match" + msg = "dtype.inclusive and 'inclusive' do not match" with pytest.raises(ValueError, match=msg): - IntervalDtype(dtype, closed="both") + IntervalDtype(dtype, inclusive="both") def test_closed_invalid(self): - with pytest.raises(ValueError, match="closed must be one of"): + with pytest.raises(ValueError, match="inclusive must be one of"): IntervalDtype(np.float64, "foo") def test_construction_from_string(self, dtype): @@ -729,8 +738,8 @@ def test_equality(self, dtype): ) def test_equality_generic(self, subtype): # GH 18980 - closed = "right" if subtype is not None else None - dtype = IntervalDtype(subtype, closed=closed) + inclusive = "right" if subtype is not None else None + dtype = IntervalDtype(subtype, inclusive=inclusive) assert is_dtype_equal(dtype, "interval") assert is_dtype_equal(dtype, IntervalDtype()) @@ -748,9 +757,9 @@ def test_equality_generic(self, subtype): ) def test_name_repr(self, subtype): # GH 18980 - closed = "right" if subtype is not None else None - dtype = IntervalDtype(subtype, closed=closed) - expected = f"interval[{subtype}, {closed}]" + inclusive = "right" if subtype is not None else None + dtype = IntervalDtype(subtype, inclusive=inclusive) + expected = f"interval[{subtype}, {inclusive}]" assert str(dtype) == expected assert dtype.name == "interval" @@ -812,6 +821,21 @@ def test_unpickling_without_closed(self): tm.round_trip_pickle(dtype) + def test_interval_dtype_error_and_warning(self): + # GH 40245 + msg = ( + "Deprecated argument `closed` cannot " + "be passed if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + 
IntervalDtype("int64", closed="right", inclusive="right") + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + IntervalDtype("int64", closed="right") + class TestCategoricalDtypeParametrized: @pytest.mark.parametrize( diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 15f6e82419049..b12476deccbfc 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -959,7 +959,7 @@ def test_mixed_dtypes_remain_object_array(self): @pytest.mark.parametrize( "idx", [ - pd.IntervalIndex.from_breaks(range(5), closed="both"), + pd.IntervalIndex.from_breaks(range(5), inclusive="both"), pd.period_range("2016-01-01", periods=3, freq="D"), ], ) @@ -1652,7 +1652,7 @@ def test_categorical(self): @pytest.mark.parametrize("asobject", [True, False]) def test_interval(self, asobject): - idx = pd.IntervalIndex.from_breaks(range(5), closed="both") + idx = pd.IntervalIndex.from_breaks(range(5), inclusive="both") if asobject: idx = idx.astype(object) @@ -1668,21 +1668,21 @@ def test_interval(self, asobject): @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0]) def test_interval_mismatched_closed(self, value): - first = Interval(value, value, closed="left") - second = Interval(value, value, closed="right") + first = Interval(value, value, inclusive="left") + second = Interval(value, value, inclusive="right") - # if closed match, we should infer "interval" + # if inclusive match, we should infer "interval" arr = np.array([first, first], dtype=object) assert lib.infer_dtype(arr, skipna=False) == "interval" - # if closed dont match, we should _not_ get "interval" + # if inclusive dont match, we should _not_ get "interval" arr2 = np.array([first, second], dtype=object) assert lib.infer_dtype(arr2, skipna=False) == "mixed" def test_interval_mismatched_subtype(self): - first = Interval(0, 1, 
closed="left") - second = Interval(Timestamp(0), Timestamp(1), closed="left") - third = Interval(Timedelta(0), Timedelta(1), closed="left") + first = Interval(0, 1, inclusive="left") + second = Interval(Timestamp(0), Timestamp(1), inclusive="left") + third = Interval(Timedelta(0), Timedelta(1), inclusive="left") arr = np.array([first, second]) assert lib.infer_dtype(arr, skipna=False) == "mixed" @@ -1694,7 +1694,7 @@ def test_interval_mismatched_subtype(self): assert lib.infer_dtype(arr, skipna=False) == "mixed" # float vs int subdtype are compatible - flt_interval = Interval(1.5, 2.5, closed="left") + flt_interval = Interval(1.5, 2.5, inclusive="left") arr = np.array([first, flt_interval], dtype=object) assert lib.infer_dtype(arr, skipna=False) == "interval" diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index 283b67bb9171d..9e016e0101ef6 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -10,6 +10,7 @@ import pandas as pd import pandas._testing as tm +from pandas.core.arrays import IntervalArray from pandas.tests.extension.base.base import BaseExtensionTests @@ -76,10 +77,17 @@ def test_setitem_sequence_mismatched_length_raises(self, data, as_array): self.assert_series_equal(ser, original) def test_setitem_empty_indexer(self, data, box_in_series): + data_dtype = type(data) + if box_in_series: data = pd.Series(data) original = data.copy() - data[np.array([], dtype=int)] = [] + + if data_dtype == IntervalArray: + data[np.array([], dtype=int)] = IntervalArray([], "right") + else: + data[np.array([], dtype=int)] = [] + self.assert_equal(data, original) def test_setitem_sequence_broadcasts(self, data, box_in_series): diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index 0f916cea9d518..eb307d964d736 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -30,7 +30,9 @@ def 
make_data(): N = 100 left_array = np.random.uniform(size=N).cumsum() right_array = left_array + np.random.uniform(size=N) - return [Interval(left, right) for left, right in zip(left_array, right_array)] + return [ + Interval(left, right, "right") for left, right in zip(left_array, right_array) + ] @pytest.fixture @@ -41,7 +43,7 @@ def dtype(): @pytest.fixture def data(): """Length-100 PeriodArray for semantics test.""" - return IntervalArray(make_data()) + return IntervalArray(make_data(), "right") @pytest.fixture diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index c6d54e28ca1c8..715f69cc03828 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -229,7 +229,11 @@ def test_from_records_series_list_dict(self): def test_from_records_series_categorical_index(self): # GH#32805 index = CategoricalIndex( - [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)] + [ + Interval(-20, -10, "right"), + Interval(-10, 0, "right"), + Interval(0, 10, "right"), + ] ) series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) frame = DataFrame.from_records(series_of_dicts, index=index) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index fda37fdedb92a..04d95953a3a8d 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -227,7 +227,10 @@ def test_setitem_dict_preserves_dtypes(self): "obj,dtype", [ (Period("2020-01"), PeriodDtype("M")), - (Interval(left=0, right=5), IntervalDtype("int64", "right")), + ( + Interval(left=0, right=5, inclusive="right"), + IntervalDtype("int64", "right"), + ), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py index 47ebca0b9bf5c..783bef3206d58 
100644 --- a/pandas/tests/frame/methods/test_combine_first.py +++ b/pandas/tests/frame/methods/test_combine_first.py @@ -402,7 +402,7 @@ def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): (datetime(2020, 1, 1), datetime(2020, 1, 2)), (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")), (pd.Timedelta("89 days"), pd.Timedelta("60 min")), - (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")), + (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, inclusive="left")), ], ) def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture): diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 37431bc291b76..bd168e4f14558 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -751,7 +751,7 @@ def test_reset_index_interval_columns_object_cast(): result = df.reset_index() expected = DataFrame( [[1, 1.0, 0.0], [2, 0.0, 1.0]], - columns=Index(["Year", Interval(0, 1), Interval(1, 2)]), + columns=Index(["Year", Interval(0, 1, "right"), Interval(1, 2, "right")]), ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 5d1cc3d4ecee5..9cad965e9cb5c 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -384,7 +384,7 @@ def test_sort_index_intervalindex(self): result = model.groupby(["X1", "X2"], observed=True).mean().unstack() expected = IntervalIndex.from_tuples( - [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right" + [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], inclusive="right" ) result = result.columns.levels[1].categories tm.assert_index_equal(result, expected) @@ -729,7 +729,11 @@ def test_sort_index_multilevel_repr_8017(self, gen, extra): [ pytest.param(["a", "b", "c"], id="str"), pytest.param( - 
[pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)], + [ + pd.Interval(0, 1, "right"), + pd.Interval(1, 2, "right"), + pd.Interval(2, 3, "right"), + ], id="pd.Interval", ), ], diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 82c7117cc00c6..e62c050fbf812 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -884,7 +884,10 @@ def test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): "data,dtype", [ (Period("2020-01"), PeriodDtype("M")), - (Interval(left=0, right=5), IntervalDtype("int64", "right")), + ( + Interval(left=0, right=5, inclusive="right"), + IntervalDtype("int64", "right"), + ), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), @@ -2410,16 +2413,16 @@ def test_constructor_series_nonexact_categoricalindex(self): result = DataFrame({"1": ser1, "2": ser2}) index = CategoricalIndex( [ - Interval(-0.099, 9.9, closed="right"), - Interval(9.9, 19.8, closed="right"), - Interval(19.8, 29.7, closed="right"), - Interval(29.7, 39.6, closed="right"), - Interval(39.6, 49.5, closed="right"), - Interval(49.5, 59.4, closed="right"), - Interval(59.4, 69.3, closed="right"), - Interval(69.3, 79.2, closed="right"), - Interval(79.2, 89.1, closed="right"), - Interval(89.1, 99, closed="right"), + Interval(-0.099, 9.9, inclusive="right"), + Interval(9.9, 19.8, inclusive="right"), + Interval(19.8, 29.7, inclusive="right"), + Interval(29.7, 39.6, inclusive="right"), + Interval(39.6, 49.5, inclusive="right"), + Interval(49.5, 59.4, inclusive="right"), + Interval(59.4, 69.3, inclusive="right"), + Interval(69.3, 79.2, inclusive="right"), + Interval(79.2, 89.1, inclusive="right"), + Interval(89.1, 99, inclusive="right"), ], ordered=True, ) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 5f1a81c504efe..2c2332a05505f 100644 --- a/pandas/tests/groupby/test_grouping.py +++ 
b/pandas/tests/groupby/test_grouping.py @@ -799,13 +799,13 @@ def test_get_group_empty_bins(self, observed): # TODO: should prob allow a str of Interval work as well # IOW '(0, 5]' - result = g.get_group(pd.Interval(0, 5)) + result = g.get_group(pd.Interval(0, 5, "right")) expected = DataFrame([3, 1], index=[0, 1]) tm.assert_frame_equal(result, expected) - msg = r"Interval\(10, 15, closed='right'\)" + msg = r"Interval\(10, 15, inclusive='right'\)" with pytest.raises(KeyError, match=msg): - g.get_group(pd.Interval(10, 15)) + g.get_group(pd.Interval(10, 15, "right")) def test_get_group_grouped_by_tuple(self): # GH 8121 diff --git a/pandas/tests/indexes/categorical/test_astype.py b/pandas/tests/indexes/categorical/test_astype.py index 854ae8b62db30..ec3e3dca92808 100644 --- a/pandas/tests/indexes/categorical/test_astype.py +++ b/pandas/tests/indexes/categorical/test_astype.py @@ -26,7 +26,9 @@ def test_astype(self): assert not isinstance(result, CategoricalIndex) # interval - ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") + ii = IntervalIndex.from_arrays( + left=[-0.001, 2.0], right=[2, 4], inclusive="right" + ) ci = CategoricalIndex( Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) diff --git a/pandas/tests/indexes/categorical/test_reindex.py b/pandas/tests/indexes/categorical/test_reindex.py index 1337eff1f1c2f..8764063a1a008 100644 --- a/pandas/tests/indexes/categorical/test_reindex.py +++ b/pandas/tests/indexes/categorical/test_reindex.py @@ -69,15 +69,15 @@ def test_reindex_empty_index(self): def test_reindex_categorical_added_category(self): # GH 42424 ci = CategoricalIndex( - [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], + [Interval(0, 1, inclusive="right"), Interval(1, 2, inclusive="right")], ordered=True, ) ci_add = CategoricalIndex( [ - Interval(0, 1, closed="right"), - Interval(1, 2, closed="right"), - Interval(2, 3, closed="right"), - Interval(3, 4, closed="right"), + Interval(0, 1, 
inclusive="right"), + Interval(1, 2, inclusive="right"), + Interval(2, 3, inclusive="right"), + Interval(3, 4, inclusive="right"), ], ordered=True, ) diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index 4cdbe2bbcf12b..6751a383699bb 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -82,7 +82,7 @@ class TestIntSubtype(AstypeTests): indexes = [ IntervalIndex.from_breaks(np.arange(-10, 11, dtype="int64")), - IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), closed="left"), + IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), inclusive="left"), ] @pytest.fixture(params=indexes) @@ -93,10 +93,12 @@ def index(self, request): "subtype", ["float64", "datetime64[ns]", "timedelta64[ns]"] ) def test_subtype_conversion(self, index, subtype): - dtype = IntervalDtype(subtype, index.closed) + dtype = IntervalDtype(subtype, index.inclusive) result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + index.left.astype(subtype), + index.right.astype(subtype), + inclusive=index.inclusive, ) tm.assert_index_equal(result, expected) @@ -105,12 +107,12 @@ def test_subtype_conversion(self, index, subtype): ) def test_subtype_integer(self, subtype_start, subtype_end): index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start)) - dtype = IntervalDtype(subtype_end, index.closed) + dtype = IntervalDtype(subtype_end, index.inclusive) result = index.astype(dtype) expected = IntervalIndex.from_arrays( index.left.astype(subtype_end), index.right.astype(subtype_end), - closed=index.closed, + inclusive=index.inclusive, ) tm.assert_index_equal(result, expected) @@ -135,7 +137,9 @@ class TestFloatSubtype(AstypeTests): indexes = [ interval_range(-10.0, 10.0, inclusive="neither"), IntervalIndex.from_arrays( - [-1.5, np.nan, 0.0, 0.0, 1.5], [-0.5, np.nan, 1.0, 1.0, 3.0], 
closed="both" + [-1.5, np.nan, 0.0, 0.0, 1.5], + [-0.5, np.nan, 1.0, 1.0, 3.0], + inclusive="both", ), ] @@ -149,7 +153,9 @@ def test_subtype_integer(self, subtype): dtype = IntervalDtype(subtype, "right") result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + index.left.astype(subtype), + index.right.astype(subtype), + inclusive=index.inclusive, ) tm.assert_index_equal(result, expected) @@ -164,7 +170,9 @@ def test_subtype_integer_with_non_integer_borders(self, subtype): dtype = IntervalDtype(subtype, "right") result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + index.left.astype(subtype), + index.right.astype(subtype), + inclusive=index.inclusive, ) tm.assert_index_equal(result, expected) @@ -216,7 +224,9 @@ def test_subtype_integer(self, index, subtype): new_left = index.left.astype(subtype) new_right = index.right.astype(subtype) - expected = IntervalIndex.from_arrays(new_left, new_right, closed=index.closed) + expected = IntervalIndex.from_arrays( + new_left, new_right, inclusive=index.inclusive + ) tm.assert_index_equal(result, expected) def test_subtype_float(self, index): diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index c44303aa2c862..933707bfe8357 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -16,14 +16,14 @@ class TestBase(Base): @pytest.fixture def simple_index(self) -> IntervalIndex: - return self._index_cls.from_breaks(range(11), closed="right") + return self._index_cls.from_breaks(range(11), inclusive="right") @pytest.fixture def index(self): return tm.makeIntervalIndex(10) - def create_index(self, *, closed="right"): - return IntervalIndex.from_breaks(range(11), closed=closed) + def create_index(self, *, inclusive="right"): + return 
IntervalIndex.from_breaks(range(11), inclusive=inclusive) def test_repr_max_seq_item_setting(self): # override base test: not a valid repr as we use interval notation @@ -34,13 +34,13 @@ def test_repr_roundtrip(self): pass def test_take(self, closed): - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) result = index.take(range(10)) tm.assert_index_equal(result, index) result = index.take([0, 0, 1]) - expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed) + expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], inclusive=closed) tm.assert_index_equal(result, expected) def test_where(self, simple_index, listlike_box): diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index a71a8f9e34ea9..b57bcf7abc1e1 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -53,9 +53,9 @@ class ConstructorTests: ) def test_constructor(self, constructor, breaks, closed, name): result_kwargs = self.get_kwargs_from_breaks(breaks, closed) - result = constructor(closed=closed, name=name, **result_kwargs) + result = constructor(inclusive=closed, name=name, **result_kwargs) - assert result.closed == closed + assert result.inclusive == closed assert result.name == name assert result.dtype.subtype == getattr(breaks, "dtype", "int64") tm.assert_index_equal(result.left, Index(breaks[:-1])) @@ -78,7 +78,7 @@ def test_constructor_dtype(self, constructor, breaks, subtype): expected = constructor(**expected_kwargs) result_kwargs = self.get_kwargs_from_breaks(breaks) - iv_dtype = IntervalDtype(subtype, "right") + iv_dtype = IntervalDtype(subtype, "both") for dtype in (iv_dtype, str(iv_dtype)): result = constructor(dtype=dtype, **result_kwargs) tm.assert_index_equal(result, expected) @@ -108,20 +108,20 @@ def test_constructor_pass_closed(self, constructor, breaks): for dtype in (iv_dtype, str(iv_dtype)): with 
tm.assert_produces_warning(warn): - result = constructor(dtype=dtype, closed="left", **result_kwargs) - assert result.dtype.closed == "left" + result = constructor(dtype=dtype, inclusive="left", **result_kwargs) + assert result.dtype.inclusive == "left" @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50]) def test_constructor_nan(self, constructor, breaks, closed): # GH 18421 result_kwargs = self.get_kwargs_from_breaks(breaks) - result = constructor(closed=closed, **result_kwargs) + result = constructor(inclusive=closed, **result_kwargs) expected_subtype = np.float64 expected_values = np.array(breaks[:-1], dtype=object) - assert result.closed == closed + assert result.inclusive == closed assert result.dtype.subtype == expected_subtype tm.assert_numpy_array_equal(np.array(result), expected_values) @@ -139,13 +139,13 @@ def test_constructor_nan(self, constructor, breaks, closed): def test_constructor_empty(self, constructor, breaks, closed): # GH 18421 result_kwargs = self.get_kwargs_from_breaks(breaks) - result = constructor(closed=closed, **result_kwargs) + result = constructor(inclusive=closed, **result_kwargs) expected_values = np.array([], dtype=object) expected_subtype = getattr(breaks, "dtype", np.int64) assert result.empty - assert result.closed == closed + assert result.inclusive == closed assert result.dtype.subtype == expected_subtype tm.assert_numpy_array_equal(np.array(result), expected_values) @@ -184,9 +184,9 @@ def test_generic_errors(self, constructor): filler = self.get_kwargs_from_breaks(range(10)) # invalid closed - msg = "closed must be one of 'right', 'left', 'both', 'neither'" + msg = "inclusive must be one of 'right', 'left', 'both', 'neither'" with pytest.raises(ValueError, match=msg): - constructor(closed="invalid", **filler) + constructor(inclusive="invalid", **filler) # unsupported dtype msg = "dtype must be an IntervalDtype, got int64" @@ 
-219,7 +219,7 @@ class TestFromArrays(ConstructorTests): def constructor(self): return IntervalIndex.from_arrays - def get_kwargs_from_breaks(self, breaks, closed="right"): + def get_kwargs_from_breaks(self, breaks, inclusive="both"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_arrays @@ -268,7 +268,7 @@ class TestFromBreaks(ConstructorTests): def constructor(self): return IntervalIndex.from_breaks - def get_kwargs_from_breaks(self, breaks, closed="right"): + def get_kwargs_from_breaks(self, breaks, inclusive="both"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_breaks @@ -306,7 +306,7 @@ class TestFromTuples(ConstructorTests): def constructor(self): return IntervalIndex.from_tuples - def get_kwargs_from_breaks(self, breaks, closed="right"): + def get_kwargs_from_breaks(self, breaks, inclusive="both"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_tuples @@ -356,7 +356,7 @@ class TestClassConstructors(ConstructorTests): def constructor(self, request): return request.param - def get_kwargs_from_breaks(self, breaks, closed="right"): + def get_kwargs_from_breaks(self, breaks, inclusive="both"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by the IntervalIndex/Index constructors @@ -365,7 +365,7 @@ def get_kwargs_from_breaks(self, breaks, closed="right"): return {"data": breaks} ivs = [ - Interval(left, right, closed) if notna(left) else left + Interval(left, right, inclusive) if notna(left) else left for left, right in zip(breaks[:-1], breaks[1:]) ] @@ -390,7 +390,7 @@ def test_constructor_string(self): def test_constructor_errors(self, constructor): # mismatched closed within intervals with no constructor override - ivs = [Interval(0, 1, closed="right"), Interval(2, 3, 
closed="left")] + ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")] msg = "intervals must all be closed on the same side" with pytest.raises(ValueError, match=msg): constructor(ivs) @@ -415,14 +415,17 @@ def test_constructor_errors(self, constructor): ([], "both"), ([np.nan, np.nan], "neither"), ( - [Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")], + [ + Interval(0, 3, inclusive="neither"), + Interval(2, 5, inclusive="neither"), + ], "left", ), ( - [Interval(0, 3, closed="left"), Interval(2, 5, closed="right")], + [Interval(0, 3, inclusive="left"), Interval(2, 5, inclusive="right")], "neither", ), - (IntervalIndex.from_breaks(range(5), closed="both"), "right"), + (IntervalIndex.from_breaks(range(5), inclusive="both"), "right"), ], ) def test_override_inferred_closed(self, constructor, data, closed): @@ -431,8 +434,8 @@ def test_override_inferred_closed(self, constructor, data, closed): tuples = data.to_tuples() else: tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data] - expected = IntervalIndex.from_tuples(tuples, closed=closed) - result = constructor(data, closed=closed) + expected = IntervalIndex.from_tuples(tuples, inclusive=closed) + result = constructor(data, inclusive=closed) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -450,10 +453,10 @@ def test_index_object_dtype(self, values_constructor): def test_index_mixed_closed(self): # GH27172 intervals = [ - Interval(0, 1, closed="left"), - Interval(1, 2, closed="right"), - Interval(2, 3, closed="neither"), - Interval(3, 4, closed="both"), + Interval(0, 1, inclusive="left"), + Interval(1, 2, inclusive="right"), + Interval(2, 3, inclusive="neither"), + Interval(3, 4, inclusive="both"), ] result = Index(intervals) expected = Index(intervals, dtype=object) @@ -465,9 +468,9 @@ def test_dtype_closed_mismatch(): dtype = IntervalDtype(np.int64, "left") - msg = "closed keyword does not match dtype.closed" + msg = "inclusive keyword does 
not match dtype.inclusive" with pytest.raises(ValueError, match=msg): - IntervalIndex([], dtype=dtype, closed="neither") + IntervalIndex([], dtype=dtype, inclusive="neither") with pytest.raises(ValueError, match=msg): - IntervalArray([], dtype=dtype, closed="neither") + IntervalArray([], dtype=dtype, inclusive="neither") diff --git a/pandas/tests/indexes/interval/test_equals.py b/pandas/tests/indexes/interval/test_equals.py index 87e2348e5fdb3..a873116600d6d 100644 --- a/pandas/tests/indexes/interval/test_equals.py +++ b/pandas/tests/indexes/interval/test_equals.py @@ -8,7 +8,7 @@ class TestEquals: def test_equals(self, closed): - expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + expected = IntervalIndex.from_breaks(np.arange(5), inclusive=closed) assert expected.equals(expected) assert expected.equals(expected.copy()) @@ -21,16 +21,16 @@ def test_equals(self, closed): assert not expected.equals(date_range("20130101", periods=2)) expected_name1 = IntervalIndex.from_breaks( - np.arange(5), closed=closed, name="foo" + np.arange(5), inclusive=closed, name="foo" ) expected_name2 = IntervalIndex.from_breaks( - np.arange(5), closed=closed, name="bar" + np.arange(5), inclusive=closed, name="bar" ) assert expected.equals(expected_name1) assert expected_name1.equals(expected_name2) - for other_closed in {"left", "right", "both", "neither"} - {closed}: - expected_other_closed = IntervalIndex.from_breaks( - np.arange(5), closed=other_closed + for other_inclusive in {"left", "right", "both", "neither"} - {closed}: + expected_other_inclusive = IntervalIndex.from_breaks( + np.arange(5), inclusive=other_inclusive ) - assert not expected.equals(expected_other_closed) + assert not expected.equals(expected_other_inclusive) diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index db477003900bc..2d9b8c83c7ab2 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ 
b/pandas/tests/indexes/interval/test_formats.py @@ -17,7 +17,8 @@ class TestIntervalIndexRendering: def test_frame_repr(self): # https://github.com/pandas-dev/pandas/pull/24134/files df = DataFrame( - {"A": [1, 2, 3, 4]}, index=IntervalIndex.from_breaks([0, 1, 2, 3, 4]) + {"A": [1, 2, 3, 4]}, + index=IntervalIndex.from_breaks([0, 1, 2, 3, 4], "right"), ) result = repr(df) expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4" @@ -40,7 +41,7 @@ def test_frame_repr(self): ) def test_repr_missing(self, constructor, expected): # GH 25984 - index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)]) + index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)], "right") obj = constructor(list("abc"), index=index) result = repr(obj) assert result == expected @@ -57,7 +58,8 @@ def test_repr_floats(self): Float64Index([329.973, 345.137], dtype="float64"), Float64Index([345.137, 360.191], dtype="float64"), ) - ] + ], + "right", ), ) result = str(markers) @@ -65,7 +67,7 @@ def test_repr_floats(self): assert result == expected @pytest.mark.parametrize( - "tuples, closed, expected_data", + "tuples, inclusive, expected_data", [ ([(0, 1), (1, 2), (2, 3)], "left", ["[0, 1)", "[1, 2)", "[2, 3)"]), ( @@ -97,9 +99,9 @@ def test_repr_floats(self): ), ], ) - def test_to_native_types(self, tuples, closed, expected_data): + def test_to_native_types(self, tuples, inclusive, expected_data): # GH 28210 - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=inclusive) result = index._format_native_types() expected = np.array(expected_data) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 7c00b23dc9ac4..4cf754a7e52e0 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -25,23 +25,23 @@ class TestGetLoc: @pytest.mark.parametrize("side", ["right", "left", "both", 
"neither"]) def test_get_loc_interval(self, closed, side): - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) for bound in [[0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [2.5, 3], [-1, 4]]: # if get_loc is supplied an interval, it should only search # for exact matches, not overlaps or covers, else KeyError. - msg = re.escape(f"Interval({bound[0]}, {bound[1]}, closed='{side}')") + msg = re.escape(f"Interval({bound[0]}, {bound[1]}, inclusive='{side}')") if closed == side: if bound == [0, 1]: - assert idx.get_loc(Interval(0, 1, closed=side)) == 0 + assert idx.get_loc(Interval(0, 1, inclusive=side)) == 0 elif bound == [2, 3]: - assert idx.get_loc(Interval(2, 3, closed=side)) == 1 + assert idx.get_loc(Interval(2, 3, inclusive=side)) == 1 else: with pytest.raises(KeyError, match=msg): - idx.get_loc(Interval(*bound, closed=side)) + idx.get_loc(Interval(*bound, inclusive=side)) else: with pytest.raises(KeyError, match=msg): - idx.get_loc(Interval(*bound, closed=side)) + idx.get_loc(Interval(*bound, inclusive=side)) @pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]) def test_get_loc_scalar(self, closed, scalar): @@ -55,7 +55,7 @@ def test_get_loc_scalar(self, closed, scalar): "neither": {0.5: 0, 2.5: 1}, } - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) # if get_loc is supplied a scalar, it should return the index of # the interval which contains the scalar, or KeyError. 
@@ -68,7 +68,7 @@ def test_get_loc_scalar(self, closed, scalar): @pytest.mark.parametrize("scalar", [-1, 0, 0.5, 3, 4.5, 5, 6]) def test_get_loc_length_one_scalar(self, scalar, closed): # GH 20921 - index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) if scalar in index[0]: result = index.get_loc(scalar) assert result == 0 @@ -80,15 +80,17 @@ def test_get_loc_length_one_scalar(self, scalar, closed): @pytest.mark.parametrize("left, right", [(0, 5), (-1, 4), (-1, 6), (6, 7)]) def test_get_loc_length_one_interval(self, left, right, closed, other_closed): # GH 20921 - index = IntervalIndex.from_tuples([(0, 5)], closed=closed) - interval = Interval(left, right, closed=other_closed) + index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) + interval = Interval(left, right, inclusive=other_closed) if interval == index[0]: result = index.get_loc(interval) assert result == 0 else: with pytest.raises( KeyError, - match=re.escape(f"Interval({left}, {right}, closed='{other_closed}')"), + match=re.escape( + f"Interval({left}, {right}, inclusive='{other_closed}')" + ), ): index.get_loc(interval) @@ -192,23 +194,35 @@ class TestGetIndexer: @pytest.mark.parametrize( "query, expected", [ - ([Interval(2, 4, closed="right")], [1]), - ([Interval(2, 4, closed="left")], [-1]), - ([Interval(2, 4, closed="both")], [-1]), - ([Interval(2, 4, closed="neither")], [-1]), - ([Interval(1, 4, closed="right")], [-1]), - ([Interval(0, 4, closed="right")], [-1]), - ([Interval(0.5, 1.5, closed="right")], [-1]), - ([Interval(2, 4, closed="right"), Interval(0, 1, closed="right")], [1, -1]), - ([Interval(2, 4, closed="right"), Interval(2, 4, closed="right")], [1, 1]), - ([Interval(5, 7, closed="right"), Interval(2, 4, closed="right")], [2, 1]), - ([Interval(2, 4, closed="right"), Interval(2, 4, closed="left")], [1, -1]), + ([Interval(2, 4, inclusive="right")], [1]), + ([Interval(2, 4, inclusive="left")], [-1]), + ([Interval(2, 
4, inclusive="both")], [-1]), + ([Interval(2, 4, inclusive="neither")], [-1]), + ([Interval(1, 4, inclusive="right")], [-1]), + ([Interval(0, 4, inclusive="right")], [-1]), + ([Interval(0.5, 1.5, inclusive="right")], [-1]), + ( + [Interval(2, 4, inclusive="right"), Interval(0, 1, inclusive="right")], + [1, -1], + ), + ( + [Interval(2, 4, inclusive="right"), Interval(2, 4, inclusive="right")], + [1, 1], + ), + ( + [Interval(5, 7, inclusive="right"), Interval(2, 4, inclusive="right")], + [2, 1], + ), + ( + [Interval(2, 4, inclusive="right"), Interval(2, 4, inclusive="left")], + [1, -1], + ), ], ) def test_get_indexer_with_interval(self, query, expected): tuples = [(0, 2), (2, 4), (5, 7)] - index = IntervalIndex.from_tuples(tuples, closed="right") + index = IntervalIndex.from_tuples(tuples, inclusive="right") result = index.get_indexer(query) expected = np.array(expected, dtype="intp") @@ -237,7 +251,7 @@ def test_get_indexer_with_interval(self, query, expected): def test_get_indexer_with_int_and_float(self, query, expected): tuples = [(0, 1), (1, 2), (3, 4)] - index = IntervalIndex.from_tuples(tuples, closed="right") + index = IntervalIndex.from_tuples(tuples, inclusive="right") result = index.get_indexer(query) expected = np.array(expected, dtype="intp") @@ -246,7 +260,7 @@ def test_get_indexer_with_int_and_float(self, query, expected): @pytest.mark.parametrize("item", [[3], np.arange(0.5, 5, 0.5)]) def test_get_indexer_length_one(self, item, closed): # GH 17284 - index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) result = index.get_indexer(item) expected = np.array([0] * len(item), dtype="intp") tm.assert_numpy_array_equal(result, expected) @@ -254,7 +268,7 @@ def test_get_indexer_length_one(self, item, closed): @pytest.mark.parametrize("size", [1, 5]) def test_get_indexer_length_one_interval(self, size, closed): # GH 17284 - index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + index = 
IntervalIndex.from_tuples([(0, 5)], inclusive=closed) result = index.get_indexer([Interval(0, 5, closed)] * size) expected = np.array([0] * size, dtype="intp") tm.assert_numpy_array_equal(result, expected) @@ -264,14 +278,14 @@ def test_get_indexer_length_one_interval(self, size, closed): [ IntervalIndex.from_tuples([(7, 8), (1, 2), (3, 4), (0, 1)]), IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4), np.nan]), - IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], closed="both"), + IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="both"), [-1, 0, 0.5, 1, 2, 2.5, np.nan], ["foo", "foo", "bar", "baz"], ], ) def test_get_indexer_categorical(self, target, ordered): # GH 30063: categorical and non-categorical results should be consistent - index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) + index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="right") categorical_target = CategoricalIndex(target, ordered=ordered) result = index.get_indexer(categorical_target) @@ -280,7 +294,7 @@ def test_get_indexer_categorical(self, target, ordered): def test_get_indexer_categorical_with_nans(self): # GH#41934 nans in both index and in target - ii = IntervalIndex.from_breaks(range(5)) + ii = IntervalIndex.from_breaks(range(5), inclusive="right") ii2 = ii.append(IntervalIndex([np.nan])) ci2 = CategoricalIndex(ii2) @@ -299,7 +313,7 @@ def test_get_indexer_categorical_with_nans(self): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( - "tuples, closed", + "tuples, inclusive", [ ([(0, 2), (1, 3), (3, 4)], "neither"), ([(0, 5), (1, 4), (6, 7)], "left"), @@ -307,9 +321,9 @@ def test_get_indexer_categorical_with_nans(self): ([(0, 1), (2, 3), (3, 4)], "both"), ], ) - def test_get_indexer_errors(self, tuples, closed): + def test_get_indexer_errors(self, tuples, inclusive): # IntervalIndex needs non-overlapping for uniqueness when querying - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = 
IntervalIndex.from_tuples(tuples, inclusive=inclusive) msg = ( "cannot handle overlapping indices; use " @@ -341,7 +355,7 @@ def test_get_indexer_errors(self, tuples, closed): def test_get_indexer_non_unique_with_int_and_float(self, query, expected): tuples = [(0, 2.5), (1, 3), (2, 4)] - index = IntervalIndex.from_tuples(tuples, closed="left") + index = IntervalIndex.from_tuples(tuples, inclusive="left") result_indexer, result_missing = index.get_indexer_non_unique(query) expected_indexer = np.array(expected[0], dtype="intp") @@ -433,45 +447,45 @@ def test_slice_locs_with_interval(self): assert index.slice_locs(start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) # unsorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)]) + index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)], "right") with pytest.raises( KeyError, match=re.escape( '"Cannot get left slice bound for non-unique label: ' - "Interval(0, 2, closed='right')\"" + "Interval(0, 2, inclusive='right')\"" ), ): - index.slice_locs(start=Interval(0, 2), end=Interval(2, 4)) + index.slice_locs(start=Interval(0, 2, "right"), end=Interval(2, 4, "right")) with pytest.raises( KeyError, match=re.escape( '"Cannot get left slice bound for non-unique label: ' - "Interval(0, 2, closed='right')\"" + "Interval(0, 2, inclusive='right')\"" ), ): - index.slice_locs(start=Interval(0, 2)) + index.slice_locs(start=Interval(0, 2, "right")) - assert index.slice_locs(end=Interval(2, 4)) == (0, 2) + assert index.slice_locs(end=Interval(2, 4, "right")) == (0, 2) with pytest.raises( KeyError, match=re.escape( '"Cannot get right slice bound for non-unique label: ' - "Interval(0, 2, closed='right')\"" + "Interval(0, 2, inclusive='right')\"" ), ): - index.slice_locs(end=Interval(0, 2)) + index.slice_locs(end=Interval(0, 2, "right")) with pytest.raises( KeyError, match=re.escape( '"Cannot get right slice bound for non-unique label: ' - "Interval(0, 2, closed='right')\"" + "Interval(0, 2, inclusive='right')\"" ), 
): - index.slice_locs(start=Interval(2, 4), end=Interval(0, 2)) + index.slice_locs(start=Interval(2, 4, "right"), end=Interval(0, 2, "right")) # another unsorted duplicates index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) @@ -485,7 +499,7 @@ def test_slice_locs_with_interval(self): def test_slice_locs_with_ints_and_floats_succeeds(self): # increasing non-overlapping - index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) + index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="right") assert index.slice_locs(0, 1) == (0, 1) assert index.slice_locs(0, 2) == (0, 2) @@ -495,7 +509,7 @@ def test_slice_locs_with_ints_and_floats_succeeds(self): assert index.slice_locs(0, 4) == (0, 3) # decreasing non-overlapping - index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)]) + index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)], inclusive="right") assert index.slice_locs(0, 1) == (3, 3) assert index.slice_locs(0, 2) == (3, 2) assert index.slice_locs(0, 3) == (3, 1) @@ -516,7 +530,7 @@ def test_slice_locs_with_ints_and_floats_succeeds(self): ) def test_slice_locs_with_ints_and_floats_errors(self, tuples, query): start, stop = query - index = IntervalIndex.from_tuples(tuples) + index = IntervalIndex.from_tuples(tuples, inclusive="right") with pytest.raises( KeyError, match=( @@ -571,17 +585,17 @@ class TestContains: def test_contains_dunder(self): - index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") + index = IntervalIndex.from_arrays([0, 1], [1, 2], inclusive="right") # __contains__ requires perfect matches to intervals. 
assert 0 not in index assert 1 not in index assert 2 not in index - assert Interval(0, 1, closed="right") in index - assert Interval(0, 2, closed="right") not in index - assert Interval(0, 0.5, closed="right") not in index - assert Interval(3, 5, closed="right") not in index - assert Interval(-1, 0, closed="left") not in index - assert Interval(0, 1, closed="left") not in index - assert Interval(0, 1, closed="both") not in index + assert Interval(0, 1, inclusive="right") in index + assert Interval(0, 2, inclusive="right") not in index + assert Interval(0, 0.5, inclusive="right") not in index + assert Interval(3, 5, inclusive="right") not in index + assert Interval(-1, 0, inclusive="left") not in index + assert Interval(0, 1, inclusive="left") not in index + assert Interval(0, 1, inclusive="both") not in index diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 8880cab2ce29b..4e33c3abd3252 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -28,21 +28,21 @@ def name(request): class TestIntervalIndex: - index = IntervalIndex.from_arrays([0, 1], [1, 2]) + index = IntervalIndex.from_arrays([0, 1], [1, 2], "right") - def create_index(self, closed="right"): - return IntervalIndex.from_breaks(range(11), closed=closed) + def create_index(self, inclusive="right"): + return IntervalIndex.from_breaks(range(11), inclusive=inclusive) - def create_index_with_nan(self, closed="right"): + def create_index_with_nan(self, inclusive="right"): mask = [True, False] + [True] * 8 return IntervalIndex.from_arrays( np.where(mask, np.arange(10), np.nan), np.where(mask, np.arange(1, 11), np.nan), - closed=closed, + inclusive=inclusive, ) def test_properties(self, closed): - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) assert len(index) == 10 assert index.size == 10 assert index.shape == (10,) @@ -51,7 +51,7 @@ def 
test_properties(self, closed): tm.assert_index_equal(index.right, Index(np.arange(1, 11))) tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5))) - assert index.closed == closed + assert index.inclusive == closed ivs = [ Interval(left, right, closed) @@ -61,7 +61,7 @@ def test_properties(self, closed): tm.assert_numpy_array_equal(np.asarray(index), expected) # with nans - index = self.create_index_with_nan(closed=closed) + index = self.create_index_with_nan(inclusive=closed) assert len(index) == 10 assert index.size == 10 assert index.shape == (10,) @@ -73,7 +73,7 @@ def test_properties(self, closed): tm.assert_index_equal(index.right, expected_right) tm.assert_index_equal(index.mid, expected_mid) - assert index.closed == closed + assert index.inclusive == closed ivs = [ Interval(left, right, closed) if notna(left) else np.nan @@ -93,7 +93,7 @@ def test_properties(self, closed): ) def test_length(self, closed, breaks): # GH 18789 - index = IntervalIndex.from_breaks(breaks, closed=closed) + index = IntervalIndex.from_breaks(breaks, inclusive=closed) result = index.length expected = Index(iv.length for iv in index) tm.assert_index_equal(result, expected) @@ -105,7 +105,7 @@ def test_length(self, closed, breaks): tm.assert_index_equal(result, expected) def test_with_nans(self, closed): - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) assert index.hasnans is False result = index.isna() @@ -116,7 +116,7 @@ def test_with_nans(self, closed): expected = np.ones(len(index), dtype=bool) tm.assert_numpy_array_equal(result, expected) - index = self.create_index_with_nan(closed=closed) + index = self.create_index_with_nan(inclusive=closed) assert index.hasnans is True result = index.isna() @@ -128,7 +128,7 @@ def test_with_nans(self, closed): tm.assert_numpy_array_equal(result, expected) def test_copy(self, closed): - expected = self.create_index(closed=closed) + expected = self.create_index(inclusive=closed) result = 
expected.copy() assert result.equals(expected) @@ -141,7 +141,7 @@ def test_ensure_copied_data(self, closed): # exercise the copy flag in the constructor # not copying - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) result = IntervalIndex(index, copy=False) tm.assert_numpy_array_equal( index.left.values, result.left.values, check_same="same" @@ -160,8 +160,8 @@ def test_ensure_copied_data(self, closed): ) def test_delete(self, closed): - expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed) - result = self.create_index(closed=closed).delete(0) + expected = IntervalIndex.from_breaks(np.arange(1, 11), inclusive=closed) + result = self.create_index(inclusive=closed).delete(0) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -201,11 +201,11 @@ def test_insert(self, data): with pytest.raises(TypeError, match=msg): data._data.insert(1, "foo") - # invalid closed - msg = "'value.closed' is 'left', expected 'right'." - for closed in {"left", "right", "both", "neither"} - {item.closed}: - msg = f"'value.closed' is '{closed}', expected '{item.closed}'." - bad_item = Interval(item.left, item.right, closed=closed) + # invalid inclusive + msg = "'value.inclusive' is 'left', expected 'right'." + for inclusive in {"left", "right", "both", "neither"} - {item.inclusive}: + msg = f"'value.inclusive' is '{inclusive}', expected '{item.inclusive}'." 
+ bad_item = Interval(item.left, item.right, inclusive=inclusive) res = data.insert(1, bad_item) expected = data.astype(object).insert(1, bad_item) tm.assert_index_equal(res, expected) @@ -213,7 +213,7 @@ def test_insert(self, data): data._data.insert(1, bad_item) # GH 18295 (test missing) - na_idx = IntervalIndex([np.nan], closed=data.closed) + na_idx = IntervalIndex([np.nan], inclusive=data.inclusive) for na in [np.nan, None, pd.NA]: expected = data[:1].append(na_idx).append(data[1:]) result = data.insert(1, na) @@ -235,93 +235,93 @@ def test_is_unique_interval(self, closed): Interval specific tests for is_unique in addition to base class tests """ # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], inclusive=closed) assert idx.is_unique is True # unique overlapping - shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed) assert idx.is_unique is True # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) + idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], inclusive=closed) assert idx.is_unique is True # unique NaN - idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], closed=closed) + idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], inclusive=closed) assert idx.is_unique is True # non-unique NaN idx = IntervalIndex.from_tuples( - [(np.NaN, np.NaN), (np.NaN, np.NaN)], closed=closed + [(np.NaN, np.NaN), (np.NaN, np.NaN)], inclusive=closed ) assert idx.is_unique is False def test_monotonic(self, closed): # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], inclusive=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert 
idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], closed=closed) + idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], inclusive=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is True # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], inclusive=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], inclusive=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], closed=closed) + idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], inclusive=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is True # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], inclusive=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # increasing 
overlapping shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing overlapping shared endpoints - idx = IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], closed=closed) + idx = IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], inclusive=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is True # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) + idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], inclusive=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is False # empty - idx = IntervalIndex([], closed=closed) + idx = IntervalIndex([], inclusive=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is True @@ -338,22 +338,22 @@ def test_is_monotonic_with_nans(self): assert not index.is_monotonic_decreasing def test_get_item(self, closed): - i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed) - assert i[0] == Interval(0.0, 1.0, closed=closed) - assert i[1] == Interval(1.0, 2.0, closed=closed) + i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), inclusive=closed) + assert i[0] == Interval(0.0, 1.0, inclusive=closed) + assert i[1] == Interval(1.0, 2.0, inclusive=closed) assert isna(i[2]) result = i[0:1] - expected = IntervalIndex.from_arrays((0.0,), (1.0,), closed=closed) + expected = IntervalIndex.from_arrays((0.0,), 
(1.0,), inclusive=closed) tm.assert_index_equal(result, expected) result = i[0:2] - expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), closed=closed) + expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), inclusive=closed) tm.assert_index_equal(result, expected) result = i[1:3] expected = IntervalIndex.from_arrays( - (1.0, np.nan), (2.0, np.nan), closed=closed + (1.0, np.nan), (2.0, np.nan), inclusive=closed ) tm.assert_index_equal(result, expected) @@ -477,7 +477,7 @@ def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key): def test_contains_method(self): # can select values that are IN the range of a value - i = IntervalIndex.from_arrays([0, 1], [1, 2]) + i = IntervalIndex.from_arrays([0, 1], [1, 2], "right") expected = np.array([False, False], dtype="bool") actual = i.contains(0) @@ -500,18 +500,18 @@ def test_contains_method(self): def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed) + expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], inclusive=closed) - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], inclusive=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], closed=closed) + ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], inclusive=closed) result = ii.dropna() tm.assert_index_equal(result, expected) def test_non_contiguous(self, closed): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) + index = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) expected = np.array([0, -1, 1], dtype="intp") @@ -520,7 +520,7 @@ def test_non_contiguous(self, closed): assert 1.5 not in index def test_isin(self, closed): - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) expected 
= np.array([True] + [False] * (len(index) - 1)) result = index.isin(index[:1]) @@ -529,7 +529,7 @@ def test_isin(self, closed): result = index.isin([index[0]]) tm.assert_numpy_array_equal(result, expected) - other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed) + other = IntervalIndex.from_breaks(np.arange(-2, 10), inclusive=closed) expected = np.array([True] * (len(index) - 1) + [False]) result = index.isin(other) tm.assert_numpy_array_equal(result, expected) @@ -537,9 +537,9 @@ def test_isin(self, closed): result = index.isin(other.tolist()) tm.assert_numpy_array_equal(result, expected) - for other_closed in {"right", "left", "both", "neither"}: - other = self.create_index(closed=other_closed) - expected = np.repeat(closed == other_closed, len(index)) + for other_inclusive in {"right", "left", "both", "neither"}: + other = self.create_index(inclusive=other_inclusive) + expected = np.repeat(closed == other_inclusive, len(index)) result = index.isin(other) tm.assert_numpy_array_equal(result, expected) @@ -547,14 +547,14 @@ def test_isin(self, closed): tm.assert_numpy_array_equal(result, expected) def test_comparison(self): - actual = Interval(0, 1) < self.index + actual = Interval(0, 1, "right") < self.index expected = np.array([False, True]) tm.assert_numpy_array_equal(actual, expected) - actual = Interval(0.5, 1.5) < self.index + actual = Interval(0.5, 1.5, "right") < self.index expected = np.array([False, True]) tm.assert_numpy_array_equal(actual, expected) - actual = self.index > Interval(0.5, 1.5) + actual = self.index > Interval(0.5, 1.5, "right") tm.assert_numpy_array_equal(actual, expected) actual = self.index == self.index @@ -612,9 +612,11 @@ def test_comparison(self): def test_missing_values(self, closed): idx = Index( - [np.nan, Interval(0, 1, closed=closed), Interval(1, 2, closed=closed)] + [np.nan, Interval(0, 1, inclusive=closed), Interval(1, 2, inclusive=closed)] + ) + idx2 = IntervalIndex.from_arrays( + [np.nan, 0, 1], [np.nan, 1, 2], 
inclusive=closed ) - idx2 = IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) msg = ( @@ -623,13 +625,13 @@ def test_missing_values(self, closed): ) with pytest.raises(ValueError, match=msg): IntervalIndex.from_arrays( - [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed + [np.nan, 0, 1], np.array([0, 1, 2]), inclusive=closed ) tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) def test_sort_values(self, closed): - index = self.create_index(closed=closed) + index = self.create_index(inclusive=closed) result = index.sort_values() tm.assert_index_equal(result, index) @@ -652,7 +654,7 @@ def test_sort_values(self, closed): def test_datetime(self, tz): start = Timestamp("2000-01-01", tz=tz) dates = date_range(start=start, periods=10) - index = IntervalIndex.from_breaks(dates) + index = IntervalIndex.from_breaks(dates, "right") # test mid start = Timestamp("2000-01-01T12:00", tz=tz) @@ -664,10 +666,10 @@ def test_datetime(self, tz): assert Timestamp("2000-01-01T12", tz=tz) not in index assert Timestamp("2000-01-02", tz=tz) not in index iv_true = Interval( - Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz) + Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz), "right" ) iv_false = Interval( - Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz) + Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz), "right" ) assert iv_true in index assert iv_false not in index @@ -692,58 +694,62 @@ def test_datetime(self, tz): def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], inclusive=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], inclusive=closed) result = index1.append(index2) - expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3], closed=closed) + expected = 
IntervalIndex.from_arrays( + [0, 1, 1, 2], [1, 2, 2, 3], inclusive=closed + ) tm.assert_index_equal(result, expected) result = index1.append([index1, index2]) expected = IntervalIndex.from_arrays( - [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], inclusive=closed ) tm.assert_index_equal(result, expected) - for other_closed in {"left", "right", "both", "neither"} - {closed}: - index_other_closed = IntervalIndex.from_arrays( - [0, 1], [1, 2], closed=other_closed + for other_inclusive in {"left", "right", "both", "neither"} - {closed}: + index_other_inclusive = IntervalIndex.from_arrays( + [0, 1], [1, 2], inclusive=other_inclusive + ) + result = index1.append(index_other_inclusive) + expected = index1.astype(object).append( + index_other_inclusive.astype(object) ) - result = index1.append(index_other_closed) - expected = index1.astype(object).append(index_other_closed.astype(object)) tm.assert_index_equal(result, expected) def test_is_non_overlapping_monotonic(self, closed): # Should be True in all cases tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, closed=closed) + idx = IntervalIndex.from_tuples(tpls, inclusive=closed) assert idx.is_non_overlapping_monotonic is True - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) assert idx.is_non_overlapping_monotonic is True # Should be False in all cases (overlapping) tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, closed=closed) + idx = IntervalIndex.from_tuples(tpls, inclusive=closed) assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) assert idx.is_non_overlapping_monotonic is False # Should be False in all cases (non-monotonic) tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - idx = IntervalIndex.from_tuples(tpls, 
closed=closed) + idx = IntervalIndex.from_tuples(tpls, inclusive=closed) assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) assert idx.is_non_overlapping_monotonic is False - # Should be False for closed='both', otherwise True (GH16560) + # Should be False for inclusive='both', otherwise True (GH16560) if closed == "both": - idx = IntervalIndex.from_breaks(range(4), closed=closed) + idx = IntervalIndex.from_breaks(range(4), inclusive=closed) assert idx.is_non_overlapping_monotonic is False else: - idx = IntervalIndex.from_breaks(range(4), closed=closed) + idx = IntervalIndex.from_breaks(range(4), inclusive=closed) assert idx.is_non_overlapping_monotonic is True @pytest.mark.parametrize( @@ -760,34 +766,34 @@ def test_is_overlapping(self, start, shift, na_value, closed): # non-overlapping tuples = [(start + n * shift, start + (n + 1) * shift) for n in (0, 2, 4)] - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) assert index.is_overlapping is False # non-overlapping with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) assert index.is_overlapping is False # overlapping tuples = [(start + n * shift, start + (n + 2) * shift) for n in range(3)] - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) assert index.is_overlapping is True # overlapping with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) assert index.is_overlapping is True # common endpoints tuples = [(start + n * shift, start + (n + 1) * shift) for n in range(3)] - index = 
IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) result = index.is_overlapping expected = closed == "both" assert result is expected # common endpoints with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) + index = IntervalIndex.from_tuples(tuples, inclusive=closed) result = index.is_overlapping assert result is expected @@ -873,13 +879,13 @@ def test_set_closed(self, name, closed, new_closed): expected = interval_range(0, 5, inclusive=new_closed, name=name) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("bad_closed", ["foo", 10, "LEFT", True, False]) - def test_set_closed_errors(self, bad_closed): + @pytest.mark.parametrize("bad_inclusive", ["foo", 10, "LEFT", True, False]) + def test_set_closed_errors(self, bad_inclusive): # GH 21670 index = interval_range(0, 5) - msg = f"invalid option for 'closed': {bad_closed}" + msg = f"invalid option for 'inclusive': {bad_inclusive}" with pytest.raises(ValueError, match=msg): - index.set_closed(bad_closed) + index.set_closed(bad_inclusive) def test_is_all_dates(self): # GH 23576 @@ -889,6 +895,39 @@ def test_is_all_dates(self): year_2017_index = IntervalIndex([year_2017]) assert not year_2017_index._is_all_dates + def test_interval_index_error_and_warning(self): + # GH 40245 + msg = ( + "Deprecated argument `closed` cannot " + "be passed if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + IntervalIndex.from_breaks(range(11), closed="both", inclusive="both") + + with pytest.raises(ValueError, match=msg): + IntervalIndex.from_arrays([0, 1], [1, 2], closed="both", inclusive="both") + + with pytest.raises(ValueError, match=msg): + IntervalIndex.from_tuples( + [(0, 1), (0.5, 1.5)], closed="both", inclusive="both" + ) + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning( + FutureWarning, 
match=msg, check_stacklevel=False + ): + IntervalIndex.from_breaks(range(11), closed="both") + + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + IntervalIndex.from_arrays([0, 1], [1, 2], closed="both") + + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed="both") + def test_dir(): # GH#27571 dir(interval_index) should not raise diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 63e7f3aa2b120..255470cf4683e 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -30,7 +30,7 @@ class TestIntervalRange: def test_constructor_numeric(self, closed, name, freq, periods): start, end = 0, 100 breaks = np.arange(101, step=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + expected = IntervalIndex.from_breaks(breaks, name=name, inclusive=closed) # defined from start/end/freq result = interval_range( @@ -63,7 +63,7 @@ def test_constructor_numeric(self, closed, name, freq, periods): def test_constructor_timestamp(self, closed, name, freq, periods, tz): start, end = Timestamp("20180101", tz=tz), Timestamp("20181231", tz=tz) breaks = date_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + expected = IntervalIndex.from_breaks(breaks, name=name, inclusive=closed) # defined from start/end/freq result = interval_range( @@ -98,7 +98,7 @@ def test_constructor_timestamp(self, closed, name, freq, periods, tz): def test_constructor_timedelta(self, closed, name, freq, periods): start, end = Timedelta("0 days"), Timedelta("100 days") breaks = timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) + expected = IntervalIndex.from_breaks(breaks, 
name=name, inclusive=closed) # defined from start/end/freq result = interval_range( @@ -161,7 +161,7 @@ def test_no_invalid_float_truncation(self, start, end, freq): breaks = [0.5, 1.5, 2.5, 3.5, 4.5] else: breaks = [0.5, 2.0, 3.5, 5.0, 6.5] - expected = IntervalIndex.from_breaks(breaks) + expected = IntervalIndex.from_breaks(breaks, "right") result = interval_range( start=start, end=end, periods=4, freq=freq, inclusive="right" @@ -187,7 +187,8 @@ def test_linspace_dst_transition(self, start, mid, end): # GH 20976: linspace behavior defined from start/end/periods # accounts for the hour gained/lost during DST transition result = interval_range(start=start, end=end, periods=2, inclusive="right") - expected = IntervalIndex.from_breaks([start, mid, end]) + expected = IntervalIndex.from_breaks([start, mid, end], "right") + tm.assert_index_equal(result, expected) @pytest.mark.parametrize("freq", [2, 2.0]) @@ -336,7 +337,7 @@ def test_errors(self): # invalid end msg = r"end must be numeric or datetime-like, got \(0, 1\]" with pytest.raises(ValueError, match=msg): - interval_range(end=Interval(0, 1), periods=10) + interval_range(end=Interval(0, 1, "right"), periods=10) # invalid freq for datetime-like msg = "freq must be numeric or convertible to DateOffset, got foo" diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index f2d9ec3608271..345025d63f4b2 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -42,7 +42,7 @@ def leaf_size(request): ) def tree(request, leaf_size): left = request.param - return IntervalTree(left, left + 2, leaf_size=leaf_size) + return IntervalTree(left, left + 2, leaf_size=leaf_size, inclusive="right") class TestIntervalTree: @@ -129,7 +129,7 @@ def test_get_indexer_closed(self, closed, leaf_size): found = x.astype("intp") not_found = (-1 * np.ones(1000)).astype("intp") - tree = IntervalTree(x, x + 0.5, 
closed=closed, leaf_size=leaf_size) + tree = IntervalTree(x, x + 0.5, inclusive=closed, leaf_size=leaf_size) tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25)) expected = found if tree.closed_left else not_found @@ -151,7 +151,7 @@ def test_get_indexer_closed(self, closed, leaf_size): @pytest.mark.parametrize("order", (list(x) for x in permutations(range(3)))) def test_is_overlapping(self, closed, order, left, right, expected): # GH 23309 - tree = IntervalTree(left[order], right[order], closed=closed) + tree = IntervalTree(left[order], right[order], inclusive=closed) result = tree.is_overlapping assert result is expected @@ -160,7 +160,7 @@ def test_is_overlapping_endpoints(self, closed, order): """shared endpoints are marked as overlapping""" # GH 23309 left, right = np.arange(3, dtype="int64"), np.arange(1, 4) - tree = IntervalTree(left[order], right[order], closed=closed) + tree = IntervalTree(left[order], right[order], inclusive=closed) result = tree.is_overlapping expected = closed == "both" assert result is expected @@ -176,7 +176,7 @@ def test_is_overlapping_endpoints(self, closed, order): ) def test_is_overlapping_trivial(self, closed, left, right): # GH 23309 - tree = IntervalTree(left, right, closed=closed) + tree = IntervalTree(left, right, inclusive=closed) assert tree.is_overlapping is False @pytest.mark.skipif(not IS64, reason="GH 23440") @@ -189,3 +189,21 @@ def test_construction_overflow(self): result = tree.root.pivot expected = (50 + np.iinfo(np.int64).max) / 2 assert result == expected + + def test_interval_tree_error_and_warning(self): + # GH 40245 + + msg = ( + "Deprecated argument `closed` cannot " + "be passed if argument `inclusive` is not None" + ) + with pytest.raises(ValueError, match=msg): + left, right = np.arange(10), [np.iinfo(np.int64).max] * 10 + IntervalTree(left, right, closed="both", inclusive="both") + + msg = "Argument `closed` is deprecated in favor of `inclusive`" + with tm.assert_produces_warning( + 
FutureWarning, match=msg, check_stacklevel=False + ): + left, right = np.arange(10), [np.iinfo(np.int64).max] * 10 + IntervalTree(left, right, closed="both") diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py index 308a90e72eab5..7f5784b6d76b9 100644 --- a/pandas/tests/indexes/interval/test_pickle.py +++ b/pandas/tests/indexes/interval/test_pickle.py @@ -5,9 +5,9 @@ class TestPickle: - @pytest.mark.parametrize("closed", ["left", "right", "both"]) - def test_pickle_round_trip_closed(self, closed): + @pytest.mark.parametrize("inclusive", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, inclusive): # https://github.com/pandas-dev/pandas/issues/35658 - idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=inclusive) result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index 51a1d36398aa4..5933961cc0f9d 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -11,11 +11,13 @@ def monotonic_index(start, end, dtype="int64", closed="right"): - return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed) + return IntervalIndex.from_breaks( + np.arange(start, end, dtype=dtype), inclusive=closed + ) def empty_index(dtype="int64", closed="right"): - return IntervalIndex(np.array([], dtype=dtype), closed=closed) + return IntervalIndex(np.array([], dtype=dtype), inclusive=closed) class TestIntervalIndex: @@ -125,7 +127,7 @@ def test_intersection_duplicates(self): tm.assert_index_equal(result, expected) def test_difference(self, closed, sort): - index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed) + index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], inclusive=closed) result = index.difference(index[:1], 
sort=sort) expected = index[1:] if sort is None: @@ -139,7 +141,7 @@ def test_difference(self, closed, sort): # GH 19101: empty result, different dtypes other = IntervalIndex.from_arrays( - index.left.astype("float64"), index.right, closed=closed + index.left.astype("float64"), index.right, inclusive=closed ) result = index.difference(other, sort=sort) tm.assert_index_equal(result, expected) @@ -161,7 +163,7 @@ def test_symmetric_difference(self, closed, sort): # GH 19101: empty result, different dtypes other = IntervalIndex.from_arrays( - index.left.astype("float64"), index.right, closed=closed + index.left.astype("float64"), index.right, inclusive=closed ) result = index.symmetric_difference(other, sort=sort) expected = empty_index(dtype="float64", closed=closed) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 55f3e27be5a72..943cc945995a1 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1431,10 +1431,10 @@ def test_ensure_index_from_sequences(self, data, names, expected): def test_ensure_index_mixed_closed_intervals(self): # GH27172 intervals = [ - pd.Interval(0, 1, closed="left"), - pd.Interval(1, 2, closed="right"), - pd.Interval(2, 3, closed="neither"), - pd.Interval(3, 4, closed="both"), + pd.Interval(0, 1, inclusive="left"), + pd.Interval(1, 2, inclusive="right"), + pd.Interval(2, 3, inclusive="neither"), + pd.Interval(3, 4, inclusive="both"), ] result = ensure_index(intervals) expected = Index(intervals, dtype=object) diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index db3a569d3925b..7d1f1ef09fc5d 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -13,7 +13,7 @@ class TestIntervalIndex: @pytest.fixture def series_with_interval_index(self): - return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) + return Series(np.arange(5), 
IntervalIndex.from_breaks(np.arange(6), "right")) def test_getitem_with_scalar(self, series_with_interval_index, indexer_sl): @@ -40,7 +40,7 @@ def test_getitem_nonoverlapping_monotonic(self, direction, closed, indexer_sl): if direction == "decreasing": tpls = tpls[::-1] - idx = IntervalIndex.from_tuples(tpls, closed=closed) + idx = IntervalIndex.from_tuples(tpls, inclusive=closed) ser = Series(list("abc"), idx) for key, expected in zip(idx.left, ser): diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py index aad6523357df6..2e3c765b2b372 100644 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ b/pandas/tests/indexing/interval/test_interval_new.py @@ -14,7 +14,9 @@ class TestIntervalIndex: @pytest.fixture def series_with_interval_index(self): - return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) + return Series( + np.arange(5), IntervalIndex.from_breaks(np.arange(6), inclusive="right") + ) def test_loc_with_interval(self, series_with_interval_index, indexer_sl): @@ -25,27 +27,33 @@ def test_loc_with_interval(self, series_with_interval_index, indexer_sl): ser = series_with_interval_index.copy() expected = 0 - result = indexer_sl(ser)[Interval(0, 1)] + result = indexer_sl(ser)[Interval(0, 1, "right")] assert result == expected expected = ser.iloc[3:5] - result = indexer_sl(ser)[[Interval(3, 4), Interval(4, 5)]] + result = indexer_sl(ser)[[Interval(3, 4, "right"), Interval(4, 5, "right")]] tm.assert_series_equal(expected, result) # missing or not exact - with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")): - indexer_sl(ser)[Interval(3, 5, closed="left")] + with pytest.raises( + KeyError, match=re.escape("Interval(3, 5, inclusive='left')") + ): + indexer_sl(ser)[Interval(3, 5, inclusive="left")] - with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): - indexer_sl(ser)[Interval(3, 5)] + with pytest.raises( + KeyError, 
match=re.escape("Interval(3, 5, inclusive='right')") + ): + indexer_sl(ser)[Interval(3, 5, "right")] with pytest.raises( - KeyError, match=re.escape("Interval(-2, 0, closed='right')") + KeyError, match=re.escape("Interval(-2, 0, inclusive='right')") ): - indexer_sl(ser)[Interval(-2, 0)] + indexer_sl(ser)[Interval(-2, 0, "right")] - with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")): - indexer_sl(ser)[Interval(5, 6)] + with pytest.raises( + KeyError, match=re.escape("Interval(5, 6, inclusive='right')") + ): + indexer_sl(ser)[Interval(5, 6, "right")] def test_loc_with_scalar(self, series_with_interval_index, indexer_sl): @@ -84,11 +92,11 @@ def test_loc_with_slices(self, series_with_interval_index, indexer_sl): # slice of interval expected = ser.iloc[:3] - result = indexer_sl(ser)[Interval(0, 1) : Interval(2, 3)] + result = indexer_sl(ser)[Interval(0, 1, "right") : Interval(2, 3, "right")] tm.assert_series_equal(expected, result) expected = ser.iloc[3:] - result = indexer_sl(ser)[Interval(3, 4) :] + result = indexer_sl(ser)[Interval(3, 4, "right") :] tm.assert_series_equal(expected, result) msg = "Interval objects are not currently supported" @@ -96,7 +104,7 @@ def test_loc_with_slices(self, series_with_interval_index, indexer_sl): indexer_sl(ser)[Interval(3, 6) :] with pytest.raises(NotImplementedError, match=msg): - indexer_sl(ser)[Interval(3, 4, closed="left") :] + indexer_sl(ser)[Interval(3, 4, inclusive="left") :] def test_slice_step_ne1(self, series_with_interval_index): # GH#31658 slice of scalar with step != 1 @@ -127,7 +135,7 @@ def test_slice_interval_step(self, series_with_interval_index): def test_loc_with_overlap(self, indexer_sl): - idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) + idx = IntervalIndex.from_tuples([(1, 5), (3, 7)], inclusive="right") ser = Series(range(len(idx)), index=idx) # scalar @@ -140,23 +148,25 @@ def test_loc_with_overlap(self, indexer_sl): # interval expected = 0 - result = 
indexer_sl(ser)[Interval(1, 5)] + result = indexer_sl(ser)[Interval(1, 5, "right")] result == expected expected = ser - result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]] + result = indexer_sl(ser)[[Interval(1, 5, "right"), Interval(3, 7, "right")]] tm.assert_series_equal(expected, result) - with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): - indexer_sl(ser)[Interval(3, 5)] + with pytest.raises( + KeyError, match=re.escape("Interval(3, 5, inclusive='right')") + ): + indexer_sl(ser)[Interval(3, 5, "right")] - msg = r"None of \[\[Interval\(3, 5, closed='right'\)\]\]" + msg = r"None of \[\[Interval\(3, 5, inclusive='right'\)\]\]" with pytest.raises(KeyError, match=msg): - indexer_sl(ser)[[Interval(3, 5)]] + indexer_sl(ser)[[Interval(3, 5, "right")]] # slices with interval (only exact matches) expected = ser - result = indexer_sl(ser)[Interval(1, 5) : Interval(3, 7)] + result = indexer_sl(ser)[Interval(1, 5, "right") : Interval(3, 7, "right")] tm.assert_series_equal(expected, result) msg = "'can only get slices from an IntervalIndex if bounds are" diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index b94323e975cd7..21a14ef8523f1 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -114,7 +114,7 @@ def test_slicing(self): df = DataFrame({"value": (np.arange(100) + 1).astype("int64")}) df["D"] = pd.cut(df.value, bins=[0, 25, 50, 75, 100]) - expected = Series([11, Interval(0, 25)], index=["value", "D"], name=10) + expected = Series([11, Interval(0, 25, "right")], index=["value", "D"], name=10) result = df.iloc[10] tm.assert_series_equal(result, expected) @@ -126,7 +126,7 @@ def test_slicing(self): result = df.iloc[10:20] tm.assert_frame_equal(result, expected) - expected = Series([9, Interval(0, 25)], index=["value", "D"], name=8) + expected = Series([9, Interval(0, 25, "right")], index=["value", "D"], name=8) result = df.loc[8] 
tm.assert_series_equal(result, expected) @@ -495,13 +495,13 @@ def test_loc_and_at_with_categorical_index(self): # numpy object np.array([1, "b", 3.5], dtype=object), # pandas scalars - [Interval(1, 4), Interval(4, 6), Interval(6, 9)], + [Interval(1, 4, "right"), Interval(4, 6, "right"), Interval(6, 9, "right")], [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)], [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")], # pandas Integer arrays *(pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES), # other pandas arrays - pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array, + pd.IntervalIndex.from_breaks([1, 4, 6, 9], "right").array, pd.date_range("2019-01-01", periods=3).array, pd.timedelta_range(start="1d", periods=3).array, ], diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 4504c55698a9a..be8fcfb4d8348 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -701,7 +701,7 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): 1.1, 1 + 1j, True, - pd.Interval(1, 2, closed="left"), + pd.Interval(1, 2, inclusive="left"), pd.Timestamp("2012-01-01", tz="US/Eastern"), pd.Timestamp("2012-01-01"), pd.Timedelta(days=1), @@ -745,7 +745,7 @@ def test_fillna_series_timedelta64(self): 1.1, 1 + 1j, True, - pd.Interval(1, 2, closed="left"), + pd.Interval(1, 2, inclusive="left"), pd.Timestamp("2012-01-01", tz="US/Eastern"), pd.Timestamp("2012-01-01"), pd.Timedelta(days=1), diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 3c90eee5be999..2f3b569c899e1 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1271,7 +1271,7 @@ def test_interval_can_hold_element(self, dtype, element): # Careful: to get the expected Series-inplace behavior we need # `elem` to not have the same length as `arr` - ii2 = IntervalIndex.from_breaks(arr[:-1], 
closed="neither") + ii2 = IntervalIndex.from_breaks(arr[:-1], inclusive="neither") elem = element(ii2) self.check_series_setitem(elem, ii, False) assert not blk._can_hold_element(elem) diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index 0b1d1c4a3d346..7e4371100b5ad 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -172,7 +172,7 @@ def test_append_preserve_index_name(self): Index(list("abc")), pd.CategoricalIndex("A B C".split()), pd.CategoricalIndex("D E F".split(), ordered=True), - pd.IntervalIndex.from_breaks([7, 8, 9, 10]), + pd.IntervalIndex.from_breaks([7, 8, 9, 10], inclusive="right"), pd.DatetimeIndex( [ dt.datetime(2013, 1, 3, 0, 0), diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index 1425686f027e4..815890f319396 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -37,7 +37,7 @@ def test_bins(func): data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1]) result, bins = cut(data, 3, retbins=True) - intervals = IntervalIndex.from_breaks(bins.round(3)) + intervals = IntervalIndex.from_breaks(bins.round(3), "right") intervals = intervals.take([0, 0, 0, 1, 2, 0]) expected = Categorical(intervals, ordered=True) @@ -49,7 +49,7 @@ def test_right(): data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=True, retbins=True) - intervals = IntervalIndex.from_breaks(bins.round(3)) + intervals = IntervalIndex.from_breaks(bins.round(3), "right") expected = Categorical(intervals, ordered=True) expected = expected.take([0, 0, 0, 2, 3, 0, 0]) @@ -61,7 +61,7 @@ def test_no_right(): data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=False, retbins=True) - intervals = IntervalIndex.from_breaks(bins.round(3), closed="left") + intervals = IntervalIndex.from_breaks(bins.round(3), inclusive="left") intervals = intervals.take([0, 0, 0, 2, 3, 
0, 1]) expected = Categorical(intervals, ordered=True) @@ -86,7 +86,7 @@ def test_bins_from_interval_index_doc_example(): # Make sure we preserve the bins. ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) c = cut(ages, bins=[0, 18, 35, 70]) - expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) + expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)], "right") tm.assert_index_equal(c.categories, expected) result = cut([25, 20, 50], bins=c.categories) @@ -121,7 +121,8 @@ def test_bins_not_monotonic(): [ (Timestamp.min, Timestamp("2018-01-01")), (Timestamp("2018-01-01"), Timestamp.max), - ] + ], + "right", ), ), ( @@ -130,7 +131,7 @@ def test_bins_not_monotonic(): [np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64" ), IntervalIndex.from_tuples( - [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)] + [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)], "right" ), ), ( @@ -156,7 +157,8 @@ def test_bins_not_monotonic(): np.timedelta64(0, "ns"), np.timedelta64(np.iinfo(np.int64).max, "ns"), ), - ] + ], + "right", ), ), ], @@ -232,7 +234,7 @@ def test_labels(right, breaks, closed): arr = np.tile(np.arange(0, 1.01, 0.1), 4) result, bins = cut(arr, 4, retbins=True, right=right) - ex_levels = IntervalIndex.from_breaks(breaks, closed=closed) + ex_levels = IntervalIndex.from_breaks(breaks, inclusive=closed) tm.assert_index_equal(result.categories, ex_levels) @@ -248,7 +250,7 @@ def test_label_precision(): arr = np.arange(0, 0.73, 0.01) result = cut(arr, 4, precision=2) - ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72]) + ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72], "right") tm.assert_index_equal(result.categories, ex_levels) @@ -272,13 +274,13 @@ def test_inf_handling(): result = cut(data, bins) result_ser = cut(data_ser, bins) - ex_uniques = IntervalIndex.from_breaks(bins) + ex_uniques = IntervalIndex.from_breaks(bins, "right") 
tm.assert_index_equal(result.categories, ex_uniques) - assert result[5] == Interval(4, np.inf) - assert result[0] == Interval(-np.inf, 2) - assert result_ser[5] == Interval(4, np.inf) - assert result_ser[0] == Interval(-np.inf, 2) + assert result[5] == Interval(4, np.inf, "right") + assert result[0] == Interval(-np.inf, 2, "right") + assert result_ser[5] == Interval(4, np.inf, "right") + assert result_ser[0] == Interval(-np.inf, 2, "right") def test_cut_out_of_bounds(): @@ -355,7 +357,7 @@ def test_cut_return_intervals(): exp_bins[0] -= 0.008 expected = Series( - IntervalIndex.from_breaks(exp_bins, closed="right").take( + IntervalIndex.from_breaks(exp_bins, inclusive="right").take( [0, 0, 0, 1, 1, 1, 2, 2, 2] ) ).astype(CDT(ordered=True)) @@ -368,7 +370,7 @@ def test_series_ret_bins(): result, bins = cut(ser, 2, retbins=True) expected = Series( - IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) + IntervalIndex.from_breaks([-0.003, 1.5, 3], inclusive="right").repeat(2) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) @@ -442,7 +444,8 @@ def test_datetime_bin(conv): [ Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), - ] + ], + "right", ) ).astype(CDT(ordered=True)) @@ -488,7 +491,8 @@ def test_datetime_cut(data): Interval( Timestamp("2013-01-02 08:00:00"), Timestamp("2013-01-03 00:00:00") ), - ] + ], + "right", ) ).astype(CDT(ordered=True)) tm.assert_series_equal(Series(result), expected) @@ -531,7 +535,8 @@ def test_datetime_tz_cut(bins, box): Timestamp("2013-01-02 08:00:00", tz=tz), Timestamp("2013-01-03 00:00:00", tz=tz), ), - ] + ], + "right", ) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) @@ -685,8 +690,8 @@ def test_cut_no_warnings(): def test_cut_with_duplicated_index_lowest_included(): # GH 42185 expected = Series( - [Interval(-0.001, 2, closed="right")] * 3 - + [Interval(2, 4, closed="right"), Interval(-0.001, 2, 
closed="right")], + [Interval(-0.001, 2, inclusive="right")] * 3 + + [Interval(2, 4, inclusive="right"), Interval(-0.001, 2, inclusive="right")], index=[0, 1, 2, 3, 0], dtype="category", ).cat.as_ordered() @@ -706,16 +711,16 @@ def test_cut_with_nonexact_categorical_indices(): index = pd.CategoricalIndex( [ - Interval(-0.099, 9.9, closed="right"), - Interval(9.9, 19.8, closed="right"), - Interval(19.8, 29.7, closed="right"), - Interval(29.7, 39.6, closed="right"), - Interval(39.6, 49.5, closed="right"), - Interval(49.5, 59.4, closed="right"), - Interval(59.4, 69.3, closed="right"), - Interval(69.3, 79.2, closed="right"), - Interval(79.2, 89.1, closed="right"), - Interval(89.1, 99, closed="right"), + Interval(-0.099, 9.9, inclusive="right"), + Interval(9.9, 19.8, inclusive="right"), + Interval(19.8, 29.7, inclusive="right"), + Interval(29.7, 39.6, inclusive="right"), + Interval(39.6, 49.5, inclusive="right"), + Interval(49.5, 59.4, inclusive="right"), + Interval(59.4, 69.3, inclusive="right"), + Interval(69.3, 79.2, inclusive="right"), + Interval(79.2, 89.1, inclusive="right"), + Interval(89.1, 99, inclusive="right"), ], ordered=True, ) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 3950999c5d4fc..8312e3b9de9a7 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -301,7 +301,7 @@ def test_pivot_with_interval_index(self, interval_values, dropna): def test_pivot_with_interval_index_margins(self): # GH 25815 - ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]) + ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2], "right") df = DataFrame( { "A": np.arange(4, 0, -1, dtype=np.intp), @@ -319,7 +319,10 @@ def test_pivot_with_interval_index_margins(self): result = pivot_tab["All"] expected = Series( [3, 7, 10], - index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"), + index=Index( + [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right"), 
"All"], + name="C", + ), name="All", dtype=np.intp, ) diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index f7c7204d02a49..0f82bb736c069 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -76,7 +76,8 @@ def test_qcut_include_lowest(): Interval(2.25, 4.5), Interval(4.5, 6.75), Interval(6.75, 9), - ] + ], + "right", ) tm.assert_index_equal(ii.categories, ex_levels) @@ -91,7 +92,7 @@ def test_qcut_nas(): def test_qcut_index(): result = qcut([0, 2], 2) - intervals = [Interval(-0.001, 1), Interval(1, 2)] + intervals = [Interval(-0.001, 1, "right"), Interval(1, 2, "right")] expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) @@ -127,7 +128,11 @@ def test_qcut_return_intervals(): res = qcut(ser, [0, 0.333, 0.666, 1]) exp_levels = np.array( - [Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)] + [ + Interval(-0.001, 2.664, "right"), + Interval(2.664, 5.328, "right"), + Interval(5.328, 8, "right"), + ] ) exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True)) tm.assert_series_equal(res, exp) @@ -183,7 +188,7 @@ def test_qcut_duplicates_bin(kwargs, msg): qcut(values, 3, **kwargs) else: result = qcut(values, 3, **kwargs) - expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)]) + expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)], "right") tm.assert_index_equal(result.categories, expected) @@ -198,7 +203,7 @@ def test_single_quantile(data, start, end, length, labels): result = qcut(ser, 1, labels=labels) if labels is None: - intervals = IntervalIndex([Interval(start, end)] * length, closed="right") + intervals = IntervalIndex([Interval(start, end)] * length, inclusive="right") expected = Series(intervals).astype(CDT(ordered=True)) else: expected = Series([0] * length, dtype=np.intp) @@ -217,7 +222,7 @@ def test_single_quantile(data, start, end, length, labels): def test_qcut_nat(ser): # see 
gh-19768 intervals = IntervalIndex.from_tuples( - [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])] + [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])], "right" ) expected = Series(Categorical(intervals, ordered=True)) @@ -247,7 +252,8 @@ def test_datetime_tz_qcut(bins): Timestamp("2013-01-02 08:00:00", tz=tz), Timestamp("2013-01-03 00:00:00", tz=tz), ), - ] + ], + "right", ) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py index 1f76a7df1e996..878b5e6ec0167 100644 --- a/pandas/tests/scalar/interval/test_interval.py +++ b/pandas/tests/scalar/interval/test_interval.py @@ -13,22 +13,22 @@ @pytest.fixture def interval(): - return Interval(0, 1) + return Interval(0, 1, "right") class TestInterval: def test_properties(self, interval): - assert interval.closed == "right" + assert interval.inclusive == "right" assert interval.left == 0 assert interval.right == 1 assert interval.mid == 0.5 def test_repr(self, interval): - assert repr(interval) == "Interval(0, 1, closed='right')" + assert repr(interval) == "Interval(0, 1, inclusive='right')" assert str(interval) == "(0, 1]" - interval_left = Interval(0, 1, closed="left") - assert repr(interval_left) == "Interval(0, 1, closed='left')" + interval_left = Interval(0, 1, "left") + assert repr(interval_left) == "Interval(0, 1, inclusive='left')" assert str(interval_left) == "[0, 1)" def test_contains(self, interval): @@ -40,18 +40,18 @@ def test_contains(self, interval): with pytest.raises(TypeError, match=msg): interval in interval - interval_both = Interval(0, 1, closed="both") + interval_both = Interval(0, 1, "both") assert 0 in interval_both assert 1 in interval_both - interval_neither = Interval(0, 1, closed="neither") + interval_neither = Interval(0, 1, "neither") assert 0 not in interval_neither assert 0.5 in interval_neither assert 1 not in interval_neither 
def test_equal(self): - assert Interval(0, 1) == Interval(0, 1, closed="right") - assert Interval(0, 1) != Interval(0, 1, closed="left") + assert Interval(0, 1, "right") == Interval(0, 1, "right") + assert Interval(0, 1, "right") != Interval(0, 1, "left") assert Interval(0, 1) != 0 def test_comparison(self): @@ -129,7 +129,7 @@ def test_is_empty(self, left, right, closed): iv = Interval(left, right, closed) assert iv.is_empty is False - # same endpoint is empty except when closed='both' (contains one point) + # same endpoint is empty except when inclusive='both' (contains one point) iv = Interval(left, left, closed) result = iv.is_empty expected = closed != "both" @@ -152,8 +152,8 @@ def test_construct_errors(self, left, right): Interval(left, right) def test_math_add(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(1, 2, closed=closed) + interval = Interval(0, 1, closed) + expected = Interval(1, 2, closed) result = interval + 1 assert result == expected @@ -173,8 +173,8 @@ def test_math_add(self, closed): interval + "foo" def test_math_sub(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(-1, 0, closed=closed) + interval = Interval(0, 1, closed) + expected = Interval(-1, 0, closed) result = interval - 1 assert result == expected @@ -191,8 +191,8 @@ def test_math_sub(self, closed): interval - "foo" def test_math_mult(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(0, 2, closed=closed) + interval = Interval(0, 1, closed) + expected = Interval(0, 2, closed) result = interval * 2 assert result == expected @@ -213,8 +213,8 @@ def test_math_mult(self, closed): interval * "foo" def test_math_div(self, closed): - interval = Interval(0, 1, closed=closed) - expected = Interval(0, 0.5, closed=closed) + interval = Interval(0, 1, closed) + expected = Interval(0, 0.5, closed) result = interval / 2.0 assert result == expected @@ -231,8 +231,8 @@ def test_math_div(self, closed): interval 
/ "foo" def test_math_floordiv(self, closed): - interval = Interval(1, 2, closed=closed) - expected = Interval(0, 1, closed=closed) + interval = Interval(1, 2, closed) + expected = Interval(0, 1, closed) result = interval // 2 assert result == expected @@ -249,9 +249,9 @@ def test_math_floordiv(self, closed): interval // "foo" def test_constructor_errors(self): - msg = "invalid option for 'closed': foo" + msg = "invalid option for 'inclusive': foo" with pytest.raises(ValueError, match=msg): - Interval(0, 1, closed="foo") + Interval(0, 1, "foo") msg = "left side of interval must be <= right side" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index e42039a86fc16..e2a5517066ad9 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -781,7 +781,12 @@ def test_index_putmask(self, obj, key, expected, val): # cast to IntervalDtype[float] Series(interval_range(1, 5, inclusive="right")), Series( - [Interval(1, 2), np.nan, Interval(3, 4), Interval(4, 5)], + [ + Interval(1, 2, "right"), + np.nan, + Interval(3, 4, "right"), + Interval(4, 5, "right"), + ], dtype="interval[float64]", ), 1, @@ -1052,9 +1057,9 @@ class TestSetitemFloatIntervalWithIntIntervalValues(SetitemCastingEquivalents): def test_setitem_example(self): # Just a case here to make obvious what this test class is aimed at - idx = IntervalIndex.from_breaks(range(4)) + idx = IntervalIndex.from_breaks(range(4), inclusive="right") obj = Series(idx) - val = Interval(0.5, 1.5) + val = Interval(0.5, 1.5, "right") obj[0] = val assert obj.dtype == "Interval[float64, right]" @@ -1348,7 +1353,7 @@ def obj(self): @pytest.mark.parametrize( - "val", ["foo", Period("2016", freq="Y"), Interval(1, 2, closed="both")] + "val", ["foo", Period("2016", freq="Y"), Interval(1, 2, inclusive="both")] ) @pytest.mark.parametrize("exp_dtype", [object]) class 
TestPeriodIntervalCoercion(CoercionTest): @@ -1547,7 +1552,7 @@ def test_setitem_int_as_positional_fallback_deprecation(): # Once the deprecation is enforced, we will have # expected = Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.0, 4.1, 5.0]) - ii = IntervalIndex.from_breaks(range(10))[::2] + ii = IntervalIndex.from_breaks(range(10), inclusive="right")[::2] ser2 = Series(range(len(ii)), index=ii) expected2 = ser2.copy() expected2.iloc[-1] = 9 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index e416b1f625993..e0b180bf0c6f4 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1172,7 +1172,7 @@ def test_constructor_datetime64_bigendian(self): @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray]) def test_construction_interval(self, interval_constructor): # construction from interval & array of intervals - intervals = interval_constructor.from_breaks(np.arange(3), closed="right") + intervals = interval_constructor.from_breaks(np.arange(3), inclusive="right") result = Series(intervals) assert result.dtype == "interval[int64, right]" tm.assert_index_equal(Index(result.values), Index(intervals)) @@ -1182,7 +1182,7 @@ def test_construction_interval(self, interval_constructor): ) def test_constructor_infer_interval(self, data_constructor): # GH 23563: consistent closed results in interval dtype - data = [Interval(0, 1), Interval(0, 2), None] + data = [Interval(0, 1, "right"), Interval(0, 2, "right"), None] result = Series(data_constructor(data)) expected = Series(IntervalArray(data)) assert result.dtype == "interval[float64, right]" @@ -1193,7 +1193,7 @@ def test_constructor_infer_interval(self, data_constructor): ) def test_constructor_interval_mixed_closed(self, data_constructor): # GH 23563: mixed closed results in object dtype (not interval dtype) - data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")] + data = [Interval(0, 
1, inclusive="both"), Interval(0, 2, inclusive="neither")] result = Series(data_constructor(data)) assert result.dtype == object assert result.tolist() == data diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 2d73b8e91e831..85a240a3e825d 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1101,19 +1101,26 @@ def test_value_counts(self): # assert isinstance(factor, n) result = algos.value_counts(factor) breaks = [-1.194, -0.535, 0.121, 0.777, 1.433] - index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True)) + index = IntervalIndex.from_breaks(breaks, inclusive="right").astype( + CDT(ordered=True) + ) expected = Series([1, 1, 1, 1], index=index) tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self): s = [1, 2, 3, 4] result = algos.value_counts(s, bins=1) - expected = Series([4], index=IntervalIndex.from_tuples([(0.996, 4.0)])) + expected = Series( + [4], index=IntervalIndex.from_tuples([(0.996, 4.0)], inclusive="right") + ) tm.assert_series_equal(result, expected) result = algos.value_counts(s, bins=2, sort=False) expected = Series( - [2, 2], index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]) + [2, 2], + index=IntervalIndex.from_tuples( + [(0.996, 2.5), (2.5, 4.0)], inclusive="right" + ), ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index 6ff1a1c17b179..c3c5f2fdc9d29 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -247,7 +247,7 @@ def test_assert_frame_equal_extension_dtype_mismatch(): def test_assert_frame_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 - left = DataFrame({"a": [pd.Interval(0, 1)]}, dtype="interval") + left = DataFrame({"a": [pd.Interval(0, 1, "right")]}, dtype="interval") right = left.astype(object) msg = ( diff --git 
a/pandas/tests/util/test_assert_interval_array_equal.py b/pandas/tests/util/test_assert_interval_array_equal.py index 243f357d7298c..29ebc00b2e69a 100644 --- a/pandas/tests/util/test_assert_interval_array_equal.py +++ b/pandas/tests/util/test_assert_interval_array_equal.py @@ -25,7 +25,7 @@ def test_interval_array_equal_closed_mismatch(): msg = """\ IntervalArray are different -Attribute "closed" are different +Attribute "inclusive" are different \\[left\\]: left \\[right\\]: right""" diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 93a2e4b83e760..dcf1fe291f179 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -256,7 +256,7 @@ def test_assert_series_equal_extension_dtype_mismatch(): def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 - left = Series([pd.Interval(0, 1)], dtype="interval") + left = Series([pd.Interval(0, 1, "right")], dtype="interval") right = left.astype(object) msg = """Attributes of Series are different
- [ ] xref #40245 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46522
2022-03-26T15:55:06Z
2022-05-30T20:47:41Z
2022-05-30T20:47:41Z
2022-05-31T16:12:40Z
TYP: Fix core/groupby/generic.py type ignores
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 245e33fb1a23b..f725ae061cedb 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -26,7 +26,10 @@ import numpy as np -from pandas._libs import reduction as libreduction +from pandas._libs import ( + Interval, + reduction as libreduction, +) from pandas._typing import ( ArrayLike, Manager, @@ -652,12 +655,9 @@ def value_counts( if is_interval_dtype(lab.dtype): # TODO: should we do this inside II? + lab_interval = cast(Interval, lab) - # error: "ndarray" has no attribute "left" - # error: "ndarray" has no attribute "right" - sorter = np.lexsort( - (lab.left, lab.right, ids) # type: ignore[attr-defined] - ) + sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) else: sorter = np.lexsort((lab, ids))
xref #37715 Two things to mention: - not a huge fan of casts but couldn't find another way to fix the issues; - added `Index()` in line 277 because `columns` attribute of a dataframe is supposed to be of Index type.
https://api.github.com/repos/pandas-dev/pandas/pulls/46521
2022-03-26T14:24:19Z
2022-05-11T12:29:05Z
2022-05-11T12:29:05Z
2022-05-11T12:29:06Z
TYP: Index.join
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 931d18dc349f3..3af0f1d553c0d 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -448,6 +448,7 @@ Other Deprecations - Deprecated passing arguments as positional in :meth:`DataFrame.any` and :meth:`Series.any` (:issue:`44802`) - Deprecated the ``closed`` argument in :meth:`interval_range` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) - Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`) +- Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`) .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 80527474f2be6..59e55bdcb405a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -48,6 +48,7 @@ DtypeObj, F, IgnoreRaise, + Level, Shape, npt, ) @@ -4529,16 +4530,53 @@ def _reindex_non_unique( # -------------------------------------------------------------------- # Join Methods + @overload + def join( + self, + other: Index, + *, + how: str_t = ..., + level: Level = ..., + return_indexers: Literal[True], + sort: bool = ..., + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + ... + + @overload + def join( + self, + other: Index, + *, + how: str_t = ..., + level: Level = ..., + return_indexers: Literal[False] = ..., + sort: bool = ..., + ) -> Index: + ... + + @overload + def join( + self, + other: Index, + *, + how: str_t = ..., + level: Level = ..., + return_indexers: bool = ..., + sort: bool = ..., + ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + ... 
+ @final + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "other"]) @_maybe_return_indexers def join( self, - other, + other: Index, how: str_t = "left", - level=None, + level: Level = None, return_indexers: bool = False, sort: bool = False, - ): + ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. @@ -4723,7 +4761,7 @@ def _join_multi(self, other: Index, how: str_t): # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( - other_jnlevels, how, return_indexers=True + other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels @@ -4731,8 +4769,16 @@ def _join_multi(self, other: Index, how: str_t): # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names + # error: Argument 5/6 to "restore_dropped_levels_multijoin" has + # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any + # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( - self, other, dropped_names, join_idx, lidx, ridx + self, + other, + dropped_names, + join_idx, + lidx, # type: ignore[arg-type] + ridx, # type: ignore[arg-type] ) # Re-create the multi-index
Overloads for Index.join
https://api.github.com/repos/pandas-dev/pandas/pulls/46518
2022-03-26T01:48:21Z
2022-05-06T21:26:11Z
2022-05-06T21:26:11Z
2022-05-26T01:59:24Z
Backport PR #46515 on branch 1.4.x (CI/DOC: pin jinja2 to 3.0.3)
diff --git a/environment.yml b/environment.yml index ff02ca2243d51..da5d6fabcde20 100644 --- a/environment.yml +++ b/environment.yml @@ -86,7 +86,7 @@ dependencies: - bottleneck>=1.3.1 - ipykernel - ipython>=7.11.1 - - jinja2 # pandas.Styler + - jinja2<=3.0.3 # pandas.Styler - matplotlib>=3.3.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.7.1 - scipy>=1.4.1 diff --git a/requirements-dev.txt b/requirements-dev.txt index e500417308a9d..ac525f1d09fbe 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -58,7 +58,7 @@ blosc bottleneck>=1.3.1 ipykernel ipython>=7.11.1 -jinja2 +jinja2<=3.0.3 matplotlib>=3.3.2 numexpr>=2.7.1 scipy>=1.4.1
Backport PR #46515: CI/DOC: pin jinja2 to 3.0.3
https://api.github.com/repos/pandas-dev/pandas/pulls/46517
2022-03-26T00:52:09Z
2022-03-26T03:20:56Z
2022-03-26T03:20:56Z
2022-03-26T03:20:56Z
REF: _infer_tsobject_fold to infer_datetuil_fold
diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 227cf454700d5..206e0171e0a55 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -31,5 +31,3 @@ cdef int64_t get_datetime64_nanos(object val) except? -1 cpdef datetime localize_pydatetime(datetime dt, tzinfo tz) cdef int64_t cast_from_unit(object ts, str unit) except? -1 cpdef (int64_t, int) precision_from_unit(str unit) - -cdef int64_t normalize_i8_stamp(int64_t local_val) nogil diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 132d742b78e9c..f51f25c2065f2 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -72,6 +72,7 @@ from pandas._libs.tslibs.nattype cimport ( ) from pandas._libs.tslibs.tzconversion cimport ( bisect_right_i8, + infer_datetuil_fold, localize_tzinfo_api, tz_localize_to_utc_single, ) @@ -530,7 +531,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts, if typ == 'dateutil': tdata = <int64_t*>cnp.PyArray_DATA(trans) pos = bisect_right_i8(tdata, obj.value, trans.shape[0]) - 1 - obj.fold = _infer_tsobject_fold(obj, trans, deltas, pos) + obj.fold = infer_datetuil_fold(obj.value, trans, deltas, pos) # Keep the converter same as PyDateTime's dt = datetime(obj.dts.year, obj.dts.month, obj.dts.day, @@ -714,7 +715,7 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz): local_val = obj.value + deltas[pos] # dateutil supports fold, so we infer fold from value - obj.fold = _infer_tsobject_fold(obj, trans, deltas, pos) + obj.fold = infer_datetuil_fold(obj.value, trans, deltas, pos) else: # All other cases have len(deltas) == 1. 
As of 2018-07-17 # (and 2022-03-07), all test cases that get here have @@ -726,49 +727,6 @@ cdef inline void _localize_tso(_TSObject obj, tzinfo tz): obj.tzinfo = tz -cdef inline bint _infer_tsobject_fold( - _TSObject obj, - const int64_t[:] trans, - const int64_t[:] deltas, - intp_t pos, -): - """ - Infer _TSObject fold property from value by assuming 0 and then setting - to 1 if necessary. - - Parameters - ---------- - obj : _TSObject - trans : ndarray[int64_t] - ndarray of offset transition points in nanoseconds since epoch. - deltas : int64_t[:] - array of offsets corresponding to transition points in trans. - pos : intp_t - Position of the last transition point before taking fold into account. - - Returns - ------- - bint - Due to daylight saving time, one wall clock time can occur twice - when shifting from summer to winter time; fold describes whether the - datetime-like corresponds to the first (0) or the second time (1) - the wall clock hits the ambiguous time - - References - ---------- - .. [1] "PEP 495 - Local Time Disambiguation" - https://www.python.org/dev/peps/pep-0495/#the-fold-attribute - """ - cdef: - bint fold = 0 - - if pos > 0: - fold_delta = deltas[pos - 1] - deltas[pos] - if obj.value - fold_delta < trans[pos]: - fold = 1 - - return fold - cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz): """ Take a datetime/Timestamp in UTC and localizes to timezone tz. @@ -802,24 +760,3 @@ cpdef inline datetime localize_pydatetime(datetime dt, tzinfo tz): elif isinstance(dt, ABCTimestamp): return dt.tz_localize(tz) return _localize_pydatetime(dt, tz) - - -# ---------------------------------------------------------------------- -# Normalization - -@cython.cdivision(False) -cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil: - """ - Round the localized nanosecond timestamp down to the previous midnight. 
- - Parameters - ---------- - local_val : int64_t - - Returns - ------- - int64_t - """ - cdef: - int64_t day_nanos = 24 * 3600 * 1_000_000_000 - return local_val - (local_val % day_nanos) diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 8833a611b0722..9b05fbc5be915 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -28,3 +28,5 @@ cdef class _Timestamp(ABCTimestamp): int op) except -1 cpdef void _set_freq(self, freq) cdef _warn_on_field_deprecation(_Timestamp self, freq, str field) + +cdef int64_t normalize_i8_stamp(int64_t local_val) nogil diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 2afceb827e49a..a0958e11e28b3 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -51,7 +51,6 @@ from pandas._libs.tslibs.conversion cimport ( _TSObject, convert_datetime_to_tsobject, convert_to_tsobject, - normalize_i8_stamp, ) from pandas._libs.tslibs.util cimport ( is_array, @@ -2116,3 +2115,23 @@ cdef int64_t _NS_LOWER_BOUND = NPY_NAT + 1 Timestamp.min = Timestamp(_NS_LOWER_BOUND) Timestamp.max = Timestamp(_NS_UPPER_BOUND) Timestamp.resolution = Timedelta(nanoseconds=1) # GH#21336, GH#21365 + + +# ---------------------------------------------------------------------- +# Scalar analogues to functions in vectorized.pyx + + +@cython.cdivision(False) +cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil: + """ + Round the localized nanosecond timestamp down to the previous midnight. 
+ + Parameters + ---------- + local_val : int64_t + + Returns + ------- + int64_t + """ + return local_val - (local_val % ccalendar.DAY_NANOS) diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 136e62985995e..74aab9f297379 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -1,5 +1,8 @@ from cpython.datetime cimport tzinfo -from numpy cimport int64_t +from numpy cimport ( + int64_t, + intp_t, +) cdef int64_t localize_tzinfo_api( @@ -11,3 +14,10 @@ cdef int64_t tz_localize_to_utc_single( ) except? -1 cdef Py_ssize_t bisect_right_i8(int64_t *data, int64_t val, Py_ssize_t n) + +cdef bint infer_datetuil_fold( + int64_t value, + const int64_t[::1] trans, + const int64_t[::1] deltas, + intp_t pos, +) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 9190585b2882d..a63a27b8194de 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -632,3 +632,48 @@ cdef int64_t _tz_localize_using_tzinfo_api( td = tz.utcoffset(dt) delta = int(td.total_seconds() * 1_000_000_000) return delta + + +# NB: relies on dateutil internals, subject to change. +cdef bint infer_datetuil_fold( + int64_t value, + const int64_t[::1] trans, + const int64_t[::1] deltas, + intp_t pos, +): + """ + Infer _TSObject fold property from value by assuming 0 and then setting + to 1 if necessary. + + Parameters + ---------- + value : int64_t + trans : ndarray[int64_t] + ndarray of offset transition points in nanoseconds since epoch. + deltas : int64_t[:] + array of offsets corresponding to transition points in trans. + pos : intp_t + Position of the last transition point before taking fold into account. 
+ + Returns + ------- + bint + Due to daylight saving time, one wall clock time can occur twice + when shifting from summer to winter time; fold describes whether the + datetime-like corresponds to the first (0) or the second time (1) + the wall clock hits the ambiguous time + + References + ---------- + .. [1] "PEP 495 - Local Time Disambiguation" + https://www.python.org/dev/peps/pep-0495/#the-fold-attribute + """ + cdef: + bint fold = 0 + + if pos > 0: + fold_delta = deltas[pos - 1] - deltas[pos] + if value - fold_delta < trans[pos]: + fold = 1 + + return fold diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index 07121396df4a2..a37e348154e22 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -18,8 +18,6 @@ from numpy cimport ( cnp.import_array() -from .conversion cimport normalize_i8_stamp - from .dtypes import Resolution from .ccalendar cimport DAY_NANOS @@ -34,7 +32,10 @@ from .np_datetime cimport ( ) from .offsets cimport BaseOffset from .period cimport get_period_ordinal -from .timestamps cimport create_timestamp_from_ts +from .timestamps cimport ( + create_timestamp_from_ts, + normalize_i8_stamp, +) from .timezones cimport ( get_dst_info, is_tzlocal,
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Trying to de-duplicate a bunch of the timezone-handling code, splitting this off as theres no clear way it could affect perf
https://api.github.com/repos/pandas-dev/pandas/pulls/46516
2022-03-25T23:07:36Z
2022-03-27T21:47:41Z
2022-03-27T21:47:41Z
2023-03-02T22:48:59Z
CI/DOC: pin jinja2 to 3.0.3
diff --git a/environment.yml b/environment.yml index f60c68b8d7638..ac8921b12f4a3 100644 --- a/environment.yml +++ b/environment.yml @@ -86,7 +86,7 @@ dependencies: - bottleneck>=1.3.1 - ipykernel - ipython>=7.11.1 - - jinja2 # pandas.Styler + - jinja2<=3.0.3 # pandas.Styler - matplotlib>=3.3.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.7.1 - scipy>=1.4.1 diff --git a/requirements-dev.txt b/requirements-dev.txt index f25c51dd58a1c..a0558f1a00177 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -58,7 +58,7 @@ blosc bottleneck>=1.3.1 ipykernel ipython>=7.11.1 -jinja2 +jinja2<=3.0.3 matplotlib>=3.3.2 numexpr>=2.7.1 scipy>=1.4.1
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Opened #46514 to track
https://api.github.com/repos/pandas-dev/pandas/pulls/46515
2022-03-25T22:38:20Z
2022-03-26T00:51:58Z
2022-03-26T00:51:58Z
2022-03-26T00:52:21Z
TYP: misc
diff --git a/pandas/_typing.py b/pandas/_typing.py index e3b3a4774f558..30244e025e430 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -104,6 +104,8 @@ # passed in, a DataFrame is always returned. NDFrameT = TypeVar("NDFrameT", bound="NDFrame") +NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") + Axis = Union[str, int] IndexLabel = Union[Hashable, Sequence[Hashable]] Level = Union[Hashable, int] diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 8f0516abe8bb3..27260f8ed62ca 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -295,7 +295,9 @@ def asi8(self) -> npt.NDArray[np.int64]: # ---------------------------------------------------------------- # Rendering Methods - def _format_native_types(self, *, na_rep="NaT", date_format=None): + def _format_native_types( + self, *, na_rep="NaT", date_format=None + ) -> npt.NDArray[np.object_]: """ Helper method for astype when converting to strings. diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index fa543f6773634..7d0b30a1abb60 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -635,7 +635,7 @@ def _formatter(self, boxed: bool = False): @dtl.ravel_compat def _format_native_types( self, *, na_rep="NaT", date_format=None, **kwargs - ) -> np.ndarray: + ) -> npt.NDArray[np.object_]: """ actually format my specific types """ diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index dc63cd92bbb2b..e4a9b156655e9 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -37,6 +37,7 @@ from pandas._typing import ( DtypeObj, NpDtype, + npt, ) from pandas.compat.numpy import function as nv from pandas.util._validators import validate_endpoints @@ -431,7 +432,7 @@ def _formatter(self, boxed: bool = False): @dtl.ravel_compat def _format_native_types( self, *, na_rep="NaT", date_format=None, **kwargs - ) -> np.ndarray: + ) -> 
npt.NDArray[np.object_]: from pandas.io.formats.format import get_format_timedelta64 formatter = get_format_timedelta64(self._ndarray, na_rep) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8b9c537631d94..c34e99298dd0e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1511,7 +1511,9 @@ def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: values = values[slicer] return values._format_native_types(**kwargs) - def _format_native_types(self, *, na_rep="", quoting=None, **kwargs): + def _format_native_types( + self, *, na_rep="", quoting=None, **kwargs + ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index c1d7eb972e1f4..425e7d3f4432e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -820,7 +820,9 @@ def _format_with_header(self, header: list[str], na_rep: str) -> list[str]: # matches base class except for whitespace padding return header + list(self._format_native_types(na_rep=na_rep)) - def _format_native_types(self, *, na_rep="NaN", quoting=None, **kwargs): + def _format_native_types( + self, *, na_rep="NaN", quoting=None, **kwargs + ) -> npt.NDArray[np.object_]: # GH 28210: use base method but with different default na_rep return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 1105e7b8a274f..c55312026a893 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1294,7 +1294,9 @@ def _formatter_func(self, tup): formatter_funcs = [level._formatter_func for level in self.levels] return tuple(func(val) for func, val in zip(formatter_funcs, tup)) - def _format_native_types(self, *, na_rep="nan", **kwargs): + def _format_native_types( + self, *, na_rep="nan", **kwargs + ) -> npt.NDArray[np.object_]: new_levels = [] new_codes = [] 
diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index be88b874908e8..174e0a7f81850 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -276,7 +276,7 @@ def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None: def _format_native_types( self, *, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs - ): + ) -> npt.NDArray[np.object_]: from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype): diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index d2c08712abacd..9f9143f4aaa60 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -1,5 +1,7 @@ import numpy as np +from pandas._typing import NumpyIndexT + from pandas.core.dtypes.common import is_list_like @@ -54,7 +56,7 @@ def cartesian_product(X): return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)] -def tile_compat(arr, num: int): +def tile_compat(arr: NumpyIndexT, num: int) -> NumpyIndexT: """ Index compat for np.tile. diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 3fd2a5e2bca32..c577acfaeba8e 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -73,7 +73,7 @@ def __init__( self.filepath_or_buffer = path_or_buf self.encoding = encoding - self.compression = compression + self.compression: CompressionOptions = compression self.mode = mode self.storage_options = storage_options
Small set of typing changes to reduce the number of errors of pyright's reportGeneralTypeIssues from 1365 to 1309: - return value for `_format_native_types` - TypeVar for `tile_compat` - When assigning literals to an instance variable, they need explicit type annotations (in this case for `CompressionOptions`), see https://github.com/microsoft/pyright/issues/3256
https://api.github.com/repos/pandas-dev/pandas/pulls/46513
2022-03-25T22:05:56Z
2022-03-29T00:10:50Z
2022-03-29T00:10:50Z
2022-04-01T01:36:30Z
update test suite output in installation guide
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index e312889f2eb6a..adc8b3d08d441 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -204,17 +204,28 @@ installed), make sure you have `pytest :: >>> pd.test() - running: pytest --skip-slow --skip-network C:\Users\TP\Anaconda3\envs\py36\lib\site-packages\pandas - ============================= test session starts ============================= - platform win32 -- Python 3.6.2, pytest-3.6.0, py-1.4.34, pluggy-0.4.0 - rootdir: C:\Users\TP\Documents\Python\pandasdev\pandas, inifile: setup.cfg - collected 12145 items / 3 skipped + running: pytest --skip-slow --skip-network --skip-db /home/user/anaconda3/lib/python3.9/site-packages/pandas - ..................................................................S...... - ........S................................................................ - ......................................................................... + ============================= test session starts ============================== + platform linux -- Python 3.9.7, pytest-6.2.5, py-1.11.0, pluggy-1.0.0 + rootdir: /home/user + plugins: dash-1.19.0, anyio-3.5.0, hypothesis-6.29.3 + collected 154975 items / 4 skipped / 154971 selected + ........................................................................ [ 0%] + ........................................................................ [ 99%] + ....................................... 
[100%] - ==================== 12130 passed, 12 skipped in 368.339 seconds ===================== + ==================================== ERRORS ==================================== + + =================================== FAILURES =================================== + + =============================== warnings summary =============================== + + =========================== short test summary info ============================ + + = 1 failed, 146194 passed, 7402 skipped, 1367 xfailed, 5 xpassed, 197 warnings, 10 errors in 1090.16s (0:18:10) = + +This is just an example of what information is shown. You might see a slightly different result as what is shown above. .. _install.dependencies:
- [x] closes #46504 - [x] closes #46498 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46510
2022-03-25T18:27:43Z
2022-03-28T19:40:14Z
2022-03-28T19:40:14Z
2022-03-28T19:40:14Z
DOC remove unused data param from INFO_DOCSTRING
diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index b42bc47856e1b..c0bdf37e5273a 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -258,8 +258,6 @@ Parameters ---------- - data : {klass} - {klass} to print information about. verbose : bool, optional Whether to print the full summary. By default, the setting in ``pandas.options.display.max_info_columns`` is followed.
- Updated during PyLadies London Sprint
https://api.github.com/repos/pandas-dev/pandas/pulls/46503
2022-03-25T11:58:48Z
2022-03-25T22:58:27Z
2022-03-25T22:58:27Z
2022-03-25T22:58:32Z
CI Clean up code
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 4498585e36ce5..ec8545ad1ee4a 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -78,8 +78,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, SS01, SS02, SS03, SS04, SS05, PR03, PR04, PR05, PR06, PR08, PR09, PR10, EX04, RT01, RT04, RT05, SA02, SA03)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,SS02,SS03,SS04,SS05,PR03,PR04,PR05,PR06,PR08,PR09,PR10,EX04,RT01,RT04,RT05,SA02,SA03 + MSG='Validate docstrings (EX04, GL01, GL02, GL03, GL04, GL05, GL06, GL07, GL09, GL10, PR03, PR04, PR05, PR06, PR08, PR09, PR10, RT01, RT04, RT05, SA02, SA03, SS01, SS02, SS03, SS04, SS05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=actions --errors=EX04,GL01,GL02,GL03,GL04,GL05,GL06,GL07,GL09,GL10,PR03,PR04,PR05,PR06,PR08,PR09,PR10,RT01,RT04,RT05,SA02,SA03,SS01,SS02,SS03,SS04,SS05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. - [ ] Worked on during pyladies london sprint, cleaned up code and added SS01
https://api.github.com/repos/pandas-dev/pandas/pulls/46502
2022-03-25T11:44:07Z
2022-03-25T19:01:30Z
2022-03-25T19:01:30Z
2022-03-25T19:01:30Z
Backport PR #46119 on branch 1.4.x (REF: isinstance(x, int) -> is_integer(x))
diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index 2e9a62a54cd44..34d047eb59be6 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -25,6 +25,11 @@ from pandas._typing import Level from pandas.compat._optional import import_optional_dependency +from pandas.core.dtypes.common import ( + is_complex, + is_float, + is_integer, +) from pandas.core.dtypes.generic import ABCSeries from pandas import ( @@ -1431,9 +1436,9 @@ def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any: value : Any Matches input type, or string if input is float or complex or int with sep. """ - if isinstance(x, (float, complex)): + if is_float(x) or is_complex(x): return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}" - elif isinstance(x, int): + elif is_integer(x): return f"{x:,.0f}" if thousands else f"{x:.0f}" return x @@ -1448,7 +1453,7 @@ def _wrap_decimal_thousands( """ def wrapper(x): - if isinstance(x, (float, complex, int)): + if is_float(x) or is_integer(x) or is_complex(x): if decimal != "." and thousands is not None and thousands != ",": return ( formatter(x)
Backport PR #46119: REF: isinstance(x, int) -> is_integer(x)
https://api.github.com/repos/pandas-dev/pandas/pulls/46501
2022-03-25T11:41:02Z
2022-03-29T11:27:21Z
2022-03-29T11:27:21Z
2022-03-29T11:27:21Z
Backport PR #46457 on branch 1.4.x (BUG: url regex in `style_render` does not pass colon and other valid)
diff --git a/doc/source/whatsnew/v1.4.2.rst b/doc/source/whatsnew/v1.4.2.rst index 4cbb8118055af..13f3e9a0d0a8c 100644 --- a/doc/source/whatsnew/v1.4.2.rst +++ b/doc/source/whatsnew/v1.4.2.rst @@ -31,7 +31,7 @@ Bug fixes ~~~~~~~~~ - Fix some cases for subclasses that define their ``_constructor`` properties as general callables (:issue:`46018`) - Fixed "longtable" formatting in :meth:`.Styler.to_latex` when ``column_format`` is given in extended format (:issue:`46037`) -- +- Fixed incorrect rendering in :meth:`.Styler.format` with ``hyperlinks="html"`` when the url contains a colon or other special characters (:issue:`46389`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/style_render.py b/pandas/io/formats/style_render.py index e15387b1e2bae..2e9a62a54cd44 100644 --- a/pandas/io/formats/style_render.py +++ b/pandas/io/formats/style_render.py @@ -1488,7 +1488,7 @@ def _render_href(x, format): href = r"\href{{{0}}}{{{0}}}" else: raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'") - pat = r"(https?:\/\/|ftp:\/\/|www.)[\w/\-?=%.]+\.[\w/\-&?=%.]+" + pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+" return re.sub(pat, lambda m: href.format(m.group(0)), x) return x diff --git a/pandas/tests/io/formats/style/test_html.py b/pandas/tests/io/formats/style/test_html.py index fad289d5e0d2c..1903d4174e638 100644 --- a/pandas/tests/io/formats/style/test_html.py +++ b/pandas/tests/io/formats/style/test_html.py @@ -778,8 +778,20 @@ def test_hiding_index_columns_multiindex_trimming(): ("no scheme, no top-level: www.web", False, "www.web"), ("https scheme: https://www.web.com", True, "https://www.web.com"), ("ftp scheme: ftp://www.web", True, "ftp://www.web"), + ("ftps scheme: ftps://www.web", True, "ftps://www.web"), ("subdirectories: www.web.com/directory", True, "www.web.com/directory"), ("Multiple domains: www.1.2.3.4", True, "www.1.2.3.4"), + ("with port: 
http://web.com:80", True, "http://web.com:80"), + ( + "full net_loc scheme: http://user:pass@web.com", + True, + "http://user:pass@web.com", + ), + ( + "with valid special chars: http://web.com/,.':;~!@#$*()[]", + True, + "http://web.com/,.':;~!@#$*()[]", + ), ], ) def test_rendered_links(type, text, exp, found):
Backport PR #46457: BUG: url regex in `style_render` does not pass colon and other valid
https://api.github.com/repos/pandas-dev/pandas/pulls/46497
2022-03-24T18:16:57Z
2022-03-25T00:44:20Z
2022-03-25T00:44:20Z
2022-03-25T00:44:21Z
DOC: setting allows_duplicate_labels=False(#46480)
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst index 36c2ec53d58b4..7894789846ce8 100644 --- a/doc/source/user_guide/duplicates.rst +++ b/doc/source/user_guide/duplicates.rst @@ -172,7 +172,7 @@ going forward, to ensure that your data pipeline doesn't introduce duplicates. >>> deduplicated = raw.groupby(level=0).first() # remove duplicates >>> deduplicated.flags.allows_duplicate_labels = False # disallow going forward -Setting ``allows_duplicate_labels=True`` on a ``Series`` or ``DataFrame`` with duplicate +Setting ``allows_duplicate_labels=False`` on a ``Series`` or ``DataFrame`` with duplicate labels or performing an operation that introduces duplicate labels on a ``Series`` or ``DataFrame`` that disallows duplicates will raise an :class:`errors.DuplicateLabelError`.
- [ ] closes #46480 (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/46495
2022-03-24T12:38:16Z
2022-03-26T13:39:11Z
2022-03-26T13:39:11Z
2022-03-26T13:39:19Z
DOC: fix PR09,PR08 docstring errors in pandas.plotting
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e4a44a89998e3..9467978f13d30 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2066,7 +2066,7 @@ def to_feather(self, fname): Parameters ---------- fname : str - string file path + String file path. """ from pandas.io.feather_format import to_feather @@ -4772,6 +4772,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False): Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' + Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. @@ -4806,10 +4807,10 @@ def duplicated(self, subset=None, keep="first"): Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - - ``first`` : Mark duplicates as ``True`` except for the - first occurrence. - - ``last`` : Mark duplicates as ``True`` except for the - last occurrence. + Determines which duplicates (if any) to mark. + + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns @@ -6233,8 +6234,8 @@ def unstack(self, level=-1, fill_value=None): ---------- level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name - fill_value : replace NaN with this value if the unstack produces - missing values + fill_value : int, string or dict + Replace NaN with this value if the unstack produces missing values Returns ------- @@ -6665,6 +6666,8 @@ def apply( by result_type='broadcast'. 
raw : bool, default False + Determines if row or column is passed as a Series or ndarry object: + * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects @@ -7357,6 +7360,8 @@ def corr(self, method="pearson", min_periods=1): Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation @@ -7556,10 +7561,13 @@ def corrwith(self, other, axis=0, drop=False, method="pearson"): other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. + The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for + row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation @@ -7939,7 +7947,7 @@ def idxmin(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' for row-wise, 1 or 'columns' for column-wise + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. @@ -7976,7 +7984,7 @@ def idxmax(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' for row-wise, 1 or 'columns' for column-wise + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a3b9bec494854..cb21588c8ba1a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2559,10 +2559,10 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs): path : str, buffer-like, or None Destination for the serialized object. If None, return generated bytes - append : bool whether to append to an existing msgpack - (default is False) - compress : type of compressor (zlib or blosc), default to None (no - compression) + append : bool, default False + Whether to append to an existing msgpack. + compress : str, default None + Type of compressor (zlib, blosc or None). Returns ------- @@ -2797,10 +2797,10 @@ def to_clipboard(self, excel=True, sep=None, **kwargs): Parameters ---------- excel : bool, default True - - True, use the provided separator, writing in a csv format for - allowing easy pasting into excel. - - False, write a string representation of the object to the - clipboard. + Produce output in a csv format for easy pasting into excel. + + - True, use the provided separator for csv pasting. + - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. @@ -5024,15 +5024,15 @@ def sample( Parameters ---------- func : function - function to apply to the %(klass)s. + Function to apply to the %(klass)s. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the %(klass)s. args : iterable, optional - positional arguments passed into ``func``. + Positional arguments passed into ``func``. kwargs : mapping, optional - a dictionary of keyword arguments passed into ``func``. + A dictionary of keyword arguments passed into ``func``. 
Returns ------- diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 8724382d9ec55..966a18e11a620 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -514,6 +514,8 @@ class PlotAccessor(PandasObject): Allows plotting of one column versus another. Only used if data is a DataFrame. kind : str + The kind of plot to produce: + - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot @@ -537,7 +539,7 @@ class PlotAccessor(PandasObject): legend : False/True/'reverse' Place legend on axis subplots style : list or dict - matplotlib line style per column + The matplotlib line style per column logx : bool or 'sym', default False Use log scaling or symlog scaling on x axis .. versionchanged:: 0.25.0 diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index a8e86d9dfa997..74ce60c6116a9 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -14,9 +14,9 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): ---------- ax : Matplotlib axes object data : DataFrame or Series - data for table contents - kwargs : keywords, optional - keyword arguments which passed to matplotlib.table.table. + Data for table contents. + **kwargs + Keyword arguments to be passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, data index or column name will be used. @@ -82,7 +82,7 @@ def scatter_matrix( density_kwds=None, hist_kwds=None, range_padding=0.05, - **kwds + **kwargs ): """ Draw a matrix of scatter plots. @@ -91,28 +91,26 @@ def scatter_matrix( ---------- frame : DataFrame alpha : float, optional - amount of transparency applied + Amount of transparency applied. figsize : (float,float), optional - a tuple (width, height) in inches + A tuple (width, height) in inches. ax : Matplotlib axis object, optional grid : bool, optional - setting this to True will show the grid + Setting this to True will show the grid. 
diagonal : {'hist', 'kde'} - pick between 'kde' and 'hist' for - either Kernel Density Estimation or Histogram - plot in the diagonal + Pick between 'kde' and 'hist' for either Kernel Density Estimation or + Histogram plot in the diagonal. marker : str, optional - Matplotlib marker type, default '.' - hist_kwds : other plotting keyword arguments - To be passed to hist function - density_kwds : other plotting keyword arguments - To be passed to kernel density estimate plot - range_padding : float, optional - relative extension of axis range in x and y - with respect to (x_max - x_min) or (y_max - y_min), - default 0.05 - kwds : other plotting keyword arguments - To be passed to scatter function + Matplotlib marker type, default '.'. + density_kwds : keywords + Keyword arguments to be passed to kernel density estimate plot. + hist_kwds : keywords + Keyword arguments to be passed to hist function. + range_padding : float, default 0.05 + Relative extension of axis range in x and y with respect to + (x_max - x_min) or (y_max - y_min). + **kwargs + Keyword arguments to be passed to scatter function. Returns ------- @@ -136,7 +134,7 @@ def scatter_matrix( density_kwds=density_kwds, hist_kwds=hist_kwds, range_padding=range_padding, - **kwds + **kwargs ) @@ -215,7 +213,7 @@ def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): @deprecate_kwarg(old_arg_name="data", new_arg_name="frame") def andrews_curves( - frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds + frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwargs ): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of @@ -233,17 +231,17 @@ def andrews_curves( Parameters ---------- frame : DataFrame - Data to be plotted, preferably normalized to (0.0, 1.0) + Data to be plotted, preferably normalized to (0.0, 1.0). 
class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional - Colors to use for the different classes + Colors to use for the different classes. colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. - kwds : keywords - Options to pass to matplotlib plotting method + **kwargs + Options to pass to matplotlib plotting method. Returns ------- @@ -257,7 +255,7 @@ def andrews_curves( samples=samples, color=color, colormap=colormap, - **kwds + **kwargs ) @@ -327,7 +325,7 @@ def parallel_coordinates( axvlines=True, axvlines_kwds=None, sort_labels=False, - **kwds + **kwargs ): """ Parallel coordinates plotting. @@ -336,30 +334,29 @@ def parallel_coordinates( ---------- frame : DataFrame class_column : str - Column name containing class names + Column name containing class names. cols : list, optional - A list of column names to use + A list of column names to use. ax : matplotlib.axis, optional - matplotlib axis object + Matplotlib axis object. color : list or tuple, optional - Colors to use for the different classes + Colors to use for the different classes. use_columns : bool, optional - If true, columns will be used as xticks + If true, columns will be used as xticks. xticks : list or tuple, optional - A list of values to use for xticks + A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional - If true, vertical lines will be added at each xtick + If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional - Options to be passed to axvline method for vertical lines - sort_labels : bool, False - Sort class_column labels, useful when assigning colors + Options to be passed to axvline method for vertical lines. 
+ sort_labels : bool, default False + Sort class_column labels, useful when assigning colors. .. versionadded:: 0.20.0 - - kwds : keywords - Options to pass to matplotlib plotting method + **kwargs + Options to pass to matplotlib plotting method. Returns ------- @@ -388,7 +385,7 @@ def parallel_coordinates( axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, - **kwds + **kwargs ) @@ -411,7 +408,7 @@ def lag_plot(series, lag=1, ax=None, **kwds): return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) -def autocorrelation_plot(series, ax=None, **kwds): +def autocorrelation_plot(series, ax=None, **kwargs): """ Autocorrelation plot for time series. @@ -419,15 +416,15 @@ def autocorrelation_plot(series, ax=None, **kwds): ---------- series : Time series ax : Matplotlib axis object, optional - kwds : keywords - Options to pass to matplotlib plotting method + **kwargs + Options to pass to matplotlib plotting method. Returns ------- class:`matplotlib.axis.Axes` """ plot_backend = _get_plot_backend("matplotlib") - return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwds) + return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) def tsplot(series, plotf, ax=None, **kwargs):
fixes the errors along with minor changes to standardize kwds -> **kwargs closes #28687 verified this fixes PR09,PR08 errors with: ``` ./scripts/validate_docstrings.py --errors=PR09,PR08 | grep "pandas.plotting" ``` - [x] closes #xxxx - [x] tests added / passed
https://api.github.com/repos/pandas-dev/pandas/pulls/28689
2019-09-30T16:11:59Z
2019-10-01T15:50:52Z
2019-10-01T15:50:52Z
2019-10-01T18:14:49Z
DOC: fix formatting in the ExtensionArray docstrings
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 7a16c3f6a35b6..53755695c97e3 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -474,7 +474,7 @@ def fillna(self, value=None, method=None, limit=None): method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap + backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is @@ -485,7 +485,8 @@ def fillna(self, value=None, method=None, limit=None): Returns ------- - filled : ExtensionArray with NA/NaN filled + ExtensionArray + With NA/NaN filled. """ value, method = validate_fillna_kwargs(value, method) @@ -539,13 +540,14 @@ def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArra fill_value : object, optional The scalar value to use for newly introduced missing values. - The default is ``self.dtype.na_value`` + The default is ``self.dtype.na_value``. .. versionadded:: 0.24.0 Returns ------- - shifted : ExtensionArray + ExtensionArray + Shifted. Notes ----- @@ -869,11 +871,12 @@ def view(self, dtype=None) -> Union[ABCExtensionArray, np.ndarray]: Parameters ---------- dtype : str, np.dtype, or ExtensionDtype, optional - Default None + Default None. Returns ------- ExtensionArray + A view of the :class:`ExtensionArray`. """ # NB: # - This must return a *new* object referencing the same data, not self.
- [x] closes #28685 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28686
2019-09-30T14:47:21Z
2019-10-14T21:48:16Z
2019-10-14T21:48:15Z
2019-10-14T21:50:43Z
BUG: make pct_change can handle the anchored freq #28664
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 90606fb61ada8..3b7756256dcab 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -440,6 +440,7 @@ Reshaping - :func:`qcut` and :func:`cut` now handle boolean input (:issue:`20303`) - Fix to ensure all int dtypes can be used in :func:`merge_asof` when using a tolerance value. Previously every non-int64 type would raise an erroneous ``MergeError`` (:issue:`28870`). - Better error message in :func:`get_dummies` when `columns` isn't a list-like value (:issue:`28383`) +- Bug :meth:`Series.pct_change` where supplying an anchored frequency would throw a ValueError (:issue:`28664`) Sparse ^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 47a0582edbea4..ddae2a26e10b5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10443,6 +10443,7 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwar data = self.fillna(method=fill_method, limit=limit, axis=axis) rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1 + rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) if freq is None: mask = isna(com.values_from_object(data)) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 7154975c6c73b..4ae00bca3e832 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -370,6 +370,16 @@ def test_pct_change(self, datetime_series): rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled) ) + def test_pct_change_with_duplicate_axis(self): + # GH 28664 + common_idx = date_range("2019-11-14", periods=5, freq="D") + result = Series(range(5), common_idx).pct_change(freq="B") + + # the reason that the expected should be like this is documented at PR 28681 + expected = Series([np.NaN, np.inf, np.NaN, np.NaN, 3.0], common_idx) + + tm.assert_series_equal(result, expected) + def 
test_pct_change_shift_over_nas(self): s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
- [x] closes #28664 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry pct_change didn't work when the freq is anchored(like `1W`, `1M`, `BM`) so when the freq is anchored, use `data.asfreq(freq)` instead of the raw `data`.
https://api.github.com/repos/pandas-dev/pandas/pulls/28681
2019-09-30T10:34:21Z
2019-11-15T14:55:06Z
2019-11-15T14:55:06Z
2019-11-15T14:55:09Z
BUG: Fix RangeIndex.get_indexer for decreasing RangeIndex
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index f904d69d6421b..9789c9fce3541 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -50,6 +50,7 @@ Indexing ^^^^^^^^ - Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`). +- Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`) - - diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 43445a0d5d5a2..6e2d500f4c5ab 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -388,8 +388,9 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): if self.step > 0: start, stop, step = self.start, self.stop, self.step else: - # Work on reversed range for simplicity: - start, stop, step = (self.stop - self.step, self.start + 1, -self.step) + # GH 28678: work on reversed range for simplicity + reverse = self._range[::-1] + start, stop, step = reverse.start, reverse.stop, reverse.step target_array = np.asarray(target) if not (is_integer_dtype(target_array) and target_array.ndim == 1): diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 7e08a5deaff7a..627c5cc56e010 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -424,6 +424,14 @@ def test_get_indexer_limit(self): expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize("stop", [0, -1, -2]) + def test_get_indexer_decreasing(self, stop): + # GH 28678 + index = RangeIndex(7, stop, -3) + result = index.get_indexer(range(9)) + expected = np.array([-1, 2, -1, -1, 1, -1, -1, 0, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + def test_join_outer(self): # join with Int64Index other = Int64Index(np.arange(25, 14, -1))
- [X] closes #28678 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28680
2019-09-30T06:05:30Z
2019-10-02T06:50:51Z
2019-10-02T06:50:51Z
2019-10-02T16:13:03Z
minor inconsistency in Categorical.remove_categories error message
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 751db2b88069d..f2bb20746741d 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -223,7 +223,7 @@ Categorical - Bug where :func:`merge` was unable to join on categorical and extension dtype columns (:issue:`28668`) - :meth:`Categorical.searchsorted` and :meth:`CategoricalIndex.searchsorted` now work on unordered categoricals also (:issue:`21667`) - Added test to assert roundtripping to parquet with :func:`DataFrame.to_parquet` or :func:`read_parquet` will preserve Categorical dtypes for string types (:issue:`27955`) -- +- Changed the error message in :meth:`Categorical.remove_categories` to always show the invalid removals as a set (:issue:`28669`) Datetimelike diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index bab1127e6e539..a14b91d78212d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1120,7 +1120,7 @@ def remove_categories(self, removals, inplace=False): # GH 10156 if any(isna(removals)): - not_included = [x for x in not_included if notna(x)] + not_included = {x for x in not_included if notna(x)} new_categories = [x for x in new_categories if notna(x)] if len(not_included) != 0: diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index ab07b3c96a1db..42087b89a19b5 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -339,9 +341,13 @@ def test_remove_categories(self): tm.assert_categorical_equal(cat, new) assert res is None - # removal is not in categories - with pytest.raises(ValueError): - cat.remove_categories(["c"]) + @pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]]) + def test_remove_categories_raises(self, removals): + cat = Categorical(["a", "b", "a"]) + message = 
re.escape("removals must all be in old categories: {'c'}") + + with pytest.raises(ValueError, match=message): + cat.remove_categories(removals) def test_remove_unused_categories(self): c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
This pull request fixes minor inconsistency in Categorical.remove_categories error message - [x] closes #28669 - [ ] tests added / passed - passes `black pandas` - passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - whatsnew entry Changed the error message to show invalid removals as a set. Added tests for removal of null from the categories. Parameterized pytest.
https://api.github.com/repos/pandas-dev/pandas/pulls/28677
2019-09-30T02:34:07Z
2019-10-22T15:46:04Z
2019-10-22T15:46:04Z
2019-10-22T15:46:10Z
BUG: restore limit in RangeIndex.get_indexer
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 14682b706f924..f904d69d6421b 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -49,7 +49,7 @@ Interval Indexing ^^^^^^^^ -- +- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`). - - diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 8783351cc74d1..43445a0d5d5a2 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -380,8 +380,10 @@ def get_loc(self, key, method=None, tolerance=None): @Appender(_index_shared_docs["get_indexer"]) def get_indexer(self, target, method=None, limit=None, tolerance=None): - if not (method is None and tolerance is None and is_list_like(target)): - return super().get_indexer(target, method=method, tolerance=tolerance) + if com.any_not_none(method, tolerance, limit) or not is_list_like(target): + return super().get_indexer( + target, method=method, tolerance=tolerance, limit=limit + ) if self.step > 0: start, stop, step = self.start, self.stop, self.step diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 6b073c460ea08..6d239e96cd167 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2217,6 +2217,22 @@ def test_reindex_frame_add_nat(self): assert mask[-5:].all() assert not mask[:-5].any() + def test_reindex_limit(self): + # GH 28631 + data = [["A", "A", "A"], ["B", "B", "B"], ["C", "C", "C"], ["D", "D", "D"]] + exp_data = [ + ["A", "A", "A"], + ["B", "B", "B"], + ["C", "C", "C"], + ["D", "D", "D"], + ["D", "D", "D"], + [np.nan, np.nan, np.nan], + ] + df = DataFrame(data) + result = df.reindex([0, 1, 2, 3, 4, 5], method="ffill", limit=1) + expected = DataFrame(exp_data) + tm.assert_frame_equal(result, expected) + def test_set_dataframe_column_ns_dtype(self): x = DataFrame([datetime.now(), datetime.now()]) assert x[0].dtype == np.dtype("M8[ns]") 
diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 58b98297f00f3..7e08a5deaff7a 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -416,6 +416,14 @@ def test_get_indexer_backfill(self): expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) + def test_get_indexer_limit(self): + # GH 28631 + idx = RangeIndex(4) + target = RangeIndex(6) + result = idx.get_indexer(target, method="pad", limit=1) + expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + def test_join_outer(self): # join with Int64Index other = Int64Index(np.arange(25, 14, -1))
- [x] closes #28631 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28671
2019-09-29T16:38:32Z
2019-10-01T12:00:39Z
2019-10-01T12:00:38Z
2019-10-01T12:17:45Z
Fix incorrect doc for to_datetime
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 32dc3c1f3e8f2..7b136fa29ecea 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -637,7 +637,7 @@ def to_datetime( datetime strings, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. - origin : scalar, default is 'unix' + origin : scalar, default 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This PR fixes incorrect doc for `pandas.to_datetime`.
https://api.github.com/repos/pandas-dev/pandas/pulls/28670
2019-09-29T06:07:23Z
2019-10-05T22:44:54Z
2019-10-05T22:44:54Z
2019-10-05T22:44:58Z
DOC: Updating See Also section in IndexOpsMixin
diff --git a/pandas/core/base.py b/pandas/core/base.py index 910b05c47071d..8cdeaa666f94b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -678,8 +678,10 @@ def _is_homogeneous_type(self): See Also -------- - DataFrame._is_homogeneous_type - MultiIndex._is_homogeneous_type + DataFrame._is_homogeneous_type : Whether all the columns in a + DataFrame have the same dtype. + MultiIndex._is_homogeneous_type : Whether all the levels of a + MultiIndex have the same dtype. """ return True diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e4a44a89998e3..7aa9fc628f71b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -535,6 +535,13 @@ def _is_homogeneous_type(self) -> bool: ------- bool + See Also + -------- + Index._is_homogeneous_type : Whether the object has a single + dtype. + MultiIndex._is_homogeneous_type : Whether all the levels of a + MultiIndex have the same dtype. + Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 3273c4f8cd13b..b2bb50939551d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -665,8 +665,10 @@ def _is_homogeneous_type(self): See Also -------- - Index._is_homogeneous_type - DataFrame._is_homogeneous_type + Index._is_homogeneous_type : Whether the object has a single + dtype. + DataFrame._is_homogeneous_type : Whether all the columns in a + DataFrame have the same dtype. Examples --------
- [x] Updated the "See Also" section for the IndexOpsMixin '''_is_homogenous_type'''
https://api.github.com/repos/pandas-dev/pandas/pulls/28667
2019-09-28T17:14:14Z
2019-10-02T04:06:43Z
2019-10-02T04:06:43Z
2019-10-02T04:06:48Z
BUG: fix broken error message in ujson.encode() (GH18878)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index eb4b72d01d59a..b00d2157a9216 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -301,6 +301,7 @@ Other - Using :meth:`DataFrame.replace` with overlapping keys in a nested dictionary will no longer raise, now matching the behavior of a flat dictionary (:issue:`27660`) - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`) - :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`) +- Fix corrupted error message when calling ``pandas.libs._json.encode()`` on a 0d array (:issue:`18878`) .. _whatsnew_1000.contributors: diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 22c42acea0150..48712dc68829d 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -1986,11 +1986,9 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { tc->type = JT_DOUBLE; return; } else if (PyArray_Check(obj) && PyArray_CheckScalar(obj)) { - tmpObj = PyObject_Repr(obj); PyErr_Format(PyExc_TypeError, - "%s (0d array) is not JSON serializable at the moment", - PyBytes_AS_STRING(tmpObj)); - Py_DECREF(tmpObj); + "%R (0d array) is not JSON serializable at the moment", + obj); goto INVALID; } diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 69a246487ddf1..d6572ac7b7bfe 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -780,7 +780,9 @@ def test_array_float(self): tm.assert_almost_equal(arr, arr_out) def test_0d_array(self): - with pytest.raises(TypeError): + # gh-18878 + msg = re.escape("array(1) (0d array) is not JSON serializable at the moment") 
+ with pytest.raises(TypeError, match=msg): ujson.encode(np.array(1)) @pytest.mark.parametrize(
- [x] closes #18878 - [x] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28666
2019-09-28T16:07:58Z
2019-10-06T22:40:39Z
2019-10-06T22:40:38Z
2019-10-08T13:59:33Z
BUG: Fix groupby.apply
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8755abe642068..a80fdd6faba09 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -911,7 +911,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- +- Bug in :meth:`DataFrame.groupby.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`) - Bug in :meth:`DataFrame.groupby` with multiple groups where an ``IndexError`` would be raised if any group contained all NA values (:issue:`20519`) - Bug in :meth:`pandas.core.resample.Resampler.size` and :meth:`pandas.core.resample.Resampler.count` returning wrong dtype when used with an empty series or dataframe (:issue:`28427`) - Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue:`28192`) diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 0019fc4b36d20..8571761f77265 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -1,3 +1,4 @@ +from copy import copy from distutils.version import LooseVersion from cython import Py_ssize_t @@ -15,7 +16,7 @@ from numpy cimport (ndarray, cnp.import_array() cimport pandas._libs.util as util -from pandas._libs.lib import maybe_convert_objects +from pandas._libs.lib import maybe_convert_objects, is_scalar cdef _check_result_array(object obj, Py_ssize_t cnt): @@ -492,14 +493,19 @@ def apply_frame_axis0(object frame, object f, object names, # Need to infer if low level index slider will cause segfaults require_slow_apply = i == 0 and piece is chunk try: - if piece.index is chunk.index: - piece = piece.copy(deep='all') - else: + if piece.index is not chunk.index: mutated = True except AttributeError: # `piece` might not have an index, could be e.g. 
an int pass + if not is_scalar(piece): + # Need to copy data to avoid appending references + if hasattr(piece, "copy"): + piece = piece.copy(deep="all") + else: + piece = copy(piece) + results.append(piece) # If the data was modified inplace we need to diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 0e62569fffeb6..050b1e7c5d3b3 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -686,6 +686,17 @@ def test_apply_with_mixed_types(): tm.assert_frame_equal(result, expected) +def test_func_returns_object(): + # GH 28652 + df = DataFrame({"a": [1, 2]}, index=pd.Int64Index([1, 2])) + result = df.groupby("a").apply(lambda g: g.index) + expected = Series( + [pd.Int64Index([1]), pd.Int64Index([2])], index=pd.Int64Index([1, 2], name="a") + ) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( "group_column_dtlike", [datetime.today(), datetime.today().date(), datetime.today().time()],
- [x] closes #28652 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Makes sure that the output of `groupby.apply` is built up by value instead of by reference in `reduction.pyx` to avoid the behavior from #28652.
https://api.github.com/repos/pandas-dev/pandas/pulls/28662
2019-09-27T23:27:41Z
2020-01-01T16:21:28Z
2020-01-01T16:21:27Z
2020-01-07T00:30:25Z
DOC: Fixed PR08 docstring errors in pandas.DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e4a44a89998e3..16f34fee5e1ff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2066,7 +2066,7 @@ def to_feather(self, fname): Parameters ---------- fname : str - string file path + String file path. """ from pandas.io.feather_format import to_feather @@ -4772,6 +4772,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False): Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' + Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. @@ -4806,10 +4807,10 @@ def duplicated(self, subset=None, keep="first"): Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - - ``first`` : Mark duplicates as ``True`` except for the - first occurrence. - - ``last`` : Mark duplicates as ``True`` except for the - last occurrence. + Determines which duplicates (if any) to mark. + + - ``first`` : Mark duplicates as ``True`` except for the first occurrence. + - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns @@ -6233,8 +6234,8 @@ def unstack(self, level=-1, fill_value=None): ---------- level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name - fill_value : replace NaN with this value if the unstack produces - missing values + fill_value : int, string or dict + Replace NaN with this value if the unstack produces missing values Returns ------- @@ -6665,6 +6666,8 @@ def apply( by result_type='broadcast'. 
raw : bool, default False + Determines if row or column is passed as a Series or ndarry object: + * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects @@ -7357,6 +7360,8 @@ def corr(self, method="pearson", min_periods=1): Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation @@ -7556,10 +7561,13 @@ def corrwith(self, other, axis=0, drop=False, method="pearson"): other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. + The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for + row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable + Method of correlation: + * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation @@ -7939,8 +7947,8 @@ def idxmin(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' for row-wise, 1 or 'columns' for column-wise - skipna : bool, default True + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise + skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. @@ -7976,8 +7984,8 @@ def idxmax(self, axis=0, skipna=True): Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' for row-wise, 1 or 'columns' for column-wise - skipna : bool, default True + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise + skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. 
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a3b9bec494854..cb21588c8ba1a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2559,10 +2559,10 @@ def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs): path : str, buffer-like, or None Destination for the serialized object. If None, return generated bytes - append : bool whether to append to an existing msgpack - (default is False) - compress : type of compressor (zlib or blosc), default to None (no - compression) + append : bool, default False + Whether to append to an existing msgpack. + compress : str, default None + Type of compressor (zlib, blosc or None). Returns ------- @@ -2797,10 +2797,10 @@ def to_clipboard(self, excel=True, sep=None, **kwargs): Parameters ---------- excel : bool, default True - - True, use the provided separator, writing in a csv format for - allowing easy pasting into excel. - - False, write a string representation of the object to the - clipboard. + Produce output in a csv format for easy pasting into excel. + + - True, use the provided separator for csv pasting. + - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. @@ -5024,15 +5024,15 @@ def sample( Parameters ---------- func : function - function to apply to the %(klass)s. + Function to apply to the %(klass)s. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the %(klass)s. args : iterable, optional - positional arguments passed into ``func``. + Positional arguments passed into ``func``. kwargs : mapping, optional - a dictionary of keyword arguments passed into ``func``. + A dictionary of keyword arguments passed into ``func``. 
Returns ------- diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 8724382d9ec55..966a18e11a620 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -514,6 +514,8 @@ class PlotAccessor(PandasObject): Allows plotting of one column versus another. Only used if data is a DataFrame. kind : str + The kind of plot to produce: + - 'line' : line plot (default) - 'bar' : vertical bar plot - 'barh' : horizontal bar plot @@ -537,7 +539,7 @@ class PlotAccessor(PandasObject): legend : False/True/'reverse' Place legend on axis subplots style : list or dict - matplotlib line style per column + The matplotlib line style per column logx : bool or 'sym', default False Use log scaling or symlog scaling on x axis .. versionchanged:: 0.25.0
This relates to: [27977](https://github.com/pandas-dev/pandas/issues/27977). I have fixed the doc PR08 formatting issues for: ``` pandas.DataFrame.insert: Parameter "column" description should start with a capital letter pandas.DataFrame.apply: Parameter "raw" description should start with a capital letter pandas.DataFrame.pipe: Parameter "func" description should start with a capital letter pandas.DataFrame.pipe: Parameter "args" description should start with a capital letter pandas.DataFrame.pipe: Parameter "kwargs" description should start with a capital letter pandas.DataFrame.corr: Parameter "method" description should start with a capital letter pandas.DataFrame.corrwith: Parameter "axis" description should start with a capital letter pandas.DataFrame.corrwith: Parameter "method" description should start with a capital letter pandas.DataFrame.drop_duplicates: Parameter "keep" description should start with a capital letter pandas.DataFrame.duplicated: Parameter "keep" description should start with a capital letter pandas.DataFrame.idxmax: Parameter "axis" description should start with a capital letter pandas.DataFrame.idxmin: Parameter "axis" description should start with a capital letter pandas.DataFrame.unstack: Parameter "fill_value" description should start with a capital letter pandas.DataFrame.plot: Parameter "kind" description should start with a capital letter pandas.DataFrame.plot: Parameter "style" description should start with a capital letter pandas.DataFrame.to_feather: Parameter "fname" description should start with a capital letter pandas.DataFrame.to_msgpack: Parameter "path" description should start with a capital letter pandas.DataFrame.to_msgpack: Parameter "append" description should start with a capital letter pandas.DataFrame.to_msgpack: Parameter "compress" description should start with a capital letter pandas.DataFrame.to_clipboard: Parameter "excel" description should start with a capital letter ``` - [x] tests added / passed - [x] 
passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Will continue to work through all PR08 docstring errors.
https://api.github.com/repos/pandas-dev/pandas/pulls/28655
2019-09-27T12:57:33Z
2019-09-30T16:22:42Z
2019-09-30T16:22:42Z
2019-10-01T18:51:36Z
CLN: Define and pin GroupBy properties without exec
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index b03c4f2238445..e13738b98833a 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -125,6 +125,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then # invgrep -R --include="*.py*" -E "from numpy import nan " pandas # GH#24822 not yet implemented since the offending imports have not all been removed RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of exec' ; echo $MSG + invgrep -R --include="*.py*" -E "[^a-zA-Z0-9_]exec\(" pandas + RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for pytest warns' ; echo $MSG invgrep -r -E --include '*.py' 'pytest\.warns' pandas/tests/ RET=$(($RET + $?)) ; echo $MSG "DONE" @@ -184,7 +188,7 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.rst" ".. ipython ::" doc/source RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check that no file in the repo contains tailing whitespaces' ; echo $MSG + MSG='Check that no file in the repo contains trailing whitespaces' ; echo $MSG set -o pipefail if [[ "$AZURE" == "true" ]]; then # we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f8f1455561c03..0ab19448043f6 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -11,7 +11,7 @@ from functools import partial from textwrap import dedent import typing -from typing import Any, Callable, FrozenSet, Iterator, Sequence, Type, Union +from typing import Any, Callable, FrozenSet, Sequence, Type, Union import warnings import numpy as np @@ -70,47 +70,63 @@ ScalarResult = typing.TypeVar("ScalarResult") -def whitelist_method_generator( - base_class: Type[GroupBy], klass: Type[FrameOrSeries], whitelist: FrozenSet[str] -) -> Iterator[str]: +def generate_property(name: str, klass: Type[FrameOrSeries]): """ - Yields all GroupBy member defs for DataFrame/Series names in whitelist. 
+ Create a property for a GroupBy subclass to dispatch to DataFrame/Series. + + Parameters + ---------- + name : str + klass : {DataFrame, Series} + + Returns + ------- + property + """ + + def prop(self): + return self._make_wrapper(name) + + parent_method = getattr(klass, name) + prop.__doc__ = parent_method.__doc__ or "" + prop.__name__ = name + return property(prop) + + +def pin_whitelisted_properties(klass: Type[FrameOrSeries], whitelist: FrozenSet[str]): + """ + Create GroupBy member defs for DataFrame/Series names in a whitelist. Parameters ---------- - base_class : Groupby class - base class klass : DataFrame or Series class class where members are defined. - whitelist : frozenset + whitelist : frozenset[str] Set of names of klass methods to be constructed Returns ------- - The generator yields a sequence of strings, each suitable for exec'ing, - that define implementations of the named methods for DataFrameGroupBy - or SeriesGroupBy. + class decorator + Notes + ----- Since we don't want to override methods explicitly defined in the base class, any such name is skipped. """ - property_wrapper_template = """@property -def %(name)s(self) : - \"""%(doc)s\""" - return self.__getattr__('%(name)s')""" - - for name in whitelist: - # don't override anything that was explicitly defined - # in the base class - if hasattr(base_class, name): - continue - # ugly, but we need the name string itself in the method. 
- f = getattr(klass, name) - doc = f.__doc__ - doc = doc if type(doc) == str else "" - wrapper_template = property_wrapper_template - params = {"name": name, "doc": doc} - yield wrapper_template % params + + def pinner(cls): + for name in whitelist: + if hasattr(cls, name): + # don't override anything that was explicitly defined + # in the base class + continue + + prop = generate_property(name, klass) + setattr(cls, name, prop) + + return cls + + return pinner class NDFrameGroupBy(GroupBy): @@ -747,13 +763,9 @@ def filter(self, func, dropna=True, *args, **kwargs): return self._apply_filter(indices, dropna) +@pin_whitelisted_properties(Series, base.series_apply_whitelist) class SeriesGroupBy(GroupBy): - # - # Make class defs of attributes on SeriesGroupBy whitelist - _apply_whitelist = base.series_apply_whitelist - for _def_str in whitelist_method_generator(GroupBy, Series, _apply_whitelist): - exec(_def_str) @property def _selection_name(self): @@ -1368,15 +1380,11 @@ def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None): return (filled / shifted) - 1 +@pin_whitelisted_properties(DataFrame, base.dataframe_apply_whitelist) class DataFrameGroupBy(NDFrameGroupBy): _apply_whitelist = base.dataframe_apply_whitelist - # - # Make class defs of attributes on DataFrameGroupBy whitelist. - for _def_str in whitelist_method_generator(GroupBy, DataFrame, _apply_whitelist): - exec(_def_str) - _block_agg_axis = 1 _agg_see_also_doc = dedent( diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e010e615e176e..f9c8e7748b7f7 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -562,8 +562,6 @@ def __getattr__(self, attr): return object.__getattribute__(self, attr) if attr in self.obj: return self[attr] - if hasattr(self.obj, attr): - return self._make_wrapper(attr) raise AttributeError( "%r object has no attribute %r" % (type(self).__name__, attr)
- [x] closes #16959 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` In an unrelated branch I found that this exec-generated code is a PITA to debug. So this PR finally gets rid of it.
https://api.github.com/repos/pandas-dev/pandas/pulls/28651
2019-09-27T03:36:56Z
2019-10-01T13:07:53Z
2019-10-01T13:07:53Z
2019-10-01T13:43:40Z
CLN: Exception catching in expressions
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 90bb12b4cd727..46bc762e1a0b3 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -107,15 +107,12 @@ def _evaluate_numexpr(op, op_str, a, b, reversed=False): a_value = getattr(a, "values", a) b_value = getattr(b, "values", b) - try: - result = ne.evaluate( - "a_value {op} b_value".format(op=op_str), - local_dict={"a_value": a_value, "b_value": b_value}, - casting="safe", - ) - except ValueError as detail: - if "unknown type object" in str(detail): - pass + + result = ne.evaluate( + "a_value {op} b_value".format(op=op_str), + local_dict={"a_value": a_value, "b_value": b_value}, + casting="safe", + ) if _TEST_MODE: _store_test_result(result is not None) @@ -140,21 +137,15 @@ def _where_numexpr(cond, a, b): a_value = getattr(a, "values", a) b_value = getattr(b, "values", b) - try: - result = ne.evaluate( - "where(cond_value, a_value, b_value)", - local_dict={ - "cond_value": cond_value, - "a_value": a_value, - "b_value": b_value, - }, - casting="safe", - ) - except ValueError as detail: - if "unknown type object" in str(detail): - pass - except Exception as detail: - raise TypeError(str(detail)) + result = ne.evaluate( + "where(cond_value, a_value, b_value)", + local_dict={ + "cond_value": cond_value, + "a_value": a_value, + "b_value": b_value, + }, + casting="safe", + ) if result is None: result = _where_standard(cond, a, b) @@ -167,11 +158,10 @@ def _where_numexpr(cond, a, b): def _has_bool_dtype(x): + if isinstance(x, ABCDataFrame): + return "bool" in x.dtypes try: - if isinstance(x, ABCDataFrame): - return "bool" in x.dtypes - else: - return x.dtype == bool + return x.dtype == bool except AttributeError: return isinstance(x, (bool, np.bool_))
https://api.github.com/repos/pandas-dev/pandas/pulls/28650
2019-09-26T23:57:04Z
2019-09-27T21:18:52Z
2019-09-27T21:18:52Z
2019-09-27T21:31:23Z
CLN: Exception in nanops
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index fe88622a04bb4..eb442e8bf3486 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -97,17 +97,21 @@ def f(values, axis=None, skipna=True, **kwds): for k, v in self.kwargs.items(): if k not in kwds: kwds[k] = v - try: - if values.size == 0 and kwds.get("min_count") is None: - # We are empty, returning NA for our type - # Only applies for the default `min_count` of None - # since that affects how empty arrays are handled. - # TODO(GH-18976) update all the nanops methods to - # correctly handle empty inputs and remove this check. - # It *may* just be `var` - return _na_for_min_count(values, axis) - - if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): + + if values.size == 0 and kwds.get("min_count") is None: + # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` + return _na_for_min_count(values, axis) + + if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): + if kwds.get("mask", None) is None: + # `mask` is not recognised by bottleneck, would raise + # TypeError if called + kwds.pop("mask", None) result = bn_func(values, axis=axis, **kwds) # prefer to treat inf/-inf as NA, but must compute the func @@ -116,18 +120,8 @@ def f(values, axis=None, skipna=True, **kwds): result = alt(values, axis=axis, skipna=skipna, **kwds) else: result = alt(values, axis=axis, skipna=skipna, **kwds) - except Exception: - try: - result = alt(values, axis=axis, skipna=skipna, **kwds) - except ValueError as e: - # we want to transform an object array - # ValueError message to the more typical TypeError - # e.g. 
this is normally a disallowed function on - # object arrays that contain strings - - if is_object_dtype(values): - raise TypeError(e) - raise + else: + result = alt(values, axis=axis, skipna=skipna, **kwds) return result
The only relevant exception that gets raised by bottleneck AFAICT is a TypeError when we pass a `mask` kwarg that it doesn't accept. By avoiding this case at the beginning, we avoid having to catch the exception.
https://api.github.com/repos/pandas-dev/pandas/pulls/28648
2019-09-26T23:46:05Z
2019-09-27T11:45:46Z
2019-09-27T11:45:46Z
2019-09-27T14:24:36Z
ENH: When using another plotting backend, minimize pre-processing
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c11d94c381d6d..8a35e5084f55b 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -722,6 +722,11 @@ def __call__(self, *args, **kwargs): ) kind = self._kind_aliases.get(kind, kind) + + # when using another backend, get out of the way + if plot_backend.__name__ != "pandas.plotting._matplotlib": + return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs) + if kind not in self._all_kinds: raise ValueError("{} is not a valid plot kind".format(kind)) diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py index 41b1a88b15acb..d4035f8eba102 100644 --- a/pandas/tests/plotting/test_backend.py +++ b/pandas/tests/plotting/test_backend.py @@ -86,3 +86,11 @@ def test_setting_backend_without_plot_raises(): def test_no_matplotlib_ok(): with pytest.raises(ImportError): pandas.plotting._core._get_plot_backend("matplotlib") + + +def test_extra_kinds_ok(monkeypatch, restore_backend): + # https://github.com/pandas-dev/pandas/pull/28647 + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + pandas.set_option("plotting.backend", "pandas_dummy_backend") + df = pandas.DataFrame({"A": [1, 2, 3]}) + df.plot(kind="not a real kind")
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry I ran into this while implementing the hvplot backend. In hvplot you can do: ```python df.hvplot.hist(y='y', by='category') ``` but with the pandas version ```python pd.options.plotting.backend= 'holoviews' df.plot.hist(y='y', by='category') ``` will fail because `data = data[y]` is called before the plotting is passed off to the backend. Basically it seems like backend writers should be free to get the passed pandas objects with as little interference as possible.
https://api.github.com/repos/pandas-dev/pandas/pulls/28647
2019-09-26T22:09:45Z
2019-11-19T04:59:10Z
2019-11-19T04:59:10Z
2019-11-19T04:59:16Z
CLN: remove unused categories/ordered handling in astype
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 152983451bc38..a3b9bec494854 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5772,7 +5772,7 @@ def _to_dict_of_blocks(self, copy=True): for k, v, in self._data.to_dict(copy=copy).items() } - def astype(self, dtype, copy=True, errors="raise", **kwargs): + def astype(self, dtype, copy=True, errors="raise"): """ Cast a pandas object to a specified dtype ``dtype``. @@ -5795,8 +5795,6 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs): .. versionadded:: 0.20.0 - **kwargs : keyword arguments to pass on to the constructor - Returns ------- casted : same type as caller @@ -5882,7 +5880,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs): "the key in Series dtype mappings." ) new_type = dtype[self.name] - return self.astype(new_type, copy, errors, **kwargs) + return self.astype(new_type, copy, errors) for col_name in dtype.keys(): if col_name not in self: @@ -5894,9 +5892,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs): for col_name, col in self.items(): if col_name in dtype: results.append( - col.astype( - dtype=dtype[col_name], copy=copy, errors=errors, **kwargs - ) + col.astype(dtype=dtype[col_name], copy=copy, errors=errors) ) else: results.append(results.append(col.copy() if copy else col)) @@ -5911,9 +5907,7 @@ def astype(self, dtype, copy=True, errors="raise", **kwargs): else: # else, only a single dtype is given - new_data = self._data.astype( - dtype=dtype, copy=copy, errors=errors, **kwargs - ) + new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self) # GH 19920: retain column metadata after concat diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 04c3b2b7714ef..b76cb5cbec626 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -574,18 +574,6 @@ def _astype(self, dtype, copy=False, errors="raise", 
**kwargs): # may need to convert to categorical if self.is_categorical_astype(dtype): - # deprecated 17636 - for deprecated_arg in ("categories", "ordered"): - if deprecated_arg in kwargs: - raise ValueError( - "Got an unexpected argument: {}".format(deprecated_arg) - ) - - categories = kwargs.get("categories", None) - ordered = kwargs.get("ordered", None) - if com.any_not_none(categories, ordered): - dtype = CategoricalDtype(categories, ordered) - if is_categorical_dtype(self.values): # GH 10696/18593: update an existing categorical efficiently return self.make_block(self.values.astype(dtype, copy=copy)) @@ -621,7 +609,7 @@ def _astype(self, dtype, copy=False, errors="raise", **kwargs): # _astype_nansafe works fine with 1-d only vals1d = values.ravel() try: - values = astype_nansafe(vals1d, dtype, copy=True, **kwargs) + values = astype_nansafe(vals1d, dtype, copy=True) except (ValueError, TypeError): # e.g. astype_nansafe can fail on object-dtype of strings # trying to convert to float diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 9be79bf93ece7..6ee120f3bec64 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -228,11 +228,10 @@ def test_astype_dict_like(self, dtype_class): with pytest.raises(KeyError, match=msg): s.astype(dt5) - def test_astype_categories_deprecation_raises(self): - - # deprecated 17636 + def test_astype_categories_raises(self): + # deprecated 17636, removed in GH-27141 s = Series(["a", "b", "a"]) - with pytest.raises(ValueError, match="Got an unexpected"): + with pytest.raises(TypeError, match="got an unexpected"): s.astype("category", categories=["a", "b"], ordered=True) @pytest.mark.parametrize(
This removes some unused code in the internals `astype`. There is a small change in behaviour though, when passing the categories/ordered keyword you now get TypeError instead of ValueError. But since that is Python's default behaviour, I would say this is rather a good fix.
https://api.github.com/repos/pandas-dev/pandas/pulls/28646
2019-09-26T20:14:18Z
2019-09-27T11:48:03Z
2019-09-27T11:48:03Z
2019-09-27T14:58:51Z
CLN: Exception in pickle loading
diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst index 664325ac063c0..072d1bae2a2b9 100644 --- a/doc/source/whatsnew/v0.8.0.rst +++ b/doc/source/whatsnew/v0.8.0.rst @@ -156,8 +156,7 @@ Other new features New plotting methods ~~~~~~~~~~~~~~~~~~~~ -.. ipython:: python - :suppress: +.. code-block:: python import pandas as pd fx = pd.read_pickle('data/fx_prices') @@ -165,7 +164,7 @@ New plotting methods ``Series.plot`` now supports a ``secondary_y`` option: -.. ipython:: python +.. code-block:: python plt.figure() diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 3a36713ccdbda..458c0c07c7602 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -4,7 +4,6 @@ import copy import pickle as pkl -import sys from typing import TYPE_CHECKING import warnings @@ -25,14 +24,14 @@ def load_reduce(self): try: stack[-1] = func(*args) return - except Exception as e: + except TypeError as err: # If we have a deprecated function, # try to replace and try again. 
msg = "_reconstruct: First argument must be a sub-type of ndarray" - if msg in str(e): + if msg in str(err): try: cls = args[0] stack[-1] = object.__new__(cls) @@ -40,22 +39,6 @@ def load_reduce(self): except TypeError: pass - # try to re-encode the arguments - if getattr(self, "encoding", None) is not None: - args = tuple( - arg.encode(self.encoding) if isinstance(arg, str) else arg - for arg in args - ) - try: - stack[-1] = func(*args) - return - except TypeError: - pass - - # unknown exception, re-raise - if getattr(self, "is_verbose", None): - print(sys.exc_info()) - print(func, args) raise diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 621e8e09230b7..df1996aa0dee0 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -5,7 +5,7 @@ from numpy.lib.format import read_array -from pandas.compat import pickle_compat as pc +from pandas.compat import PY36, pickle_compat as pc from pandas.io.common import _get_handle, _stringify_path @@ -142,18 +142,24 @@ def read_pickle(path, compression="infer"): # 1) try standard libary Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes - # 3) try pickle_compat with latin1 encoding + + excs_to_catch = (AttributeError, ImportError) + if PY36: + excs_to_catch += (ModuleNotFoundError,) try: with warnings.catch_warnings(record=True): # We want to silence any warnings about, e.g. moved modules. warnings.simplefilter("ignore", Warning) return pickle.load(f) - except Exception: - try: - return pc.load(f, encoding=None) - except Exception: - return pc.load(f, encoding="latin1") + except excs_to_catch: + # e.g. + # "No module named 'pandas.core.sparse.series'" + # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib" + return pc.load(f, encoding=None) + except UnicodeDecodeError: + # e.g. 
can occur for files written in py27; see GH#28645 + return pc.load(f, encoding="latin-1") finally: f.close() for _f in fh: diff --git a/pandas/tests/io/data/test_py27.pkl b/pandas/tests/io/data/test_py27.pkl new file mode 100644 index 0000000000000..5308b864bc0c7 Binary files /dev/null and b/pandas/tests/io/data/test_py27.pkl differ diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index edd0b09185e71..23a16c885687f 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -377,3 +377,14 @@ def test_read(self, protocol, get_random_path): df.to_pickle(path, protocol=protocol) df2 = pd.read_pickle(path) tm.assert_frame_equal(df, df2) + + +def test_unicode_decode_error(): + # pickle file written with py27, should be readable without raising + # UnicodeDecodeError, see GH#28645 + path = os.path.join(os.path.dirname(__file__), "data", "test_py27.pkl") + df = pd.read_pickle(path) + + # just test the columns are correct since the values are random + excols = pd.Index(["a", "b", "c"]) + tm.assert_index_equal(df.columns, excols)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28645
2019-09-26T20:07:10Z
2019-11-02T15:48:40Z
2019-11-02T15:48:40Z
2019-11-02T15:50:19Z
Doc contribution pr06
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ed05691d33d07..e4a44a89998e3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -322,7 +322,7 @@ class DataFrame(NDFrame): RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer - copy : boolean, default False + copy : bool, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input See Also @@ -1542,7 +1542,7 @@ def from_records( Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame - index : string, list of fields, array-like + index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None @@ -1553,7 +1553,7 @@ def from_records( columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) - coerce_float : boolean, default False + coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None @@ -3461,7 +3461,7 @@ def insert(self, loc, column, value, allow_duplicates=False): ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) - column : string, number, or hashable object + column : str, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional @@ -4775,7 +4775,7 @@ def drop_duplicates(self, subset=None, keep="first", inplace=False): - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. 
- inplace : boolean, default False + inplace : bool, default False Whether to drop duplicates in place or to return a copy Returns @@ -5197,7 +5197,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): Parameters ---------- - i, j : int, string (can be mixed) + i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns @@ -5723,12 +5723,12 @@ def update( Parameters ----------%s - index : string or object, optional + index : str or object, optional Column to use to make new frame's index. If None, uses existing index. - columns : string or object + columns : str or object Column to use to make new frame's columns. - values : string, object or a list of the previous, optional + values : str, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. @@ -5850,14 +5850,14 @@ def pivot(self, index=None, columns=None, values=None): is function or list of functions fill_value : scalar, default None Value to replace missing values with - margins : boolean, default False + margins : bool, default False Add all row / columns (e.g. for subtotal / grand totals) - dropna : boolean, default True + dropna : bool, default True Do not include columns whose entries are all NaN - margins_name : string, default 'All' + margins_name : str, default 'All' Name of the row / column that will contain the totals when margins is True. - observed : boolean, default False + observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. 
@@ -6231,7 +6231,7 @@ def unstack(self, level=-1, fill_value=None): Parameters ---------- - level : int, string, or list of these, default -1 (last level) + level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name fill_value : replace NaN with this value if the unstack produces missing values @@ -6305,7 +6305,7 @@ def unstack(self, level=-1, fill_value=None): ``frame.columns.name`` or 'variable'. value_name : scalar, default 'value' Name to use for the 'value' column. - col_level : int or string, optional + col_level : int or str, optional If columns are a MultiIndex then use this level to melt. Returns @@ -6894,11 +6894,11 @@ def append(self, other, ignore_index=False, verify_integrity=False, sort=None): ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. - ignore_index : boolean, default False + ignore_index : bool, default False If True, do not use the index labels. - verify_integrity : boolean, default False + verify_integrity : bool, default False If True, raise ValueError on creating index with duplicates. - sort : boolean, default None + sort : bool, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to @@ -7940,7 +7940,7 @@ def idxmin(self, axis=0, skipna=True): ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise - skipna : boolean, default True + skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. @@ -7977,7 +7977,7 @@ def idxmax(self, axis=0, skipna=True): ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' for row-wise, 1 or 'columns' for column-wise - skipna : boolean, default True + skipna : bool, default True Exclude NA/null values. 
If an entire row/column is NA, the result will be NA. diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e010e615e176e..6facbe7e01c57 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1032,7 +1032,7 @@ class GroupBy(_GroupBy): Most users should ignore this exclusions : array-like, optional List of columns to exclude - name : string + name : str Most users should ignore this Returns @@ -1253,7 +1253,7 @@ def std(self, ddof=1, *args, **kwargs): Parameters ---------- - ddof : integer, default 1 + ddof : int, default 1 degrees of freedom Returns @@ -1276,7 +1276,7 @@ def var(self, ddof=1, *args, **kwargs): Parameters ---------- - ddof : integer, default 1 + ddof : int, default 1 degrees of freedom Returns @@ -1311,7 +1311,7 @@ def sem(self, ddof=1): Parameters ---------- - ddof : integer, default 1 + ddof : int, default 1 degrees of freedom Returns @@ -1623,7 +1623,7 @@ def pad(self, limit=None): Parameters ---------- - limit : integer, optional + limit : int, optional limit of how many values to fill Returns @@ -1649,7 +1649,7 @@ def backfill(self, limit=None): Parameters ---------- - limit : integer, optional + limit : int, optional limit of how many values to fill Returns @@ -2099,13 +2099,13 @@ def rank( * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups - ascending : boolean, default True + ascending : bool, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending - pct : boolean, default False + pct : bool, default False Compute percentage rank of data within each group axis : int, default 0 The axis of the object over which to compute the rank. 
@@ -2313,7 +2313,7 @@ def shift(self, periods=1, freq=None, axis=0, fill_value=None): Parameters ---------- - periods : integer, default 1 + periods : int, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 2ebfbed0b132a..2d37121d28308 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -48,17 +48,17 @@ class Grouper: Parameters ---------- - key : string, defaults to None + key : str, defaults to None groupby key, which selects the grouping column of the target level : name/number, defaults to None the level for the target index - freq : string / frequency object, defaults to None + freq : str / frequency object, defaults to None This will groupby the specified frequency if the target selection (via key or level) is a datetime-like object. For full specification of available frequencies, please see `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_. axis : number/name of the axis, defaults to 0 - sort : boolean, default to False + sort : bool, default to False whether to sort the resulting labels closed : {'left' or 'right'} Closed end of interval. Only when `freq` parameter is passed. @@ -69,7 +69,7 @@ class Grouper: If grouper is PeriodIndex and `freq` parameter is passed. base : int, default 0 Only when `freq` parameter is passed. - loffset : string, DateOffset, timedelta object + loffset : str, DateOffset, timedelta object Only when `freq` parameter is passed. 
Returns diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 62662edb692a7..0b633602f3ed0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -904,8 +904,8 @@ def repeat(self, repeats, axis=None): Parameters ---------- - name : string, optional - deep : boolean, default False + name : str, optional + deep : bool, default False dtype : numpy dtype or pandas type Returns @@ -1172,7 +1172,7 @@ def to_series(self, index=None, name=None): ---------- index : Index, optional index of resulting Series. If None, defaults to original index - name : string, optional + name : str, optional name of resulting Series. If None, defaults to name of original index @@ -1198,7 +1198,7 @@ def to_frame(self, index=True, name=None): Parameters ---------- - index : boolean, default True + index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, default None @@ -1401,7 +1401,7 @@ def rename(self, name, inplace=False): ---------- name : label or list of labels Name(s) to set. - inplace : boolean, default False + inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. @@ -1494,7 +1494,7 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None): Parameters ---------- - ascending : boolean, default True + ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters @@ -3415,8 +3415,8 @@ def _reindex_non_unique(self, target): other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None - return_indexers : boolean, default False - sort : boolean, default False + return_indexers : bool, default False + sort : bool, default False Sort the join keys lexicographically in the result Index. 
If False, the order of the join keys depends on the join type (how keyword) @@ -3942,7 +3942,7 @@ def memory_usage(self, deep=False): Parameters ---------- - cond : boolean array-like with the same length as self + cond : bool array-like with the same length as self other : scalar, or array-like Returns @@ -4924,7 +4924,7 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): end : label, default None If None, defaults to the end step : int, default None - kind : string, default None + kind : str, default None Returns ------- diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cce390d98c037..0b20df38e7d42 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -106,7 +106,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin): Optional datetime-like data to construct index with copy : bool Make a copy of input ndarray - freq : string or pandas offset object, optional + freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string 'infer' can be passed in order to set the frequency of the index as the inferred frequency upon creation @@ -129,7 +129,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin): .. deprecated:: 0.24.0 - closed : string or None, default None + closed : str or None, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) @@ -1371,8 +1371,8 @@ def indexer_between_time( datetime.time or string in appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"). - include_start : boolean, default True - include_end : boolean, default True + include_start : bool, default True + include_end : bool, default True Returns ------- @@ -1435,7 +1435,7 @@ def date_range( Left bound for generating dates. 
end : str or datetime-like, optional Right bound for generating dates. - periods : integer, optional + periods : int, optional Number of periods to generate. freq : str or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H'. See @@ -1598,22 +1598,22 @@ def bdate_range( Parameters ---------- - start : string or datetime-like, default None + start : str or datetime-like, default None Left bound for generating dates. - end : string or datetime-like, default None + end : str or datetime-like, default None Right bound for generating dates. - periods : integer, default None + periods : int, default None Number of periods to generate. - freq : string or DateOffset, default 'B' (business daily) + freq : str or DateOffset, default 'B' (business daily) Frequency strings can have multiples, e.g. '5H'. - tz : string or None + tz : str or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. - name : string, default None + name : str, default None Name of the resulting DatetimeIndex. - weekmask : string or None, default None + weekmask : str or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. @@ -1627,7 +1627,7 @@ def bdate_range( .. versionadded:: 0.21.0 - closed : string, default None + closed : str, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). 
**kwargs diff --git a/pandas/core/resample.py b/pandas/core/resample.py index a5d0e2cb3b58f..545bc21dd6d1b 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -423,7 +423,7 @@ def pad(self, limit=None): Parameters ---------- - limit : integer, optional + limit : int, optional limit of how many values to fill Returns @@ -514,7 +514,7 @@ def backfill(self, limit=None): Parameters ---------- - limit : integer, optional + limit : int, optional Limit of how many values to fill. Returns @@ -628,7 +628,7 @@ def fillna(self, method, limit=None): * 'backfill' or 'bfill': use next valid observation to fill gap. * 'nearest': use nearest valid observation to fill gap. - limit : integer, optional + limit : int, optional Limit of how many consecutive missing values to fill. Returns @@ -823,7 +823,7 @@ def std(self, ddof=1, *args, **kwargs): Parameters ---------- - ddof : integer, default 1 + ddof : int, default 1 Degrees of freedom. Returns @@ -840,7 +840,7 @@ def var(self, ddof=1, *args, **kwargs): Parameters ---------- - ddof : integer, default 1 + ddof : int, default 1 degrees of freedom Returns diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index b07647cf5b5fb..32dc3c1f3e8f2 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -577,7 +577,7 @@ def to_datetime( Parameters ---------- - arg : integer, float, string, datetime, list, tuple, 1-d array, Series + arg : int, float, str, datetime, list, tuple, 1-d array, Series or DataFrame/dict-like errors : {'ignore', 'raise', 'coerce'}, default 'raise' @@ -585,13 +585,13 @@ def to_datetime( - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input - dayfirst : boolean, default False + dayfirst : bool, default False Specify a date parse order if `arg` is str or its list-likes. 
If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug, based on dateutil behavior). - yearfirst : boolean, default False + yearfirst : bool, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as @@ -604,10 +604,10 @@ def to_datetime( .. versionadded:: 0.16.1 - utc : boolean, default None + utc : bool, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). - box : boolean, default True + box : bool, default True - If True returns a DatetimeIndex or Index-like object - If False returns ndarray of values. @@ -617,22 +617,22 @@ def to_datetime( instead to get an ndarray of values or numpy.datetime64, respectively. - format : string, default None + format : str, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. See strftime documentation for more information on choices: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior - exact : boolean, True by default + exact : bool, True by default - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. - unit : string, default 'ns' + unit : str, default 'ns' unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. - infer_datetime_format : boolean, default False + infer_datetime_format : bool, default False If True and no `format` is given, attempt to infer the format of the datetime strings, and if it can be inferred, switch to a faster method of parsing them. 
In some cases this can increase the parsing @@ -649,7 +649,7 @@ def to_datetime( origin. .. versionadded:: 0.20.0 - cache : boolean, default True + cache : bool, default True If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 571c544d48b29..6bd3532d538c7 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -29,12 +29,12 @@ def read_sas( By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. - format : string {'xport', 'sas7bdat'} or None + format : str {'xport', 'sas7bdat'} or None If None, file format is inferred from file extension. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. - encoding : string, default is None + encoding : str, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index fe6b339c2f4c8..8724382d9ec55 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -28,7 +28,7 @@ def hist_series( yrot=None, figsize=None, bins=10, - **kwds + **kwargs ): """ Draw histogram of the input series using matplotlib. @@ -56,7 +56,7 @@ def hist_series( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. 
- `**kwds` : keywords + **kwargs To be passed to the actual plotting function Returns @@ -80,7 +80,7 @@ def hist_series( yrot=yrot, figsize=figsize, bins=bins, - **kwds + **kwargs ) @@ -99,7 +99,7 @@ def hist_frame( figsize=None, layout=None, bins=10, - **kwds + **kwargs ): """ Make a histogram of the DataFrame's. @@ -151,7 +151,7 @@ def hist_frame( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. - **kwds + **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. @@ -194,7 +194,7 @@ def hist_frame( figsize=figsize, layout=layout, bins=bins, - **kwds + **kwargs ) @@ -209,7 +209,7 @@ def boxplot( figsize=None, layout=None, return_type=None, - **kwds + **kwargs ): """ Make a box plot from DataFrame columns. @@ -260,7 +260,7 @@ def boxplot( If ``return_type`` is `None`, a NumPy array of axes with the same shape as ``layout`` is returned. - **kwds + **kwargs All other plotting keyword arguments to be passed to :func:`matplotlib.pyplot.boxplot`. @@ -385,7 +385,7 @@ def boxplot( figsize=figsize, layout=layout, return_type=return_type, - **kwds + **kwargs ) @@ -401,7 +401,7 @@ def boxplot_frame( figsize=None, layout=None, return_type=None, - **kwds + **kwargs ): plot_backend = _get_plot_backend() return plot_backend.boxplot_frame( @@ -415,7 +415,7 @@ def boxplot_frame( figsize=figsize, layout=layout, return_type=return_type, - **kwds + **kwargs ) @@ -431,7 +431,7 @@ def boxplot_frame_groupby( layout=None, sharex=False, sharey=True, - **kwds + **kwargs ): """ Make box plots from DataFrameGroupBy data. @@ -459,7 +459,7 @@ def boxplot_frame_groupby( Whether y-axes will be shared among subplots .. 
versionadded:: 0.23.1 - `**kwds` : Keyword Arguments + **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function @@ -495,7 +495,7 @@ def boxplot_frame_groupby( layout=layout, sharex=sharex, sharey=sharey, - **kwds + **kwargs ) @@ -586,7 +586,7 @@ class PlotAccessor(PandasObject): labels with "(right)" in the legend include_bool : bool, default is False If True, boolean values can be plotted. - `**kwds` : keywords + **kwargs Options to pass to matplotlib plotting method. Returns @@ -810,7 +810,7 @@ def line(self, x=None, y=None, **kwargs): The values to be plotted. Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -880,7 +880,7 @@ def bar(self, x=None, y=None, **kwargs): y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. - **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -963,7 +963,7 @@ def barh(self, x=None, y=None, **kwargs): Column to be used for categories. y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1049,7 +1049,7 @@ def box(self, by=None, **kwargs): ---------- by : str or sequence Column in the DataFrame to group by. - **kwds : optional + **kwargs Additional keywords are documented in :meth:`DataFrame.plot`. @@ -1092,7 +1092,7 @@ def hist(self, by=None, bins=10, **kwargs): Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. - **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -1148,7 +1148,7 @@ def kde(self, bw_method=None, ind=None, **kwargs): 1000 equally spaced points are used. 
If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. - **kwds : optional + **kwargs Additional keyword arguments are documented in :meth:`pandas.%(this-datatype)s.plot`. @@ -1250,7 +1250,7 @@ def area(self, x=None, y=None, **kwargs): stacked : bool, default True Area plots are stacked by default. Set to False to create a unstacked plot. - **kwds : optional + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -1322,7 +1322,7 @@ def pie(self, **kwargs): y : int or label, optional Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1404,7 +1404,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs): - A column name or position whose values will be used to color the marker points according to a colormap. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1476,7 +1476,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs): Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. - **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`.
Documentation update fixing some of the methods with a PR06 error code which involved updating - string to str - integer to int - boolean to bool No tests required
https://api.github.com/repos/pandas-dev/pandas/pulls/28644
2019-09-26T19:38:55Z
2019-09-27T15:53:47Z
2019-09-27T15:53:47Z
2019-09-27T17:03:49Z
BUG: Fix TypeError raised in libreduction
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 361c21c18c4da..a7d6d19bbc80d 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -15,7 +15,7 @@ from numpy cimport (ndarray, cnp.import_array() cimport pandas._libs.util as util -from pandas._libs.lib import maybe_convert_objects, values_from_object +from pandas._libs.lib import maybe_convert_objects cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt): @@ -23,7 +23,7 @@ cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt): if (util.is_array(obj) or (isinstance(obj, list) and len(obj) == cnt) or getattr(obj, 'shape', None) == (cnt,)): - raise ValueError('function does not reduce') + raise ValueError('Function does not reduce') return np.empty(size, dtype='O') @@ -103,7 +103,7 @@ cdef class Reducer: ndarray arr, result, chunk Py_ssize_t i, incr flatiter it - bint has_labels + bint has_labels, has_ndarray_labels object res, name, labels, index object cached_typ=None @@ -113,14 +113,18 @@ cdef class Reducer: chunk.data = arr.data labels = self.labels has_labels = labels is not None + has_ndarray_labels = util.is_array(labels) has_index = self.index is not None incr = self.increment try: for i in range(self.nresults): - if has_labels: + if has_ndarray_labels: name = util.get_value_at(labels, i) + elif has_labels: + # labels is an ExtensionArray + name = labels[i] else: name = None @@ -362,7 +366,8 @@ cdef class SeriesGrouper: def get_result(self): cdef: - ndarray arr, result + # Define result to avoid UnboundLocalError + ndarray arr, result = None ndarray[int64_t] labels, counts Py_ssize_t i, n, group_size, lab object res @@ -428,6 +433,9 @@ cdef class SeriesGrouper: islider.reset() vslider.reset() + if result is None: + raise ValueError("No result.") + if result.dtype == np.object_: result = maybe_convert_objects(result) @@ -639,11 +647,11 @@ def compute_reduction(arr, f, axis=0, dummy=None, labels=None): """ if labels is not None: - if 
labels._has_complex_internals: - raise Exception('Cannot use shortcut') + # Caller is responsible for ensuring we don't have MultiIndex + assert not labels._has_complex_internals - # pass as an ndarray - labels = values_from_object(labels) + # pass as an ndarray/ExtensionArray + labels = labels._values reducer = Reducer(arr, f, axis=axis, dummy=dummy, labels=labels) return reducer.get_result() diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 61d093d19e4be..1be881e683be5 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -223,10 +223,12 @@ def apply_empty_result(self): def apply_raw(self): """ apply to the values as a numpy array """ - try: result = libreduction.compute_reduction(self.values, self.f, axis=self.axis) - except Exception: + except ValueError as err: + if "Function does not reduce" not in str(err): + # catch only ValueError raised intentionally in libreduction + raise result = np.apply_along_axis(self.f, self.axis, self.values) # TODO: mixed type case @@ -273,24 +275,38 @@ def apply_standard(self): if ( self.result_type in ["reduce", None] and not self.dtypes.apply(is_extension_type).any() + # Disallow complex_internals since libreduction shortcut + # cannot handle MultiIndex + and not self.agg_axis._has_complex_internals ): - # Create a dummy Series from an empty array - from pandas import Series - values = self.values index = self.obj._get_axis(self.axis) labels = self.agg_axis empty_arr = np.empty(len(index), dtype=values.dtype) - dummy = Series(empty_arr, index=index, dtype=values.dtype) + + # Preserve subclass for e.g. 
test_subclassed_apply + dummy = self.obj._constructor_sliced( + empty_arr, index=index, dtype=values.dtype + ) try: result = libreduction.compute_reduction( values, self.f, axis=self.axis, dummy=dummy, labels=labels ) - return self.obj._constructor_sliced(result, index=labels) - except Exception: + except ValueError as err: + if "Function does not reduce" not in str(err): + # catch only ValueError raised intentionally in libreduction + raise + except TypeError: + # e.g. test_apply_ignore_failures we just ignore + if not self.ignore_failures: + raise + except ZeroDivisionError: + # reached via numexpr; fall back to python implementation pass + else: + return self.obj._constructor_sliced(result, index=labels) # compute the result using the series generator self.apply_series_generator() diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index bec5cbc5fecb8..6212a37472000 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -775,11 +775,7 @@ def test_omit_nuisance(df): # won't work with axis = 1 grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1) - msg = ( - r'\("unsupported operand type\(s\) for \+: ' - "'Timestamp' and 'float'\"" - r", 'occurred at index 0'\)" - ) + msg = r'\("unsupported operand type\(s\) for \+: ' "'Timestamp' and 'float'\", 0" with pytest.raises(TypeError, match=msg): grouped.agg(lambda x: x.sum(0, numeric_only=False))
This is a PITA and I'm not 100% happy with the solution here, open to suggestions. There is a call to `util.get_value_at(labels, i)` that raises `TypeError` if `labels` is not an `ndarray`. This fixes that by checking for non-ndarray and handling that case correctly. There is _also_ a case in master where we get an `UnboundLocalError` by referencing `result` before it is assigned. This patches that to raise a `ValueError` instead, _but_ AFAICT fixing the ndarray bug above made it so that the UnboundLocalError case is no longer reached in the tests. i.e. this change is definitely more correct, but we don't have a test case specific to it. I'm also not wild about the specific exception-catching on L297-307 in core.apply, but don't see a viable alternative. Suggestions welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/28643
2019-09-26T18:19:31Z
2019-10-02T19:58:34Z
2019-10-02T19:58:34Z
2019-10-02T20:24:37Z
CLN: more Exceptions
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 61d093d19e4be..d093d7a145382 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -342,7 +342,7 @@ def wrap_results(self): results = self.results # see if we can infer the results - if len(results) > 0 and is_sequence(results[0]): + if len(results) > 0 and 0 in results and is_sequence(results[0]): return self.wrap_results_for_axis() diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 62662edb692a7..8aff0bc19d68d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2588,8 +2588,9 @@ def intersection(self, other, sort=False): try: indexer = Index(rvals).get_indexer(lvals) indexer = indexer.take((indexer != -1).nonzero()[0]) - except Exception: - # duplicates + except (InvalidIndexError, IncompatibleFrequency): + # InvalidIndexError raised by get_indexer if non-unique + # IncompatibleFrequency raised by PeriodIndex.get_indexer indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0]) indexer = indexer[indexer != -1] diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 44cb399336d62..b0683fb8b0dfb 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1591,10 +1591,7 @@ def execute(self, *args, **kwargs): else: cur = self.con.cursor() try: - if kwargs: - cur.execute(*args, **kwargs) - else: - cur.execute(*args) + cur.execute(*args, **kwargs) return cur except Exception as exc: try: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index c67106e897727..0b674b556b2ee 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -2388,16 +2388,16 @@ def write_file(self): self._write_map() except Exception as exc: self._close() - try: - if self._own_file: + if self._own_file: + try: os.unlink(self._fname) - except Exception: - warnings.warn( - "This save was not successful but {0} could not " - "be deleted. 
This file is not " - "valid.".format(self._fname), - ResourceWarning, - ) + except OSError: + warnings.warn( + "This save was not successful but {0} could not " + "be deleted. This file is not " + "valid.".format(self._fname), + ResourceWarning, + ) raise exc else: self._close()
84 of these left.
https://api.github.com/repos/pandas-dev/pandas/pulls/28642
2019-09-26T17:56:50Z
2019-09-27T11:48:49Z
2019-09-27T11:48:49Z
2019-09-27T14:23:02Z
REF: Consolidate alignment calls in DataFrame ops
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ed05691d33d07..9dc16eb132504 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5271,24 +5271,17 @@ def _arith_op(left, right): new_data = dispatch_fill_zeros(func, this.values, other.values, res_values) return this._construct_result(new_data) - def _combine_match_index(self, other, func, level=None): - left, right = self.align(other, join="outer", axis=0, level=level, copy=False) - # at this point we have `left.index.equals(right.index)` + def _combine_match_index(self, other, func): + # at this point we have `self.index.equals(other.index)` - if left._is_mixed_type or right._is_mixed_type: + if self._is_mixed_type or other._is_mixed_type: # operate column-wise; avoid costly object-casting in `.values` - new_data = ops.dispatch_to_series(left, right, func) + new_data = ops.dispatch_to_series(self, other, func) else: # fastpath --> operate directly on values with np.errstate(all="ignore"): - new_data = func(left.values.T, right.values).T - return left._construct_result(new_data) - - def _combine_match_columns(self, other: Series, func, level=None): - left, right = self.align(other, join="outer", axis=1, level=level, copy=False) - # at this point we have `left.columns.equals(right.index)` - new_data = ops.dispatch_to_series(left, right, func, axis="columns") - return left._construct_result(new_data) + new_data = func(self.values.T, other.values).T + return new_data def _construct_result(self, result) -> "DataFrame": """ diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 16d2eaa410637..05b2becfc73d8 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -379,7 +379,7 @@ def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} elif isinstance(right, ABCSeries) and axis == "columns": - # We only get here if called via left._combine_match_columns, + # We only get here if called via _combine_frame_series, # 
in which case we specifically want to operate row-by-row assert right.index.equals(left.columns) @@ -597,15 +597,18 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N "fill_value {fill} not supported.".format(fill=fill_value) ) - if axis is not None: - axis = self._get_axis_number(axis) - if axis == 0: - return self._combine_match_index(other, func, level=level) - else: - return self._combine_match_columns(other, func, level=level) + if axis is None: + # default axis is columns + axis = 1 + + axis = self._get_axis_number(axis) + left, right = self.align(other, join="outer", axis=axis, level=level, copy=False) + if axis == 0: + new_data = left._combine_match_index(right, func) + else: + new_data = dispatch_to_series(left, right, func, axis="columns") - # default axis is columns - return self._combine_match_columns(other, func, level=level) + return left._construct_result(new_data) def _align_method_FRAME(left, right, axis):
Next step after this will be to consolidate _construct_result calls. That hinges on some not-obvious alignment behaviors.
https://api.github.com/repos/pandas-dev/pandas/pulls/28638
2019-09-26T16:19:52Z
2019-10-02T11:52:27Z
2019-10-02T11:52:27Z
2019-10-02T12:44:29Z
CLN: streamline Series _construct_result calls
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 16d2eaa410637..79272c5643281 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,7 +5,7 @@ """ import datetime import operator -from typing import Tuple +from typing import Tuple, Union import numpy as np @@ -13,7 +13,12 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndexClass, + ABCSeries, +) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array @@ -436,13 +441,37 @@ def _align_method_SERIES(left, right, align_asobject=False): return left, right -def _construct_result(left, result, index, name, dtype=None): +def _construct_result( + left: ABCSeries, + result: Union[np.ndarray, ABCExtensionArray], + index: ABCIndexClass, + name, +): """ - If the raw op result has a non-None name (e.g. it is an Index object) and - the name argument is None, then passing name to the constructor will - not be enough; we still need to override the name attribute. + Construct an appropriately-labelled Series from the result of an op. + + Parameters + ---------- + left : Series + result : ndarray or ExtensionArray + index : Index + name : object + + Returns + ------- + Series + In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ - out = left._constructor(result, index=index, dtype=dtype) + if isinstance(result, tuple): + # produced by divmod or rdivmod + return ( + _construct_result(left, result[0], index=index, name=name), + _construct_result(left, result[1], index=index, name=name), + ) + + # We do not pass dtype to ensure that the Series constructor + # does inference in the case where `result` has object-dtype. 
+ out = left._constructor(result, index=index) out = out.__finalize__(left) # Set the result's name after __finalize__ is called because __finalize__ @@ -451,15 +480,6 @@ def _construct_result(left, result, index, name, dtype=None): return out -def _construct_divmod_result(left, result, index, name, dtype=None): - """divmod returns a tuple of like indexed series instead of a single series. - """ - return ( - _construct_result(left, result[0], index=index, name=name, dtype=dtype), - _construct_result(left, result[1], index=index, name=name, dtype=dtype), - ) - - def _arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid @@ -468,9 +488,6 @@ def _arith_method_SERIES(cls, op, special): str_rep = _get_opstr(op) op_name = _get_op_name(op, special) eval_kwargs = _gen_eval_kwargs(op_name) - construct_result = ( - _construct_divmod_result if op in [divmod, rdivmod] else _construct_result - ) def wrapper(left, right): if isinstance(right, ABCDataFrame): @@ -482,9 +499,7 @@ def wrapper(left, right): lvalues = extract_array(left, extract_numpy=True) result = arithmetic_op(lvalues, right, op, str_rep, eval_kwargs) - # We do not pass dtype to ensure that the Series constructor - # does inference in the case where `result` has object-dtype. 
- return construct_result(left, result, index=left.index, name=res_name) + return _construct_result(left, result, index=left.index, name=res_name) wrapper.__name__ = op_name return wrapper @@ -553,6 +568,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # validate axis if axis is not None: self._get_axis_number(axis) + if isinstance(other, ABCSeries): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (np.ndarray, list, tuple)): @@ -564,7 +580,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): if fill_value is not None: self = self.fillna(fill_value) - return self._constructor(op(self, other), self.index).__finalize__(self) + return op(self, other) flex_wrapper.__name__ = name return flex_wrapper diff --git a/pandas/core/series.py b/pandas/core/series.py index c87e371354f63..276f829d287ab 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2738,10 +2738,7 @@ def _binop(self, other, func, level=None, fill_value=None): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) - if func.__name__ in ["divmod", "rdivmod"]: - ret = ops._construct_divmod_result(self, result, new_index, name) - else: - ret = ops._construct_result(self, result, new_index, name) + ret = ops._construct_result(self, result, new_index, name) return ret def combine(self, other, func, fill_value=None):
After this, the three Series methods in `ops.__init__` are just about in sync, the last holdout being alignment behavior in the comparison method.
https://api.github.com/repos/pandas-dev/pandas/pulls/28637
2019-09-26T15:49:51Z
2019-10-01T16:53:18Z
2019-10-01T16:53:17Z
2019-10-01T16:58:20Z
BUG: value_counts can handle the case even with empty groups (#28479)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index b40a64420a0be..2130e0c88ef4d 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -454,6 +454,7 @@ Other - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now support dicts as ``compression`` argument with key ``'method'`` being the compression method and others as additional compression options when the compression method is ``'zip'``. (:issue:`26023`) - Bug in :meth:`Series.diff` where a boolean series would incorrectly raise a ``TypeError`` (:issue:`17294`) - :meth:`Series.append` will no longer raise a ``TypeError`` when passed a tuple of ``Series`` (:issue:`28410`) +- :meth:`SeriesGroupBy.value_counts` will be able to handle the case even when the :class:`Grouper` makes empty groups (:issue: 28479) - Fix corrupted error message when calling ``pandas.libs._json.encode()`` on a 0d array (:issue:`18878`) - Fix :class:`AbstractHolidayCalendar` to return correct results for years after 2030 (now goes up to 2200) (:issue:`27790`) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 2c8aa1294451d..9599ce0bf39a9 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -767,6 +767,11 @@ def group_info(self): ngroups, ) + @cache_readonly + def recons_codes(self): + # get unique result indices, and prepend 0 as groupby starts from the first + return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]] + @cache_readonly def result_index(self): if len(self.binlabels) != 0 and isna(self.binlabels[0]): diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index 363c5a9af0180..c76ee09f977b5 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas import DataFrame, MultiIndex, Series, date_range +from pandas import DataFrame, Grouper, MultiIndex, Series, 
date_range, to_datetime import pandas.util.testing as tm @@ -79,3 +79,31 @@ def rebuild_index(df): # have to sort on index because of unstable sort on values left, right = map(rebuild_index, (left, right)) # xref GH9212 tm.assert_series_equal(left.sort_index(), right.sort_index()) + + +def test_series_groupby_value_counts_with_grouper(): + # GH28479 + df = DataFrame( + { + "Timestamp": [ + 1565083561, + 1565083561 + 86400, + 1565083561 + 86500, + 1565083561 + 86400 * 2, + 1565083561 + 86400 * 3, + 1565083561 + 86500 * 3, + 1565083561 + 86400 * 4, + ], + "Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"], + } + ).drop([3]) + + df["Datetime"] = to_datetime(df["Timestamp"].apply(lambda t: str(t)), unit="s") + dfg = df.groupby(Grouper(freq="1D", key="Datetime")) + + # have to sort on index because of unstable sort on values xref GH9212 + result = dfg["Food"].value_counts().sort_index() + expected = dfg["Food"].apply(Series.value_counts).sort_index() + expected.index.names = result.index.names + + tm.assert_series_equal(result, expected)
* If applying rep to recons_labels go fail, use ids which has no consecutive duplicates instead. - [x] closes #28479 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry xuancong84 found that value_counts() crashes if `groupby` object contains empty groups. However, even though I made the construction of DataFrame don't skip empty rows, it still crashed. Till then, I already tried in many ways though, in this time I tried to correct the callee `self.grouper.recons_labels`. After several tests, I found that If freq of `Grouper` is too long so that it has empty groups in some periods then it crashes. And also have found that this is solved by using `ids` which has no consecutive duplicates instead of `self.grouper.recons_labels`.
https://api.github.com/repos/pandas-dev/pandas/pulls/28634
2019-09-26T13:06:48Z
2019-11-07T21:19:58Z
2019-11-07T21:19:57Z
2019-11-07T21:20:01Z
BUG: DataFrame.to_html validates formatters has the correct length
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 7ca93d7d75854..874aea1e22735 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -252,6 +252,7 @@ I/O - Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`) - Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`) - Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`) +- Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`) Plotting ^^^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3a50f63409582..15f21814b072d 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -561,7 +561,17 @@ def __init__( self.sparsify = sparsify self.float_format = float_format - self.formatters = formatters if formatters is not None else {} + if formatters is None: + self.formatters = {} + elif len(frame.columns) == len(formatters) or isinstance(formatters, dict): + self.formatters = formatters + else: + raise ValueError( + ( + "Formatters length({flen}) should match" + " DataFrame number of columns({dlen})" + ).format(flen=len(formatters), dlen=len(frame.columns)) + ) self.na_rep = na_rep self.decimal = decimal self.col_space = col_space diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 004dffd128dd6..ef19319e208d9 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -235,6 +235,15 @@ def test_to_html_truncate(datapath): assert result == expected +@pytest.mark.parametrize("size", [1, 5]) +def test_html_invalid_formatters_arg_raises(size): + # issue-28469 + df = DataFrame(columns=["a", "b", "c"]) 
+ msg = "Formatters length({}) should match DataFrame number of columns(3)" + with pytest.raises(ValueError, match=re.escape(msg.format(size))): + df.to_html(formatters=["{}".format] * size) + + def test_to_html_truncate_formatter(datapath): # issue-25955 data = [
- [x] closes #28469 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry @gabriellm1 @hugoecarl
https://api.github.com/repos/pandas-dev/pandas/pulls/28632
2019-09-26T12:18:52Z
2019-10-07T15:01:03Z
2019-10-07T15:01:02Z
2019-10-19T01:22:56Z
WEB: Add diversity note to team.md
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md index 41da3a0e82bdb..8eb2edebec817 100644 --- a/web/pandas/about/team.md +++ b/web/pandas/about/team.md @@ -36,6 +36,16 @@ If you want to support pandas development, you can find information in the [dona {% endfor %} </div> +## Diversity and Inclusion + +> _pandas_ expressly welcomes and encourages contributions from anyone who faces under-representation, discrimination in the technology industry +> or anyone willing to increase the diversity of our team. +> We have identified visible gaps and obstacles in sustaining diversity and inclusion in the open-source communities and we are proactive in increasing +> the diversity of our team. +> We have a [code of conduct]({base_url}/community/coc.html) to ensure a friendly and welcoming environment. +> Please send an email to [pandas-code-of-conduct-committee](mailto:pandas-coc@googlegroups.com), if you think we can do a +> better job at achieving this goal. + ## Governance Wes McKinney is the Benevolent Dictator for Life (BDFL).
@datapythonista kindly review
https://api.github.com/repos/pandas-dev/pandas/pulls/28630
2019-09-26T10:57:22Z
2019-10-04T02:13:39Z
2019-10-04T02:13:39Z
2019-10-04T08:48:40Z
TST: loc misbehaves when Period is at start of 3-level MultiIndex
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 3da3d1e4b1b41..cef77f5a795af 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -251,10 +251,8 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): ------- datetime, datetime/dateutil.parser._result, str """ - if not isinstance(arg, (str, unicode)): - # Note: cython recognizes `unicode` in both py2/py3, optimizes - # this check into a C call. - return arg + if not isinstance(arg, str): + raise TypeError("parse_time_string argument must be str") if getattr(freq, "_typ", None) == "dateoffset": freq = freq.rule_code diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 8b3b66bd1ee6b..ac19fa70442cd 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -617,6 +617,44 @@ def test_insert(self): result = period_range("2017Q1", periods=4, freq="Q").insert(1, na) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize( + "msg, key", + [ + (r"Period\('2019', 'A-DEC'\), 'foo', 'bar'", (Period(2019), "foo", "bar")), + (r"Period\('2019', 'A-DEC'\), 'y1', 'bar'", (Period(2019), "y1", "bar")), + (r"Period\('2019', 'A-DEC'\), 'foo', 'z1'", (Period(2019), "foo", "z1")), + ( + r"Period\('2018', 'A-DEC'\), Period\('2016', 'A-DEC'\), 'bar'", + (Period(2018), Period(2016), "bar"), + ), + (r"Period\('2018', 'A-DEC'\), 'foo', 'y1'", (Period(2018), "foo", "y1")), + ( + r"Period\('2017', 'A-DEC'\), 'foo', Period\('2015', 'A-DEC'\)", + (Period(2017), "foo", Period(2015)), + ), + (r"Period\('2017', 'A-DEC'\), 'z1', 'bar'", (Period(2017), "z1", "bar")), + ], + ) + def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key): + # issue 20684 + """ + parse_time_string return parameter if type not matched. + PeriodIndex.get_loc takes returned value from parse_time_string as a tuple. 
+ If first argument is Period and a tuple has 3 items, + process go on not raise exception + """ + df = DataFrame( + { + "A": [Period(2019), "x1", "x2"], + "B": [Period(2018), Period(2016), "y1"], + "C": [Period(2017), "z1", Period(2015)], + "V1": [1, 2, 3], + "V2": [10, 20, 30], + } + ).set_index(["A", "B", "C"]) + with pytest.raises(KeyError, match=msg): + df.loc[key] + def test_maybe_convert_timedelta(): pi = PeriodIndex(["2000", "2001"], freq="D") diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index 126a1bd12ad59..7df7dbbf91d0b 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -209,3 +209,13 @@ def test_try_parse_dates(): expected = np.array([parse(d, dayfirst=True) for d in arr]) tm.assert_numpy_array_equal(result, expected) + + +def test_parse_time_string_check_instance_type_raise_exception(): + # issue 20684 + with pytest.raises(TypeError): + parse_time_string((1, 2, 3)) + + result = parse_time_string("2019") + expected = (datetime(2019, 1, 1), datetime(2019, 1, 1), "year") + assert result == expected
If index is MultiIndex and level of 0 is PeriodIndex, loc function raise exception if all input of loc does not match index values Background: This bug only happens when MultiIndex's level is 3 and first level index is PeriodIndex. In this situation, if someone access one row using a '.loc' with a miss match key, then would not raise exception. Someone already change what i try to do `parse_time_string` function in '_libs.tslibs.parsing'. in the past, ``` def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if not isinstance(arg, str): return arg ``` What i try to do: ``` def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if not isinstance(arg, str): raise TypeError ``` now in master: ``` def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if not isinstance(arg, str): raise TypeError("parse_time_string argument must be str") ``` Just add tests for issue. - [x] closes (#20684) - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28628
2019-09-26T07:08:53Z
2019-10-30T12:23:35Z
2019-10-30T12:23:35Z
2019-10-30T14:04:29Z
Backport PR #28357 on branch 0.25.x (Change conda channel order for Windows builds)
diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 33c8122fb232a..88b38aaef237c 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -1,17 +1,15 @@ name: pandas-dev channels: - - defaults - conda-forge + - defaults dependencies: - blosc - bottleneck - - boost-cpp<1.67 - fastparquet>=0.2.1 - matplotlib=3.0.2 - numexpr - numpy=1.15.* - openpyxl - - parquet-cpp - pyarrow - pytables - python-dateutil diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d634859e72d7b..9573ac15dc45f 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -1,5 +1,6 @@ """ test parquet compat """ import datetime +from distutils.version import LooseVersion import os from warnings import catch_warnings @@ -238,6 +239,15 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): def test_cross_engine_fp_pa(df_cross_compat, pa, fp): # cross-compat with differing reading/writing engines + if ( + LooseVersion(pyarrow.__version__) < "0.15" + and LooseVersion(pyarrow.__version__) >= "0.13" + ): + pytest.xfail( + "Reading fastparquet with pyarrow in 0.14 fails: " + "https://issues.apache.org/jira/browse/ARROW-6492" + ) + df = df_cross_compat with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None)
Backport PR #28357: Change conda channel order for Windows builds
https://api.github.com/repos/pandas-dev/pandas/pulls/28627
2019-09-26T06:40:54Z
2019-09-26T11:32:27Z
2019-09-26T11:32:27Z
2019-09-26T11:40:36Z
REF: Assert json roundtrip equal
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 415b1d81eb3e4..2195bf248f43a 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -37,6 +37,14 @@ _mixed_frame = _frame.copy() +def assert_json_roundtrip_equal(result, expected, orient): + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + assert_frame_equal(result, expected) + + class TestPandasContainer: @pytest.fixture(scope="function", autouse=True) def setup(self, datapath): @@ -90,12 +98,7 @@ def test_frame_double_encoded_labels(self, orient): result = read_json(df.to_json(orient=orient), orient=orient) expected = df.copy() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("orient", ["split", "records", "values"]) def test_frame_non_unique_index(self, orient): @@ -103,12 +106,7 @@ def test_frame_non_unique_index(self, orient): result = read_json(df.to_json(orient=orient), orient=orient) expected = df.copy() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("orient", ["index", "columns"]) def test_frame_non_unique_index_raises(self, orient): @@ -172,12 +170,7 @@ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype): # TODO: debug why sort is required expected = expected.sort_index() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = 
range(len(expected.columns)) - - tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("dtype", [False, np.int64]) @pytest.mark.parametrize("convert_axes", [True, False]) @@ -191,11 +184,6 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype): if not numpy and (orient == "index" or (PY35 and orient == "columns")): expected = expected.sort_index() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - if ( numpy and (is_platform_32bit() or is_platform_windows()) @@ -205,7 +193,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype): # TODO: see what is causing roundtrip dtype loss expected = expected.astype(np.int32) - tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"]) @pytest.mark.parametrize("convert_axes", [True, False]) @@ -246,12 +234,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype): elif orient == "records" and convert_axes: expected.columns = expected.columns.astype(np.int64) - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) @@ -277,12 +260,7 @@ def test_roundtrip_categorical(self, orient, convert_axes, numpy): if not numpy and (orient == "index" or (PY35 and orient == "columns")): expected = expected.sort_index() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - 
tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) @@ -320,12 +298,7 @@ def test_roundtrip_timestamp(self, orient, convert_axes, numpy): expected.index = idx - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) @@ -354,12 +327,7 @@ def test_roundtrip_mixed(self, orient, convert_axes, numpy): if not numpy and (orient == "index" or (PY35 and orient == "columns")): expected = expected.sort_index() - if orient == "records" or orient == "values": - expected = expected.reset_index(drop=True) - if orient == "values": - expected.columns = range(len(expected.columns)) - - tm.assert_frame_equal(result, expected) + assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize( "data,msg,orient",
Replaces a some frequently repeated lines with a function. - [x] closes #28555 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28626
2019-09-26T04:10:26Z
2019-09-27T22:47:24Z
2019-09-27T22:47:24Z
2019-09-27T22:47:48Z
REF: separate out dispatch-centric ops functions
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 06c0e9722c045..16d2eaa410637 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,33 +5,18 @@ """ import datetime import operator -from typing import Any, Callable, Tuple, Union +from typing import Tuple import numpy as np from pandas._libs import Timedelta, Timestamp, lib -from pandas.errors import NullFrequencyError from pandas.util._decorators import Appender -from pandas.core.dtypes.common import ( - is_datetime64_dtype, - is_extension_array_dtype, - is_integer_dtype, - is_list_like, - is_object_dtype, - is_scalar, - is_timedelta64_dtype, -) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCExtensionArray, - ABCIndexClass, - ABCSeries, -) +from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna -from pandas._typing import ArrayLike -from pandas.core.construction import array, extract_array +from pandas.core.construction import extract_array from pandas.core.ops.array_ops import ( arithmetic_op, comparison_op, @@ -39,6 +24,8 @@ logical_op, ) from pandas.core.ops.array_ops import comp_method_OBJECT_ARRAY # noqa:F401 +from pandas.core.ops.dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401 +from pandas.core.ops.dispatch import should_series_dispatch from pandas.core.ops.docstrings import ( _arith_doc_FRAME, _flex_comp_doc_FRAME, @@ -358,71 +345,6 @@ def fill_binop(left, right, fill_value): # Dispatch logic -def should_extension_dispatch(left: ABCSeries, right: Any) -> bool: - """ - Identify cases where Series operation should use dispatch_to_extension_op. 
- - Parameters - ---------- - left : Series - right : object - - Returns - ------- - bool - """ - if ( - is_extension_array_dtype(left.dtype) - or is_datetime64_dtype(left.dtype) - or is_timedelta64_dtype(left.dtype) - ): - return True - - if not is_scalar(right) and is_extension_array_dtype(right): - # GH#22378 disallow scalar to exclude e.g. "category", "Int64" - return True - - return False - - -def should_series_dispatch(left, right, op): - """ - Identify cases where a DataFrame operation should dispatch to its - Series counterpart. - - Parameters - ---------- - left : DataFrame - right : DataFrame - op : binary operator - - Returns - ------- - override : bool - """ - if left._is_mixed_type or right._is_mixed_type: - return True - - if not len(left.columns) or not len(right.columns): - # ensure obj.dtypes[0] exists for each obj - return False - - ldtype = left.dtypes.iloc[0] - rdtype = right.dtypes.iloc[0] - - if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or ( - is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype) - ): - # numpy integer dtypes as timedelta64 dtypes in this scenario - return True - - if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype): - # in particular case where right is an array of DateOffsets - return True - - return False - - def dispatch_to_series(left, right, func, str_rep=None, axis=None): """ Evaluate the frame operation func(left, right) by evaluating @@ -489,58 +411,6 @@ def column_op(a, b): return new_data -def dispatch_to_extension_op( - op, - left: Union[ABCExtensionArray, np.ndarray], - right: Any, - keep_null_freq: bool = False, -): - """ - Assume that left or right is a Series backed by an ExtensionArray, - apply the operator defined by op. - - Parameters - ---------- - op : binary operator - left : ExtensionArray or np.ndarray - right : object - keep_null_freq : bool, default False - Whether to re-raise a NullFrequencyError unchanged, as opposed to - catching and raising TypeError. 
- - Returns - ------- - ExtensionArray or np.ndarray - 2-tuple of these if op is divmod or rdivmod - """ - # NB: left and right should already be unboxed, so neither should be - # a Series or Index. - - if left.dtype.kind in "mM" and isinstance(left, np.ndarray): - # We need to cast datetime64 and timedelta64 ndarrays to - # DatetimeArray/TimedeltaArray. But we avoid wrapping others in - # PandasArray as that behaves poorly with e.g. IntegerArray. - left = array(left) - - # The op calls will raise TypeError if the op is not defined - # on the ExtensionArray - - try: - res_values = op(left, right) - except NullFrequencyError: - # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError - # on add/sub of integers (or int-like). We re-raise as a TypeError. - if keep_null_freq: - # TODO: remove keep_null_freq after Timestamp+int deprecation - # GH#22535 is enforced - raise - raise TypeError( - "incompatible type for a datetime/timedelta " - "operation [{name}]".format(name=op.__name__) - ) - return res_values - - # ----------------------------------------------------------------------------- # Series @@ -906,92 +776,3 @@ def f(self, other): f.__name__ = op_name return f - - -# ----------------------------------------------------------------------------- -# Sparse - - -def maybe_dispatch_ufunc_to_dunder_op( - self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any -): - """ - Dispatch a ufunc to the equivalent dunder method. - - Parameters - ---------- - self : ArrayLike - The array whose dunder method we dispatch to - ufunc : Callable - A NumPy ufunc - method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'} - inputs : ArrayLike - The input arrays. - kwargs : Any - The additional keyword arguments, e.g. ``out``. 
- - Returns - ------- - result : Any - The result of applying the ufunc - """ - # special has the ufuncs we dispatch to the dunder op on - special = { - "add", - "sub", - "mul", - "pow", - "mod", - "floordiv", - "truediv", - "divmod", - "eq", - "ne", - "lt", - "gt", - "le", - "ge", - "remainder", - "matmul", - } - aliases = { - "subtract": "sub", - "multiply": "mul", - "floor_divide": "floordiv", - "true_divide": "truediv", - "power": "pow", - "remainder": "mod", - "divide": "div", - "equal": "eq", - "not_equal": "ne", - "less": "lt", - "less_equal": "le", - "greater": "gt", - "greater_equal": "ge", - } - - # For op(., Array) -> Array.__r{op}__ - flipped = { - "lt": "__gt__", - "le": "__ge__", - "gt": "__lt__", - "ge": "__le__", - "eq": "__eq__", - "ne": "__ne__", - } - - op_name = ufunc.__name__ - op_name = aliases.get(op_name, op_name) - - def not_implemented(*args, **kwargs): - return NotImplemented - - if method == "__call__" and op_name in special and kwargs.get("out") is None: - if isinstance(inputs[0], type(self)): - name = "__{}__".format(op_name) - return getattr(self, name, not_implemented)(inputs[1]) - else: - name = flipped.get(op_name, "__r{}__".format(op_name)) - return getattr(self, name, not_implemented)(inputs[0]) - else: - return NotImplemented diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index b72ef69ede199..55b4b1a899f65 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -36,6 +36,7 @@ from pandas.core.construction import extract_array from pandas.core.ops import missing +from pandas.core.ops.dispatch import dispatch_to_extension_op, should_extension_dispatch from pandas.core.ops.invalid import invalid_comparison from pandas.core.ops.roperator import rpow @@ -179,11 +180,7 @@ def arithmetic_op( Or a 2-tuple of these in the case of divmod or rdivmod. 
""" - from pandas.core.ops import ( - maybe_upcast_for_op, - should_extension_dispatch, - dispatch_to_extension_op, - ) + from pandas.core.ops import maybe_upcast_for_op keep_null_freq = isinstance( right, @@ -236,7 +233,6 @@ def comparison_op( ------- ndarrray or ExtensionArray """ - from pandas.core.ops import should_extension_dispatch, dispatch_to_extension_op # NB: We assume extract_array has already been called on left and right lvalues = left @@ -335,7 +331,6 @@ def logical_op( ------- ndarrray or ExtensionArray """ - from pandas.core.ops import should_extension_dispatch, dispatch_to_extension_op fill_int = lambda x: x diff --git a/pandas/core/ops/dispatch.py b/pandas/core/ops/dispatch.py new file mode 100644 index 0000000000000..9835d57ee7366 --- /dev/null +++ b/pandas/core/ops/dispatch.py @@ -0,0 +1,223 @@ +""" +Functions for defining unary operations. +""" +from typing import Any, Callable, Union + +import numpy as np + +from pandas.errors import NullFrequencyError + +from pandas.core.dtypes.common import ( + is_datetime64_dtype, + is_extension_array_dtype, + is_integer_dtype, + is_object_dtype, + is_scalar, + is_timedelta64_dtype, +) +from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries + +from pandas._typing import ArrayLike +from pandas.core.construction import array + + +def should_extension_dispatch(left: ABCSeries, right: Any) -> bool: + """ + Identify cases where Series operation should use dispatch_to_extension_op. + + Parameters + ---------- + left : Series + right : object + + Returns + ------- + bool + """ + if ( + is_extension_array_dtype(left.dtype) + or is_datetime64_dtype(left.dtype) + or is_timedelta64_dtype(left.dtype) + ): + return True + + if not is_scalar(right) and is_extension_array_dtype(right): + # GH#22378 disallow scalar to exclude e.g. 
"category", "Int64" + return True + + return False + + +def should_series_dispatch(left, right, op): + """ + Identify cases where a DataFrame operation should dispatch to its + Series counterpart. + + Parameters + ---------- + left : DataFrame + right : DataFrame + op : binary operator + + Returns + ------- + override : bool + """ + if left._is_mixed_type or right._is_mixed_type: + return True + + if not len(left.columns) or not len(right.columns): + # ensure obj.dtypes[0] exists for each obj + return False + + ldtype = left.dtypes.iloc[0] + rdtype = right.dtypes.iloc[0] + + if (is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or ( + is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype) + ): + # numpy integer dtypes as timedelta64 dtypes in this scenario + return True + + if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype): + # in particular case where right is an array of DateOffsets + return True + + return False + + +def dispatch_to_extension_op( + op, + left: Union[ABCExtensionArray, np.ndarray], + right: Any, + keep_null_freq: bool = False, +): + """ + Assume that left or right is a Series backed by an ExtensionArray, + apply the operator defined by op. + + Parameters + ---------- + op : binary operator + left : ExtensionArray or np.ndarray + right : object + keep_null_freq : bool, default False + Whether to re-raise a NullFrequencyError unchanged, as opposed to + catching and raising TypeError. + + Returns + ------- + ExtensionArray or np.ndarray + 2-tuple of these if op is divmod or rdivmod + """ + # NB: left and right should already be unboxed, so neither should be + # a Series or Index. + + if left.dtype.kind in "mM" and isinstance(left, np.ndarray): + # We need to cast datetime64 and timedelta64 ndarrays to + # DatetimeArray/TimedeltaArray. But we avoid wrapping others in + # PandasArray as that behaves poorly with e.g. IntegerArray. 
+ left = array(left) + + # The op calls will raise TypeError if the op is not defined + # on the ExtensionArray + + try: + res_values = op(left, right) + except NullFrequencyError: + # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError + # on add/sub of integers (or int-like). We re-raise as a TypeError. + if keep_null_freq: + # TODO: remove keep_null_freq after Timestamp+int deprecation + # GH#22535 is enforced + raise + raise TypeError( + "incompatible type for a datetime/timedelta " + "operation [{name}]".format(name=op.__name__) + ) + return res_values + + +def maybe_dispatch_ufunc_to_dunder_op( + self: ArrayLike, ufunc: Callable, method: str, *inputs: ArrayLike, **kwargs: Any +): + """ + Dispatch a ufunc to the equivalent dunder method. + + Parameters + ---------- + self : ArrayLike + The array whose dunder method we dispatch to + ufunc : Callable + A NumPy ufunc + method : {'reduce', 'accumulate', 'reduceat', 'outer', 'at', '__call__'} + inputs : ArrayLike + The input arrays. + kwargs : Any + The additional keyword arguments, e.g. ``out``. 
+ + Returns + ------- + result : Any + The result of applying the ufunc + """ + # special has the ufuncs we dispatch to the dunder op on + special = { + "add", + "sub", + "mul", + "pow", + "mod", + "floordiv", + "truediv", + "divmod", + "eq", + "ne", + "lt", + "gt", + "le", + "ge", + "remainder", + "matmul", + } + aliases = { + "subtract": "sub", + "multiply": "mul", + "floor_divide": "floordiv", + "true_divide": "truediv", + "power": "pow", + "remainder": "mod", + "divide": "div", + "equal": "eq", + "not_equal": "ne", + "less": "lt", + "less_equal": "le", + "greater": "gt", + "greater_equal": "ge", + } + + # For op(., Array) -> Array.__r{op}__ + flipped = { + "lt": "__gt__", + "le": "__ge__", + "gt": "__lt__", + "ge": "__le__", + "eq": "__eq__", + "ne": "__ne__", + } + + op_name = ufunc.__name__ + op_name = aliases.get(op_name, op_name) + + def not_implemented(*args, **kwargs): + return NotImplemented + + if method == "__call__" and op_name in special and kwargs.get("out") is None: + if isinstance(inputs[0], type(self)): + name = "__{}__".format(op_name) + return getattr(self, name, not_implemented)(inputs[1]) + else: + name = flipped.get(op_name, "__r{}__".format(op_name)) + return getattr(self, name, not_implemented)(inputs[0]) + else: + return NotImplemented
Seems like a reasonable chunk of stuff to get out of `__init__`. `dispatch_to_series` is not moved because it will end up using `array_ops` and I don't want to introduce circular dependencies.
https://api.github.com/repos/pandas-dev/pandas/pulls/28624
2019-09-26T01:03:07Z
2019-09-26T15:39:54Z
2019-09-26T15:39:54Z
2019-09-26T15:42:46Z
DOC: start using new bootstrap-based sphinx theme
diff --git a/doc/redirects.csv b/doc/redirects.csv index a1504f9175480..8c8079bb3fd2b 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -6,6 +6,7 @@ whatsnew,whatsnew/index release,whatsnew/index # getting started +install,getting_started/install 10min,getting_started/10min basics,getting_started/basics comparison_with_r,getting_started/comparison/comparison_with_r diff --git a/doc/source/conf.py b/doc/source/conf.py index 1da1948e45268..5e2a2db20b53c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -191,7 +191,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = "nature_with_gtoc" +html_theme = "pandas_sphinx_theme" # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths @@ -204,7 +204,7 @@ # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = ["themes"] +# html_theme_path = ["themes"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index aaf2040156a45..48c722bc16a86 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -1,3 +1,5 @@ +:orphan: + .. _ecosystem: {{ header }} diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst index eead28830f861..34bb4f930f175 100644 --- a/doc/source/getting_started/index.rst +++ b/doc/source/getting_started/index.rst @@ -12,6 +12,7 @@ Getting started .. 
toctree:: :maxdepth: 2 + install overview 10min basics diff --git a/doc/source/install.rst b/doc/source/getting_started/install.rst similarity index 100% rename from doc/source/install.rst rename to doc/source/getting_started/install.rst diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index f5669626aa2b3..b7cb8bfbdcebc 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -40,10 +40,8 @@ See the :ref:`overview` for more detail about what's in the library. {% endif %} {% if not single_doc %} What's New in 1.0.0 <whatsnew/v1.0.0> - install getting_started/index user_guide/index - ecosystem {% endif -%} {% if include_api -%} reference/index @@ -54,9 +52,9 @@ See the :ref:`overview` for more detail about what's in the library. {% endif %} * :doc:`whatsnew/v1.0.0` -* :doc:`install` * :doc:`getting_started/index` + * :doc:`getting_started/install` * :doc:`getting_started/overview` * :doc:`getting_started/10min` * :doc:`getting_started/basics` diff --git a/environment.yml b/environment.yml index 7629fa52e7829..f95af62e912c2 100644 --- a/environment.yml +++ b/environment.yml @@ -88,3 +88,5 @@ dependencies: - xlwt # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile - odfpy # pandas.read_excel - pyreadstat # pandas.read_spss + - pip: + - git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master diff --git a/requirements-dev.txt b/requirements-dev.txt index fd8e6378240b4..3849504f640a8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -59,4 +59,5 @@ xlrd xlsxwriter xlwt odfpy -pyreadstat \ No newline at end of file +pyreadstat +git+https://github.com/pandas-dev/pandas-sphinx-theme.git@master \ No newline at end of file diff --git a/web/pandas/getting_started.md b/web/pandas/getting_started.md index 99a7a9f4b2d60..9682cf90cad6f 100644 --- a/web/pandas/getting_started.md +++ b/web/pandas/getting_started.md @@ -4,7 +4,7 @@ The next steps provides the easiest and recommended way 
to set up your environment to use pandas. Other installation options can be found in -the [advanced installation page]({{ base_url}}/docs/install.html). +the [advanced installation page]({{ base_url}}/docs/getting_started/install.html). 1. Download [Anaconda](https://www.anaconda.com/distribution/) for your operating system and the latest Python version, run the installer, and follow the steps. Detailed instructions
closes https://github.com/pandas-dev/pandas/issues/15556/ I want to propose that we start using the new bootstrap-based theme that is being developed at https://github.com/pandas-dev/pandas-sphinx-theme/ for the dev docs. This way it will already be used for the docs at https://dev.pandas.io/docs/ so it can get some exposure before the next release. How I did it in this PR is to install it from git master https://github.com/pandas-dev/pandas-sphinx-theme/ instead of moving the actual source into the pandas repo. I would prefer doing it like this for now, as that makes it easier to further iterate on the theme (the other repo is set up with a faster doc build (disabled ipython directive + smaller api) and automatic preview using doctr on travis). In a later stage, we can still move it here if we want (or move the pandas-specific customizations here). A preview (of a subset, not all API pages) can be seen at https://dev.pandas.io/pandas-sphinx-theme/ There are still several "must todo's" to get the theme in a decent enough state for a release. There are some open issues on the theme repo, but I can also open an issue here to keep track of those. And of course, feedback on the theme is very welcome. I made two other changes: - moved the install.rst into the getting_started directory (so it is not a top-level navigation item). Given that the new website will have a page with a quick install linking to this more advanced install page, I think that is fine (and also, on a reworked home page of the docs it could also get a prominent place without being in the navigation bar) - Idem for the ecosystem page, as this will also get more exposure on the new markdown website (we actually need to remove that page / reconcile it with the website, but let's leave that for another PR)
https://api.github.com/repos/pandas-dev/pandas/pulls/28623
2019-09-25T21:07:08Z
2019-10-04T20:12:38Z
2019-10-04T20:12:38Z
2019-10-04T20:12:55Z
ENH: Allow plotting backend to be an option
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 8a481f194d408..b40a64420a0be 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -409,6 +409,7 @@ Plotting - Bug where :meth:`DataFrame.boxplot` would not accept a `color` parameter like `DataFrame.plot.box` (:issue:`26214`) - Bug in the ``xticks`` argument being ignored for :meth:`DataFrame.plot.bar` (:issue:`14119`) - :func:`set_option` now validates that the plot backend provided to ``'plotting.backend'`` implements the backend when the option is set, rather than when a plot is created (:issue:`28163`) +- :meth:`DataFrame.plot` now allow a ``backend`` keyword arugment to allow changing between backends in one session (:issue:`28619`). Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index c11d94c381d6d..6fc5b03920cba 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -3,7 +3,7 @@ from pandas._config import get_option -from pandas.util._decorators import Appender +from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.common import is_integer, is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries @@ -22,6 +22,7 @@ def hist_series( yrot=None, figsize=None, bins=10, + backend=None, **kwargs ): """ @@ -50,6 +51,14 @@ def hist_series( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + + .. versionadded:: 1.0.0 + **kwargs To be passed to the actual plotting function. 
@@ -62,7 +71,7 @@ def hist_series( -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. """ - plot_backend = _get_plot_backend() + plot_backend = _get_plot_backend(backend) return plot_backend.hist_series( self, by=by, @@ -93,6 +102,7 @@ def hist_frame( figsize=None, layout=None, bins=10, + backend=None, **kwargs ): """ @@ -145,6 +155,14 @@ def hist_frame( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + + .. versionadded:: 1.0.0 + **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. @@ -172,7 +190,7 @@ def hist_frame( ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """ - plot_backend = _get_plot_backend() + plot_backend = _get_plot_backend(backend) return plot_backend.hist_frame( data, column=column, @@ -192,181 +210,198 @@ def hist_frame( ) -def boxplot( - data, - column=None, - by=None, - ax=None, - fontsize=None, - rot=0, - grid=True, - figsize=None, - layout=None, - return_type=None, - **kwargs -): - """ - Make a box plot from DataFrame columns. - - Make a box-and-whisker plot from DataFrame columns, optionally grouped - by some other columns. A box plot is a method for graphically depicting - groups of numerical data through their quartiles. - The box extends from the Q1 to Q3 quartile values of the data, - with a line at the median (Q2). The whiskers extend from the edges - of box to show the range of the data. The position of the whiskers - is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box. - Outlier points are those past the end of the whiskers. 
- - For further details see - Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_. - - Parameters - ---------- - column : str or list of str, optional - Column name or list of names, or vector. - Can be any valid input to :meth:`pandas.DataFrame.groupby`. - by : str or array-like, optional - Column in the DataFrame to :meth:`pandas.DataFrame.groupby`. - One box-plot will be done per value of columns in `by`. - ax : object of class matplotlib.axes.Axes, optional - The matplotlib axes to be used by boxplot. - fontsize : float or str - Tick label font size in points or as a string (e.g., `large`). - rot : int or float, default 0 - The rotation angle of labels (in degrees) - with respect to the screen coordinate system. - grid : bool, default True - Setting this to True will show the grid. - figsize : A tuple (width, height) in inches - The size of the figure to create in matplotlib. - layout : tuple (rows, columns), optional - For example, (3, 5) will display the subplots - using 3 columns and 5 rows, starting from the top-left. - return_type : {'axes', 'dict', 'both'} or None, default 'axes' - The kind of object to return. The default is ``axes``. - - * 'axes' returns the matplotlib axes the boxplot is drawn on. - * 'dict' returns a dictionary whose values are the matplotlib - Lines of the boxplot. - * 'both' returns a namedtuple with the axes and dict. - * when grouping with ``by``, a Series mapping columns to - ``return_type`` is returned. - - If ``return_type`` is `None`, a NumPy array - of axes with the same shape as ``layout`` is returned. - **kwargs - All other plotting keyword arguments to be passed to - :func:`matplotlib.pyplot.boxplot`. - - Returns - ------- - result - See Notes. - - See Also - -------- - Series.plot.hist: Make a histogram. - matplotlib.pyplot.boxplot : Matplotlib equivalent plot. 
- - Notes - ----- - The return type depends on the `return_type` parameter: - - * 'axes' : object of class matplotlib.axes.Axes - * 'dict' : dict of matplotlib.lines.Line2D objects - * 'both' : a namedtuple with structure (ax, lines) - - For data grouped with ``by``, return a Series of the above or a numpy - array: - - * :class:`~pandas.Series` - * :class:`~numpy.array` (for ``return_type = None``) +_boxplot_doc = """ +Make a box plot from DataFrame columns. + +Make a box-and-whisker plot from DataFrame columns, optionally grouped +by some other columns. A box plot is a method for graphically depicting +groups of numerical data through their quartiles. +The box extends from the Q1 to Q3 quartile values of the data, +with a line at the median (Q2). The whiskers extend from the edges +of box to show the range of the data. The position of the whiskers +is set by default to `1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box. +Outlier points are those past the end of the whiskers. + +For further details see +Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_. + +Parameters +---------- +column : str or list of str, optional + Column name or list of names, or vector. + Can be any valid input to :meth:`pandas.DataFrame.groupby`. +by : str or array-like, optional + Column in the DataFrame to :meth:`pandas.DataFrame.groupby`. + One box-plot will be done per value of columns in `by`. +ax : object of class matplotlib.axes.Axes, optional + The matplotlib axes to be used by boxplot. +fontsize : float or str + Tick label font size in points or as a string (e.g., `large`). +rot : int or float, default 0 + The rotation angle of labels (in degrees) + with respect to the screen coordinate system. +grid : bool, default True + Setting this to True will show the grid. +figsize : A tuple (width, height) in inches + The size of the figure to create in matplotlib. 
+layout : tuple (rows, columns), optional + For example, (3, 5) will display the subplots + using 3 columns and 5 rows, starting from the top-left. +return_type : {'axes', 'dict', 'both'} or None, default 'axes' + The kind of object to return. The default is ``axes``. + + * 'axes' returns the matplotlib axes the boxplot is drawn on. + * 'dict' returns a dictionary whose values are the matplotlib + Lines of the boxplot. + * 'both' returns a namedtuple with the axes and dict. + * when grouping with ``by``, a Series mapping columns to + ``return_type`` is returned. + + If ``return_type`` is `None`, a NumPy array + of axes with the same shape as ``layout`` is returned. +%(backend)s\ + +**kwargs + All other plotting keyword arguments to be passed to + :func:`matplotlib.pyplot.boxplot`. + +Returns +------- +result + See Notes. + +See Also +-------- +Series.plot.hist: Make a histogram. +matplotlib.pyplot.boxplot : Matplotlib equivalent plot. + +Notes +----- +The return type depends on the `return_type` parameter: + +* 'axes' : object of class matplotlib.axes.Axes +* 'dict' : dict of matplotlib.lines.Line2D objects +* 'both' : a namedtuple with structure (ax, lines) + +For data grouped with ``by``, return a Series of the above or a numpy +array: + +* :class:`~pandas.Series` +* :class:`~numpy.array` (for ``return_type = None``) + +Use ``return_type='dict'`` when you want to tweak the appearance +of the lines after plotting. In this case a dict containing the Lines +making up the boxes, caps, fliers, medians, and whiskers is returned. + +Examples +-------- + +Boxplots can be created for every column in the dataframe +by ``df.boxplot()`` or indicating the columns to be used: + +.. plot:: + :context: close-figs + + >>> np.random.seed(1234) + >>> df = pd.DataFrame(np.random.randn(10, 4), + ... 
columns=['Col1', 'Col2', 'Col3', 'Col4']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) + +Boxplots of variables distributions grouped by the values of a third +variable can be created using the option ``by``. For instance: + +.. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10, 2), + ... columns=['Col1', 'Col2']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> boxplot = df.boxplot(by='X') + +A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot +in order to group the data by combination of the variables in the x-axis: + +.. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10, 3), + ... columns=['Col1', 'Col2', 'Col3']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', + ... 'B', 'A', 'B', 'A', 'B']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y']) + +The layout of boxplot can be adjusted giving a tuple to ``layout``: + +.. plot:: + :context: close-figs + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... layout=(2, 1)) - Use ``return_type='dict'`` when you want to tweak the appearance - of the lines after plotting. In this case a dict containing the Lines - making up the boxes, caps, fliers, medians, and whiskers is returned. +Additional formatting can be done to the boxplot, like suppressing the grid +(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``) +or changing the fontsize (i.e. ``fontsize=15``): - Examples - -------- - - Boxplots can be created for every column in the dataframe - by ``df.boxplot()`` or indicating the columns to be used: - - .. plot:: - :context: close-figs - - >>> np.random.seed(1234) - >>> df = pd.DataFrame(np.random.randn(10,4), - ... 
columns=['Col1', 'Col2', 'Col3', 'Col4']) - >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) - - Boxplots of variables distributions grouped by the values of a third - variable can be created using the option ``by``. For instance: - - .. plot:: - :context: close-figs - - >>> df = pd.DataFrame(np.random.randn(10, 2), - ... columns=['Col1', 'Col2']) - >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', - ... 'B', 'B', 'B', 'B', 'B']) - >>> boxplot = df.boxplot(by='X') - - A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot - in order to group the data by combination of the variables in the x-axis: - - .. plot:: - :context: close-figs +.. plot:: + :context: close-figs - >>> df = pd.DataFrame(np.random.randn(10,3), - ... columns=['Col1', 'Col2', 'Col3']) - >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', - ... 'B', 'B', 'B', 'B', 'B']) - >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', - ... 'B', 'A', 'B', 'A', 'B']) - >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y']) + >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) - The layout of boxplot can be adjusted giving a tuple to ``layout``: +The parameter ``return_type`` can be used to select the type of element +returned by `boxplot`. When ``return_type='axes'`` is selected, +the matplotlib axes on which the boxplot is drawn are returned: - .. plot:: - :context: close-figs + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes') + >>> type(boxplot) + <class 'matplotlib.axes._subplots.AxesSubplot'> - >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', - ... layout=(2, 1)) +When grouping with ``by``, a Series mapping columns to ``return_type`` +is returned: - Additional formatting can be done to the boxplot, like suppressing the grid - (``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``) - or changing the fontsize (i.e. ``fontsize=15``): + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... 
return_type='axes') + >>> type(boxplot) + <class 'pandas.core.series.Series'> - .. plot:: - :context: close-figs +If ``return_type`` is `None`, a NumPy array of axes with the same shape +as ``layout`` is returned: - >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type=None) + >>> type(boxplot) + <class 'numpy.ndarray'> +""" - The parameter ``return_type`` can be used to select the type of element - returned by `boxplot`. When ``return_type='axes'`` is selected, - the matplotlib axes on which the boxplot is drawn are returned: - >>> boxplot = df.boxplot(column=['Col1','Col2'], return_type='axes') - >>> type(boxplot) - <class 'matplotlib.axes._subplots.AxesSubplot'> +_backend_doc = """\ +backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. - When grouping with ``by``, a Series mapping columns to ``return_type`` - is returned: + .. versionadded:: 1.0.0 +""" - >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', - ... return_type='axes') - >>> type(boxplot) - <class 'pandas.core.series.Series'> - If ``return_type`` is `None`, a NumPy array of axes with the same shape - as ``layout`` is returned: - - >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', - ... 
return_type=None) - >>> type(boxplot) - <class 'numpy.ndarray'> - """ +@Substitution(backend="") +@Appender(_boxplot_doc) +def boxplot( + data, + column=None, + by=None, + ax=None, + fontsize=None, + rot=0, + grid=True, + figsize=None, + layout=None, + return_type=None, + **kwargs +): plot_backend = _get_plot_backend("matplotlib") return plot_backend.boxplot( data, @@ -383,7 +418,8 @@ def boxplot( ) -@Appender(boxplot.__doc__) +@Substitution(backend=_backend_doc) +@Appender(_boxplot_doc) def boxplot_frame( self, column=None, @@ -395,9 +431,10 @@ def boxplot_frame( figsize=None, layout=None, return_type=None, + backend=None, **kwargs ): - plot_backend = _get_plot_backend() + plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame( self, column=column, @@ -425,6 +462,7 @@ def boxplot_frame_groupby( layout=None, sharex=False, sharey=True, + backend=None, **kwargs ): """ @@ -454,6 +492,14 @@ def boxplot_frame_groupby( Whether y-axes will be shared among subplots. .. versionadded:: 0.23.1 + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + + .. versionadded:: 1.0.0 + **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function. @@ -477,7 +523,7 @@ def boxplot_frame_groupby( >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, subplots=False) """ - plot_backend = _get_plot_backend() + plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame_groupby( grouped, subplots=subplots, @@ -586,6 +632,14 @@ class PlotAccessor(PandasObject): labels with "(right)" in the legend. include_bool : bool, default is False If True, boolean values can be plotted. 
+ backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + + .. versionadded:: 1.0.0 + **kwargs Options to pass to matplotlib plotting method. @@ -715,7 +769,7 @@ def _get_call_args(backend_name, data, args, kwargs): return x, y, kind, kwargs def __call__(self, *args, **kwargs): - plot_backend = _get_plot_backend() + plot_backend = _get_plot_backend(kwargs.pop("backend", None)) x, y, kind, kwargs = self._get_call_args( plot_backend.__name__, self._parent, args, kwargs diff --git a/pandas/tests/plotting/test_backend.py b/pandas/tests/plotting/test_backend.py index 41b1a88b15acb..c84b78c79e771 100644 --- a/pandas/tests/plotting/test_backend.py +++ b/pandas/tests/plotting/test_backend.py @@ -9,7 +9,7 @@ import pandas dummy_backend = types.ModuleType("pandas_dummy_backend") -setattr(dummy_backend, "plot", lambda *args, **kwargs: None) +setattr(dummy_backend, "plot", lambda *args, **kwargs: "used_dummy") @pytest.fixture @@ -38,6 +38,14 @@ def test_backend_is_correct(monkeypatch, restore_backend): ) +def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + df = pandas.DataFrame([1, 2, 3]) + + assert pandas.get_option("plotting.backend") == "matplotlib" + assert df.plot(backend="pandas_dummy_backend") == "used_dummy" + + @td.skip_if_no_mpl def test_register_entrypoint(restore_backend):
- [x] closes #28619 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28622
2019-09-25T19:42:15Z
2019-11-05T17:07:09Z
2019-11-05T17:07:09Z
2019-11-05T17:07:14Z
DEPR: Deprecate Index.set_value
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index 576f734d517aa..dd59a99b3df9e 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -166,7 +166,6 @@ Selecting Index.get_slice_bound Index.get_value Index.get_values - Index.set_value Index.isin Index.slice_indexer Index.slice_locs diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 2668734031ee1..16d23d675a8bb 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -123,7 +123,9 @@ Documentation Improvements Deprecations ~~~~~~~~~~~~ -- +- ``Index.set_value`` has been deprecated. For a given index ``idx``, array ``arr``, + value in ``idx`` of ``idx_val`` and a new value of ``val``, ``idx.set_value(arr, idx_val, val)`` + is equivalent to ``arr[idx.get_loc(idx_val)] = val``, which should be used instead (:issue:`28621`). - .. _whatsnew_1000.prior_deprecations: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 0b5f9fb61fce8..afa4f1a5a8c76 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -205,7 +205,9 @@ class Index(IndexOpsMixin, PandasObject): """ # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"]) + _deprecations = DirNamesMixin._deprecations | frozenset( + ["tolist", "dtype_str", "set_value"] + ) # To hand over control to subclasses _join_precedence = 1 @@ -4680,10 +4682,20 @@ def set_value(self, arr, key, value): """ Fast lookup of value from 1-dimensional ndarray. + .. deprecated:: 1.0 + Notes ----- Only use this if you know what you're doing. """ + warnings.warn( + ( + "The 'set_value' method is deprecated, and " + "will be removed in a future version." 
+ ), + FutureWarning, + stacklevel=2, + ) self._engine.set_value( com.values_from_object(arr), com.values_from_object(key), value ) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index d1ed79118d2fa..82d5ddd1ac358 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1908,16 +1908,21 @@ def test_is_monotonic_incomparable(self, attr): index = Index([5, datetime.now(), 7]) assert not getattr(index, attr) - def test_get_set_value(self): + def test_set_value_deprecated(self): + # GH 28621 + idx = self.create_index() + arr = np.array([1, 2, 3]) + with tm.assert_produces_warning(FutureWarning): + idx.set_value(arr, idx[1], 80) + assert arr[1] == 80 + + def test_get_value(self): # TODO: Remove function? GH 19728 values = np.random.randn(100) date = self.dateIndex[67] assert_almost_equal(self.dateIndex.get_value(values, date), values[67]) - self.dateIndex.set_value(values, date, 10) - assert values[67] == 10 - @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}]) @pytest.mark.parametrize( "index,expected",
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Deprecates ``Index.set_value``. This is a very little used and confusing method IMO. For a given index ``idx``, array ``arr``, value ``idx_val`` in ``idx`` and a new value of ``val``, ``idx.set_value(arr, idx_val, val)`` is equivalent to ``arr[idx.get_loc(idx_val) = val``, which is more standard and should be used instead.
https://api.github.com/repos/pandas-dev/pandas/pulls/28621
2019-09-25T19:32:53Z
2019-10-03T06:56:11Z
2019-10-03T06:56:10Z
2019-10-03T06:56:14Z
Backport PR #28614 on branch 0.25.x (CI: troubleshoot s3fs failures)
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 48e9e3b6896f3..847e9f66d2c72 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -29,7 +29,7 @@ dependencies: - python-snappy - python=3.6.* - pytz - - s3fs + - s3fs<0.3 - scikit-learn - scipy - sqlalchemy diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index e9c5dadbc924a..d54708d48a65e 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -18,7 +18,7 @@ dependencies: - python-dateutil - python=3.6.* - pytz - - s3fs + - s3fs<0.3 - scipy - sqlalchemy - xlrd diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 4bc490e202818..2fc4d160c8c27 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -17,7 +17,7 @@ dependencies: - pytest-xdist - pytest-mock - hypothesis>=3.58.0 - - s3fs + - s3fs<0.3 - pip - pyreadstat - pip: diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 9750a36d9350b..b08868f311f76 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -782,7 +782,7 @@ def test_categorical_no_compress(): def test_sort(): - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8 + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501 # This should result in a properly sorted Series so that the plot # has a sorted x axis # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
Backport PR #28614: CI: troubleshoot s3fs failures
https://api.github.com/repos/pandas-dev/pandas/pulls/28615
2019-09-25T15:48:08Z
2019-09-26T12:24:13Z
2019-09-26T12:24:13Z
2019-09-26T12:24:13Z
CI: troubleshoot s3fs failures
diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index b2a74fceaf0fa..e4e917d13990c 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -29,7 +29,7 @@ dependencies: - python-snappy - python=3.6.* - pytz - - s3fs + - s3fs<0.3 - scikit-learn - scipy - sqlalchemy diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index e9c5dadbc924a..d54708d48a65e 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -18,7 +18,7 @@ dependencies: - python-dateutil - python=3.6.* - pytz - - s3fs + - s3fs<0.3 - scipy - sqlalchemy - xlrd diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 903636f2fe060..440ca6c480b87 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -17,7 +17,7 @@ dependencies: - pytest-xdist>=1.29.0 - pytest-mock - hypothesis>=3.58.0 - - s3fs + - s3fs<0.3 - pip - pyreadstat - pip: diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index e09af3fd48ee6..fcc0aa3b1c015 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -782,7 +782,7 @@ def test_categorical_no_compress(): def test_sort(): - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8 + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501 # This should result in a properly sorted Series so that the plot # has a sorted x axis # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
xref #28612. Recent failures used 0.3.4, the last passing build I see used 0.2.0
https://api.github.com/repos/pandas-dev/pandas/pulls/28614
2019-09-25T14:40:37Z
2019-09-25T15:47:43Z
2019-09-25T15:47:43Z
2019-09-25T16:00:29Z
fix unnecessary sort in pd.read_json and orient="index"
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 751db2b88069d..fd1c1271a5e37 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -311,6 +311,7 @@ I/O - Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`) - Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`) - Bug in :func:`DataFrame.to_parquet` where writing to GCS would fail with `engine='fastparquet'` if the file did not already exist (:issue:`28326`) +- Bug in :meth:`DataFrame.read_json` where using ``orient="index"`` would not maintain the order (:issue:`28557`) - Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`) Plotting diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 73f4985e201f1..6ce288890b6c7 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -12,7 +12,7 @@ from pandas.core.dtypes.common import ensure_str, is_period_dtype -from pandas import DataFrame, MultiIndex, Series, isna, to_datetime +from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime from pandas._typing import Scalar from pandas.core.reshape.concat import concat @@ -1112,15 +1112,13 @@ def _parse_no_numpy(self): self.check_keys_split(decoded) self.obj = DataFrame(dtype=None, **decoded) elif orient == "index": - self.obj = ( - DataFrame.from_dict( - loads(json, precise_float=self.precise_float), - dtype=None, - orient="index", - ) - .sort_index(axis="columns") - .sort_index(axis="index") + self.obj = DataFrame.from_dict( + loads(json, precise_float=self.precise_float), + dtype=None, + orient="index", ) + if compat.PY35: + self.obj = self.obj.sort_index(axis="columns").sort_index(axis="index") elif orient == "table": self.obj = parse_table_schema(json, 
precise_float=self.precise_float) else: diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 2195bf248f43a..8e28740c70bad 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -166,8 +166,7 @@ def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype): expected = self.frame.copy() - if not numpy and (orient == "index" or (PY35 and orient == "columns")): - # TODO: debug why sort is required + if not numpy and PY35 and orient in ("index", "columns"): expected = expected.sort_index() assert_json_roundtrip_equal(result, expected, orient) @@ -181,7 +180,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype): data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype ) expected = self.intframe.copy() - if not numpy and (orient == "index" or (PY35 and orient == "columns")): + if not numpy and PY35 and orient in ("index", "columns"): expected = expected.sort_index() if ( @@ -216,7 +215,7 @@ def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype): ) expected = df.copy() - if not numpy and (orient == "index" or (PY35 and orient == "columns")): + if not numpy and PY35 and orient in ("index", "columns"): expected = expected.sort_index() if not dtype:
- [x] closes #28557 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28606
2019-09-25T01:29:14Z
2019-10-09T14:50:47Z
2019-10-09T14:50:46Z
2019-10-11T04:12:27Z
CLN: Exception x5
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 0a3f4ed3cc91d..bd74180403ad9 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -519,7 +519,7 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, try: ts = parse_datetime_string(ts, dayfirst=dayfirst, yearfirst=yearfirst) - except Exception: + except (ValueError, OverflowError): raise ValueError("could not convert string to Timestamp") return convert_to_tsobject(ts, tz, unit, dayfirst, yearfirst) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index d099a77a77044..ca70c8af45f2f 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -309,9 +309,9 @@ cdef parse_datetime_string_with_reso(date_string, freq=None, dayfirst=False, parsed, reso = dateutil_parse(date_string, _DEFAULT_DATETIME, dayfirst=dayfirst, yearfirst=yearfirst, ignoretz=False, tzinfos=None) - except Exception as e: + except (ValueError, OverflowError) as err: # TODO: allow raise of errors within instead - raise DateParseError(e) + raise DateParseError(err) if parsed is None: raise DateParseError("Could not parse {dstr}".format(dstr=date_string)) return parsed, parsed, reso diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 67fa79ad5da8c..1c9bd01b16739 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -281,17 +281,15 @@ def _remove_labels_from_axis(axis): for t in axis.get_majorticklabels(): t.set_visible(False) - try: - # set_visible will not be effective if - # minor axis has NullLocator and NullFormattor (default) - if isinstance(axis.get_minor_locator(), ticker.NullLocator): - axis.set_minor_locator(ticker.AutoLocator()) - if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): - axis.set_minor_formatter(ticker.FormatStrFormatter("")) - for t in axis.get_minorticklabels(): - 
t.set_visible(False) - except Exception: # pragma no cover - raise + # set_visible will not be effective if + # minor axis has NullLocator and NullFormattor (default) + if isinstance(axis.get_minor_locator(), ticker.NullLocator): + axis.set_minor_locator(ticker.AutoLocator()) + if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): + axis.set_minor_formatter(ticker.FormatStrFormatter("")) + for t in axis.get_minorticklabels(): + t.set_visible(False) + axis.get_label().set_visible(False) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index e09af3fd48ee6..fcc0aa3b1c015 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -782,7 +782,7 @@ def test_categorical_no_compress(): def test_sort(): - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: flake8 + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501 # This should result in a properly sorted Series so that the plot # has a sorted x axis # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
https://api.github.com/repos/pandas-dev/pandas/pulls/28605
2019-09-25T00:40:55Z
2019-09-25T19:58:07Z
2019-09-25T19:58:07Z
2019-09-25T20:01:47Z
CLN: Assorted typings
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6e73e1636a75b..002bbcc63d04f 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -176,7 +176,6 @@ def _reconstruct_data(values, dtype, original): ------- Index for extension types, otherwise ndarray casted to dtype """ - from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) @@ -184,7 +183,7 @@ def _reconstruct_data(values, dtype, original): values = values.astype(dtype) # we only support object dtypes bool Index - if isinstance(original, Index): + if isinstance(original, ABCIndexClass): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) @@ -833,7 +832,7 @@ def duplicated(values, keep="first"): return f(values, keep=keep) -def mode(values, dropna=True): +def mode(values, dropna: bool = True): """ Returns the mode(s) of an array. @@ -1888,7 +1887,7 @@ def searchsorted(arr, value, side="left", sorter=None): } -def diff(arr, n, axis=0): +def diff(arr, n: int, axis: int = 0): """ difference of n between self, analogous to s-s.shift(n) @@ -1904,7 +1903,6 @@ def diff(arr, n, axis=0): Returns ------- shifted - """ n = int(n) @@ -1935,13 +1933,15 @@ def diff(arr, n, axis=0): f = _diff_special[arr.dtype.name] f(arr, out_arr, n, axis) else: - res_indexer = [slice(None)] * arr.ndim - res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n) - res_indexer = tuple(res_indexer) - - lag_indexer = [slice(None)] * arr.ndim - lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None) - lag_indexer = tuple(lag_indexer) + # To keep mypy happy, _res_indexer is a list while res_indexer is + # a tuple, ditto for lag_indexer. 
+ _res_indexer = [slice(None)] * arr.ndim + _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n) + res_indexer = tuple(_res_indexer) + + _lag_indexer = [slice(None)] * arr.ndim + _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None) + lag_indexer = tuple(_lag_indexer) # need to make sure that we account for na for datelike/timedelta # we don't actually want to subtract these i8 numbers diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index bcdbf0855cbb4..4bcc53606aeca 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -26,7 +26,7 @@ _default_hash_key = "0123456789123456" -def _combine_hash_arrays(arrays, num_items): +def _combine_hash_arrays(arrays, num_items: int): """ Parameters ---------- @@ -55,7 +55,11 @@ def _combine_hash_arrays(arrays, num_items): def hash_pandas_object( - obj, index=True, encoding="utf8", hash_key=None, categorize=True + obj, + index: bool = True, + encoding: str = "utf8", + hash_key=None, + categorize: bool = True, ): """ Return a data hash of the Index/Series/DataFrame. 
@@ -125,7 +129,10 @@ def hash_pandas_object( for _ in [None] ) num_items += 1 - hashes = itertools.chain(hashes, index_hash_generator) + + # keep `hashes` specifically a generator to keep mypy happy + _hashes = itertools.chain(hashes, index_hash_generator) + hashes = (x for x in _hashes) h = _combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype="uint64", copy=False) @@ -179,7 +186,7 @@ def hash_tuples(vals, encoding="utf8", hash_key=None): return h -def hash_tuple(val, encoding="utf8", hash_key=None): +def hash_tuple(val, encoding: str = "utf8", hash_key=None): """ Hash a single tuple efficiently @@ -201,7 +208,7 @@ def hash_tuple(val, encoding="utf8", hash_key=None): return h -def _hash_categorical(c, encoding, hash_key): +def _hash_categorical(c, encoding: str, hash_key: str): """ Hash a Categorical by hashing its categories, and then mapping the codes to the hashes @@ -239,7 +246,7 @@ def _hash_categorical(c, encoding, hash_key): return result -def hash_array(vals, encoding="utf8", hash_key=None, categorize=True): +def hash_array(vals, encoding: str = "utf8", hash_key=None, categorize: bool = True): """ Given a 1d array, return an array of deterministic integers. @@ -317,7 +324,7 @@ def hash_array(vals, encoding="utf8", hash_key=None, categorize=True): return vals -def _hash_scalar(val, encoding="utf8", hash_key=None): +def _hash_scalar(val, encoding: str = "utf8", hash_key=None): """ Hash scalar value
Broken off from abandoned local branches. Also fixes one of the problems currently afflicting the CI in tests.groupby.test_categorical
https://api.github.com/repos/pandas-dev/pandas/pulls/28604
2019-09-25T00:00:34Z
2019-10-01T04:00:19Z
2019-10-01T04:00:19Z
2019-10-01T13:33:37Z
OPS: Remove mask_cmp_op fallback behavior
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a3d75d69e1e82..a78bc07ac2715 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -199,7 +199,7 @@ Timezones Numeric ^^^^^^^ - Bug in :meth:`DataFrame.quantile` with zero-column :class:`DataFrame` incorrectly raising (:issue:`23925`) -- +- :class:`DataFrame` inequality comparisons with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`) - Conversion diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4f027843fbac1..eb901630b753a 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -28,7 +28,7 @@ ABCIndexClass, ABCSeries, ) -from pandas.core.dtypes.missing import isna, notna +from pandas.core.dtypes.missing import isna from pandas._typing import ArrayLike from pandas.core.construction import array, extract_array @@ -354,38 +354,6 @@ def fill_binop(left, right, fill_value): return left, right -def mask_cmp_op(x, y, op): - """ - Apply the function `op` to only non-null points in x and y. 
- - Parameters - ---------- - x : array-like - y : array-like - op : binary operation - - Returns - ------- - result : ndarray[bool] - """ - xrav = x.ravel() - result = np.empty(x.size, dtype=bool) - if isinstance(y, (np.ndarray, ABCSeries)): - yrav = y.ravel() - mask = notna(xrav) & notna(yrav) - result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask]))) - else: - mask = notna(xrav) - result[mask] = op(np.array(list(xrav[mask])), y) - - if op == operator.ne: # pragma: no cover - np.putmask(result, ~mask, True) - else: - np.putmask(result, ~mask, False) - result = result.reshape(x.shape) - return result - - # ----------------------------------------------------------------------------- # Dispatch logic @@ -905,14 +873,6 @@ def _flex_comp_method_FRAME(cls, op, special): op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) - def na_op(x, y): - try: - with np.errstate(invalid="ignore"): - result = op(x, y) - except TypeError: - result = mask_cmp_op(x, y, op) - return result - doc = _flex_comp_doc_FRAME.format( op_name=op_name, desc=_op_descriptions[op_name]["desc"] ) @@ -926,16 +886,16 @@ def f(self, other, axis=default_axis, level=None): # Another DataFrame if not self._indexed_same(other): self, other = self.align(other, "outer", level=level, copy=False) - new_data = dispatch_to_series(self, other, na_op, str_rep) + new_data = dispatch_to_series(self, other, op, str_rep) return self._construct_result(new_data) elif isinstance(other, ABCSeries): return _combine_series_frame( - self, other, na_op, fill_value=None, axis=axis, level=level + self, other, op, fill_value=None, axis=axis, level=level ) else: # in this case we always have `np.ndim(other) == 0` - new_data = dispatch_to_series(self, other, na_op) + new_data = dispatch_to_series(self, other, op) return self._construct_result(new_data) f.__name__ = op_name diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 
fc3640503e385..3b46e834933b3 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -235,21 +235,46 @@ def _test_seq(df, idx_ser, col_ser): rs = df.le(df) assert not rs.loc[0, 0] + def test_bool_flex_frame_complex_dtype(self): # complex arr = np.array([np.nan, 1, 6, np.nan]) arr2 = np.array([2j, np.nan, 7, None]) df = pd.DataFrame({"a": arr}) df2 = pd.DataFrame({"a": arr2}) - rs = df.gt(df2) - assert not rs.values.any() + + msg = "|".join( + [ + "'>' not supported between instances of '.*' and 'complex'", + r"unorderable types: .*complex\(\)", # PY35 + ] + ) + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df.gt(df2) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df["a"].gt(df2["a"]) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df.values > df2.values + rs = df.ne(df2) assert rs.values.all() arr3 = np.array([2j, np.nan, None]) df3 = pd.DataFrame({"a": arr3}) - rs = df3.gt(2j) - assert not rs.values.any() + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df3.gt(2j) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df3["a"].gt(2j) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df3.values > 2j + + def test_bool_flex_frame_object_dtype(self): # corner, dtype=object df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]}) df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
- [x] closes #28079 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #28050. To make the Series vs DataFrame behavior consistent, the two main options are a) change the DataFrame behavior (this PR) or b) change the Series behavior. The latter is complicated by the fact that for object dtypes the Series comparisons go through comp_method_OBJECT_ARRAY instead of the numpy op, so we would still have to change the complex-case test changed here.
https://api.github.com/repos/pandas-dev/pandas/pulls/28601
2019-09-24T21:19:22Z
2019-09-26T12:25:29Z
2019-09-26T12:25:28Z
2019-09-27T17:30:07Z
REF/TST: Corner cases for op(DataFrame, Series)
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4f027843fbac1..1f658296a559e 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -785,18 +785,9 @@ def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=N return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level) - else: - if not len(other): - return self * np.nan - - if not len(self): - # Ambiguous case, use _series so works with DataFrame - return self._constructor( - data=self._series, index=self.index, columns=self.columns - ) - # default axis is columns - return self._combine_match_columns(other, func, level=level) + # default axis is columns + return self._combine_match_columns(other, func, level=level) def _align_method_FRAME(left, right, axis): diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index fc3640503e385..da399750c9bcd 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -663,3 +663,34 @@ def test_operations_with_interval_categories_index(self, all_arithmetic_operator result = getattr(df, op)(num) expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind) tm.assert_frame_equal(result, expected) + + +def test_frame_with_zero_len_series_corner_cases(): + # GH#28600 + # easy all-float case + df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"]) + ser = pd.Series(dtype=np.float64) + + result = df + ser + expected = pd.DataFrame(df.values * np.nan, columns=df.columns) + tm.assert_frame_equal(result, expected) + + result = df == ser + expected = pd.DataFrame(False, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + # non-float case should not raise on comparison + df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns) + result = df2 == ser + expected = pd.DataFrame(False, index=df.index, 
columns=df.columns) + tm.assert_frame_equal(result, expected) + + +def test_zero_len_frame_with_series_corner_cases(): + # GH#28600 + df = pd.DataFrame(columns=["A", "B"], dtype=np.float64) + ser = pd.Series([1, 2], index=["A", "B"]) + + result = df + ser + expected = df + tm.assert_frame_equal(result, expected)
We have two special cases in `_combine_series_frame` that are never tested ATM. This adds tests for them, then notes that the special case handling code is unnecessary and this can now fall through to the general case code. In a follow-up, we'll be able to simplify _combine_series_frame further, but for now I want to keep the changed logic obvious. Also note that the removed ``` if not len(other): return self * np.nan ``` is actually wrong in the non-float test case in L681-685 in the test file.
https://api.github.com/repos/pandas-dev/pandas/pulls/28600
2019-09-24T17:58:59Z
2019-09-26T12:27:47Z
2019-09-26T12:27:47Z
2019-09-26T14:47:19Z
CLN: unify __finalize__ treatment for Series ops
diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 4f027843fbac1..4e85a3ff104c4 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -606,6 +606,9 @@ def _construct_result(left, result, index, name, dtype=None): """ out = left._constructor(result, index=index, dtype=dtype) out = out.__finalize__(left) + + # Set the result's name after __finalize__ is called because __finalize__ + # would set it back to self.name out.name = name return out @@ -660,14 +663,6 @@ def wrapper(self, other): res_name = get_op_result_name(self, other) - # TODO: shouldn't we be applying finalize whenever - # not isinstance(other, ABCSeries)? - finalizer = ( - lambda x: x.__finalize__(self) - if isinstance(other, (np.ndarray, ABCIndexClass)) - else x - ) - if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early return NotImplemented @@ -680,13 +675,7 @@ def wrapper(self, other): res_values = comparison_op(lvalues, rvalues, op) - result = self._constructor(res_values, index=self.index) - result = finalizer(result) - - # Set the result's name after finalizer is called because finalizer - # would set it back to self.name - result.name = res_name - return result + return _construct_result(self, res_values, index=self.index, name=res_name) wrapper.__name__ = op_name return wrapper @@ -703,14 +692,6 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) res_name = get_op_result_name(self, other) - # TODO: shouldn't we be applying finalize whenever - # not isinstance(other, ABCSeries)? 
- finalizer = ( - lambda x: x.__finalize__(self) - if not isinstance(other, (ABCSeries, ABCIndexClass)) - else x - ) - if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented @@ -719,8 +700,7 @@ def wrapper(self, other): rvalues = extract_array(other, extract_numpy=True) res_values = logical_op(lvalues, rvalues, op) - result = self._constructor(res_values, index=self.index, name=res_name) - return finalizer(result) + return _construct_result(self, res_values, index=self.index, name=res_name) wrapper.__name__ = op_name return wrapper
ATM we have three slightly different usages for the arithmetic, comparison and logical ops, with no clear reason for the differences. This changes that to make the arithmetic the one true version, which has the added benefit of letting us share _construct_result code across these three functions.
https://api.github.com/repos/pandas-dev/pandas/pulls/28590
2019-09-24T01:40:48Z
2019-09-26T12:30:40Z
2019-09-26T12:30:40Z
2019-09-26T14:42:48Z
CLN: remove unused args from _construct_result
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0638c4c1b6a01..69ef3b68406b7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5269,7 +5269,7 @@ def _arith_op(left, right): with np.errstate(all="ignore"): res_values = _arith_op(this.values, other.values) new_data = dispatch_fill_zeros(func, this.values, other.values, res_values) - return this._construct_result(other, new_data, _arith_op) + return this._construct_result(new_data) def _combine_match_index(self, other, func, level=None): left, right = self.align(other, join="outer", axis=0, level=level, copy=False) @@ -5282,44 +5282,31 @@ def _combine_match_index(self, other, func, level=None): # fastpath --> operate directly on values with np.errstate(all="ignore"): new_data = func(left.values.T, right.values).T - return left._construct_result(other, new_data, func) + return left._construct_result(new_data) def _combine_match_columns(self, other: Series, func, level=None): left, right = self.align(other, join="outer", axis=1, level=level, copy=False) # at this point we have `left.columns.equals(right.index)` new_data = ops.dispatch_to_series(left, right, func, axis="columns") - return left._construct_result(right, new_data, func) + return left._construct_result(new_data) - def _combine_const(self, other, func): - # scalar other or np.ndim(other) == 0 - new_data = ops.dispatch_to_series(self, other, func) - return self._construct_result(other, new_data, func) - - def _construct_result(self, other, result, func): + def _construct_result(self, result) -> "DataFrame": """ Wrap the result of an arithmetic, comparison, or logical operation. Parameters ---------- - other : object result : DataFrame - func : binary operator Returns ------- DataFrame - - Notes - ----- - `func` is included for compat with SparseDataFrame signature, is not - needed here. 
""" out = self._constructor(result, index=self.index, copy=False) # Pin columns instead of passing to constructor for compat with # non-unique columns case out.columns = self.columns return out - # TODO: finalize? we do for SparseDataFrame def combine(self, other, func, fill_value=None, overwrite=True): """ diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 0c1e1e90c003b..4f027843fbac1 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -892,7 +892,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): if fill_value is not None: self = self.fillna(fill_value) - return self._combine_const(other, op) + new_data = dispatch_to_series(self, other, op) + return self._construct_result(new_data) f.__name__ = op_name @@ -926,7 +927,7 @@ def f(self, other, axis=default_axis, level=None): if not self._indexed_same(other): self, other = self.align(other, "outer", level=level, copy=False) new_data = dispatch_to_series(self, other, na_op, str_rep) - return self._construct_result(other, new_data, na_op) + return self._construct_result(new_data) elif isinstance(other, ABCSeries): return _combine_series_frame( @@ -934,7 +935,8 @@ def f(self, other, axis=default_axis, level=None): ) else: # in this case we always have `np.ndim(other) == 0` - return self._combine_const(other, na_op) + new_data = dispatch_to_series(self, other, na_op) + return self._construct_result(new_data) f.__name__ = op_name @@ -957,7 +959,7 @@ def f(self, other): "Can only compare identically-labeled DataFrame objects" ) new_data = dispatch_to_series(self, other, func, str_rep) - return self._construct_result(other, new_data, func) + return self._construct_result(new_data) elif isinstance(other, ABCSeries): return _combine_series_frame( @@ -967,8 +969,8 @@ def f(self, other): # straight boolean comparisons we want to allow all columns # (regardless of dtype to pass thru) See #4537 for discussion. 
- res = self._combine_const(other, func) - return res + new_data = dispatch_to_series(self, other, func) + return self._construct_result(new_data) f.__name__ = op_name
These were there for compat with the SparseDataFrame._construct_result signature, which is no longer relevant. Also got rid of _combine_const; that was also just waiting for the SparseDataFrame version to be gone.
https://api.github.com/repos/pandas-dev/pandas/pulls/28589
2019-09-24T01:37:31Z
2019-09-24T12:06:59Z
2019-09-24T12:06:59Z
2019-09-24T15:41:49Z
CLN: indexing Exception in Series
diff --git a/pandas/core/series.py b/pandas/core/series.py index 2431bfcfd0356..c87e371354f63 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1131,7 +1131,9 @@ def _get_with(self, key): elif isinstance(key, tuple): try: return self._get_values_tuple(key) - except Exception: + except ValueError: + # if we don't have a MultiIndex, we may still be able to handle + # a 1-tuple. see test_1tuple_without_multiindex if len(key) == 1: key = key[0] if isinstance(key, slice): @@ -1186,7 +1188,9 @@ def _get_values(self, indexer): return self._constructor( self._data.get_slice(indexer), fastpath=True ).__finalize__(self) - except Exception: + except ValueError: + # mpl compat if we look up e.g. ser[:, np.newaxis]; + # see tests.series.timeseries.test_mpl_compat_hack return self._values[indexer] def _get_value(self, label, takeable: bool = False): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index e375bd459e66f..d478fbfa1686d 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1202,3 +1202,12 @@ def test_readonly_indices(): result = df["data"].iloc[indices] expected = df["data"].loc[[1, 3, 6]] tm.assert_series_equal(result, expected) + + +def test_1tuple_without_multiindex(): + ser = pd.Series(range(5)) + key = (slice(3),) + + result = ser[key] + expected = ser[key[0]] + tm.assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/28588
2019-09-24T00:49:50Z
2019-09-24T12:07:41Z
2019-09-24T12:07:41Z
2019-09-24T15:49:06Z
TST/CLN: Exception catching
diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 85eab91af3c48..c3ba5c0545b8b 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -8,6 +8,7 @@ import pandas as pd from pandas import DataFrame, Series, concat, date_range, isna from pandas.api.types import is_scalar +from pandas.core.indexing import IndexingError from pandas.tests.indexing.common import Base from pandas.util import testing as tm @@ -722,7 +723,7 @@ def test_iloc_mask(self): else: accessor = df ans = str(bin(accessor[mask]["nums"].sum())) - except Exception as e: + except (ValueError, IndexingError, NotImplementedError) as e: ans = str(e) key = tuple([idx, method]) diff --git a/pandas/tests/io/pytables/test_pytables.py b/pandas/tests/io/pytables/test_pytables.py index 856d97e29f2c0..ae604b1141204 100644 --- a/pandas/tests/io/pytables/test_pytables.py +++ b/pandas/tests/io/pytables/test_pytables.py @@ -37,7 +37,6 @@ import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal, set_timezone -from pandas.io.formats.printing import pprint_thing from pandas.io.pytables import ( ClosedFileError, HDFStore, @@ -3415,14 +3414,9 @@ def test_string_select(self): expected = df[df.x == "none"] assert_frame_equal(result, expected) - try: - result = store.select("df", "x!=none") - expected = df[df.x != "none"] - assert_frame_equal(result, expected) - except Exception as detail: - pprint_thing("[{0}]".format(detail)) - pprint_thing(store) - pprint_thing(expected) + result = store.select("df", "x!=none") + expected = df[df.x != "none"] + assert_frame_equal(result, expected) df2 = df.copy() df2.loc[df2.x == "", "x"] = np.nan diff --git a/pandas/util/testing.py b/pandas/util/testing.py index af726caa52e88..aee58f808d9e6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -9,7 +9,6 @@ from shutil import rmtree import string import tempfile -import traceback from typing import 
Union, cast import warnings import zipfile @@ -2291,10 +2290,7 @@ def wrapper(*args, **kwargs): " and error {error}".format(error=e) ) - try: - e_str = traceback.format_exc(e) - except Exception: - e_str = str(e) + e_str = str(e) if any(m.lower() in e_str.lower() for m in _skip_on_messages): skip(
https://api.github.com/repos/pandas-dev/pandas/pulls/28587
2019-09-23T22:45:42Z
2019-09-24T12:24:04Z
2019-09-24T12:24:04Z
2019-09-24T15:50:08Z
Fix typo in class DataFrame
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0638c4c1b6a01..be969e939afda 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -312,7 +312,7 @@ class DataFrame(NDFrame): .. versionchanged:: 0.25.0 If data is a list of dicts, column order follows insertion-order - Python 3.6 and later. + for Python 3.6 and later. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry This is basically a version of #28579 for the master branch.
https://api.github.com/repos/pandas-dev/pandas/pulls/28586
2019-09-23T22:04:09Z
2019-09-24T01:26:13Z
2019-09-24T01:26:13Z
2019-09-24T17:45:09Z
WEB: Restructuring pages and navigation, styling and new footer
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html index 253318182f30c..fe3e4d1245d93 100644 --- a/web/pandas/_templates/layout.html +++ b/web/pandas/_templates/layout.html @@ -20,41 +20,44 @@ <link rel="stylesheet" href="{{ base_url }}{{ stylesheet }}"> {% endfor %} + <script src="https://kit.fontawesome.com/79e5369384.js" crossorigin="anonymous"></script> </head> <body> <header> <nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark"> - <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> - <span class="navbar-toggler-icon"></span> - </button> + <div class="container"> + <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#nav-content" aria-controls="nav-content" aria-expanded="false" aria-label="Toggle navigation"> + <span class="navbar-toggler-icon"></span> + </button> - {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %} + {% if static.logo %}<a class="navbar-brand" href="{{ base_url }}/"><img alt="" src="{{ base_url }}{{ static.logo }}"/></a>{% endif %} - <div class="collapse navbar-collapse" id="nav-content"> - <ul class="navbar-nav"> - {% for item in navbar %} - {% if not item.has_subitems %} - <li class="nav-item"> - <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a> - </li> - {% else %} - <li class="nav-item dropdown"> - <a class="nav-link dropdown-toggle" - data-toggle="dropdown" - id="{{ item.slug }}" - href="#" - role="button" - aria-haspopup="true" - aria-expanded="false">{{ item.name }}</a> - <div class="dropdown-menu" aria-labelledby="{{ item.slug }}"> - {% for subitem in item.target %} - <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ 
subitem.target }}">{{ subitem.name }}</a> - {% endfor %} - </div> - </li> - {% endif %} - {% endfor %} - </ul> + <div class="collapse navbar-collapse" id="nav-content"> + <ul class="navbar-nav ml-auto"> + {% for item in navbar %} + {% if not item.has_subitems %} + <li class="nav-item"> + <a class="nav-link" href="{% if not item.target.startswith("http") %}{{ base_url }}{% endif %}{{ item.target }}">{{ item.name }}</a> + </li> + {% else %} + <li class="nav-item dropdown"> + <a class="nav-link dropdown-toggle" + data-toggle="dropdown" + id="{{ item.slug }}" + href="#" + role="button" + aria-haspopup="true" + aria-expanded="false">{{ item.name }}</a> + <div class="dropdown-menu" aria-labelledby="{{ item.slug }}"> + {% for subitem in item.target %} + <a class="dropdown-item" href="{% if not subitem.target.startswith("http") %}{{ base_url }}{% endif %}{{ subitem.target }}">{{ subitem.name }}</a> + {% endfor %} + </div> + </li> + {% endif %} + {% endfor %} + </ul> + </div> </div> </nav> </header> @@ -64,11 +67,30 @@ </div> </main> <footer class="container pt-4 pt-md-5 border-top"> - <p class="float-right"> - <a href="#">Back to top</a> - </p> + <ul class="list-inline social-buttons float-right"> + <li class="list-inline-item"> + <a href="https://twitter.com/pandas_dev/"> + <i class="fab fa-twitter"></i> + </a> + </li> + <li class="list-inline-item"> + <a href="https://github.com/pandas-dev/pandas/"> + <i class="fab fa-github"></i> + </a> + </li> + <li class="list-inline-item"> + <a href="https://stackoverflow.com/questions/tagged/pandas"> + <i class="fab fa-stack-overflow"></i> + </a> + </li> + <li class="list-inline-item"> + <a href="https://pandas.discourse.group"> + <i class="fab fa-discourse"></i> + </a> + </li> + </ul> <p> - © 2009 - 2019, pandas team + pandas is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a> </p> </footer> diff --git a/web/pandas/community/citing.md b/web/pandas/about/citing.md similarity index 98% rename from 
web/pandas/community/citing.md rename to web/pandas/about/citing.md index 6bad948bb3736..77b79c41aa4d1 100644 --- a/web/pandas/community/citing.md +++ b/web/pandas/about/citing.md @@ -1,6 +1,6 @@ -# Citing pandas +# Citing and logo -## Citing +## Citing pandas If you use _pandas_ for a scientific publication, we would appreciate citations to one of the following papers: diff --git a/web/pandas/community/about.md b/web/pandas/about/index.html similarity index 100% rename from web/pandas/community/about.md rename to web/pandas/about/index.html diff --git a/web/pandas/community/roadmap.md b/web/pandas/about/roadmap.md similarity index 100% rename from web/pandas/community/roadmap.md rename to web/pandas/about/roadmap.md diff --git a/web/pandas/about/sponsors.md b/web/pandas/about/sponsors.md new file mode 100644 index 0000000000000..dcc6e367e5d64 --- /dev/null +++ b/web/pandas/about/sponsors.md @@ -0,0 +1,41 @@ +# Sponsors + +## NumFOCUS + +![](https://numfocus.org/wp-content/uploads/2018/01/optNumFocus_LRG.png) + +_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States. +NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the +health and sustainability of the project. Visit numfocus.org for more information. + +Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible +to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation. + +## Tidelift + +_pandas_ is part of the [Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-pandas?utm_source=pypi-pandas&utm_medium=referral&utm_campaign=readme). +You can support pandas by becoming a Tidelift subscriber. + +## Institutional partners + +Institutional Partners are companies and universities that support the project by employing contributors. 
+Current Institutional Partners include: + +<ul> + {% for company in partners.active if company.employs %} + <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li> + {% endfor %} +</ul> + +## In-kind sponsors + +- [OVH](https://us.ovhcloud.com/): Hosting +- [Indeed](https://opensource.indeedeng.io/): Logo and website design + +## Past institutional partners + +<ul> + {% for company in partners.past %} + <li><a href="{{ company.url }}">{{ company.name }}</a></li> + {% endfor %} +</ul> diff --git a/web/pandas/community/team.md b/web/pandas/about/team.md similarity index 63% rename from web/pandas/community/team.md rename to web/pandas/about/team.md index c0a15081e1fa8..41da3a0e82bdb 100644 --- a/web/pandas/community/team.md +++ b/web/pandas/about/team.md @@ -36,25 +36,12 @@ If you want to support pandas development, you can find information in the [dona {% endfor %} </div> -## BDFL +## Governance Wes McKinney is the Benevolent Dictator for Life (BDFL). -## Governance - The project governance is available in the [project governance documents](https://github.com/pandas-dev/pandas-governance). -## NumFOCUS - -![](https://numfocus.org/wp-content/uploads/2018/01/optNumFocus_LRG.png) - -_pandas_ is a Sponsored Project of [NumFOCUS](https://numfocus.org/), a 501(c)(3) nonprofit charity in the United States. -NumFOCUS provides _pandas_ with fiscal, legal, and administrative support to help ensure the -health and sustainability of the project. Visit numfocus.org for more information. - -Donations to _pandas_ are managed by NumFOCUS. For donors in the United States, your gift is tax-deductible -to the extent provided by law. As with any donation, you should consult with your tax adviser about your particular tax situation. - ## Code of conduct committee <ul> @@ -71,19 +58,6 @@ to the extent provided by law. 
As with any donation, you should consult with you {% endfor %} </ul> -## Institutional partners - -<ul> - {% for company in partners.active if company.employs %} - <li><a href="{{ company.url }}">{{ company.name }}</a> ({{ company.employs }})</li> - {% endfor %} -</ul> - -In-kind sponsors - -- [Indeed](https://opensource.indeedeng.io/): Logo and website design -- Can we find a donor for the hosting (website, benchmarks,...?) - ## Emeritus maintainers <ul> @@ -91,11 +65,3 @@ In-kind sponsors <li>{{ person }}</li> {% endfor %} </ul> - -## Past institutional partners - -<ul> - {% for company in partners.past %} - <li><a href="{{ company.url }}">{{ company.name }}</a></li> - {% endfor %} -</ul> diff --git a/web/pandas/blog.html b/web/pandas/community/blog.html similarity index 100% rename from web/pandas/blog.html rename to web/pandas/community/blog.html diff --git a/web/pandas/community/coc.md b/web/pandas/community/coc.md index 2841349fdb556..de0e8120f7eee 100644 --- a/web/pandas/community/coc.md +++ b/web/pandas/community/coc.md @@ -1,4 +1,4 @@ -# Contributor Code of Conduct +# Code of conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index af27c31b52d50..cf242e86f879f 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -1,4 +1,4 @@ -# Pandas ecosystem +# Ecosystem Increasingly, packages are being built on top of pandas to address specific needs in data preparation, analysis and visualization. 
This is diff --git a/web/pandas/config.yml b/web/pandas/config.yml index c7c4b77e309f7..d5c505f298437 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -4,12 +4,11 @@ main: ignore: - _templates/layout.html - config.yml - - blog.html # blog will be added at a later stage - try.md # the binder page will be added later github_repo_url: pandas-dev/pandas context_preprocessors: - pandas_web.Preprocessors.navbar_add_info - # - pandas_web.Preprocessors.blog_add_posts + - pandas_web.Preprocessors.blog_add_posts - pandas_web.Preprocessors.maintainers_add_info - pandas_web.Preprocessors.home_add_releases markdown_extensions: @@ -17,46 +16,48 @@ main: - tables - fenced_code static: - logo: # path to the logo when it's in the repo + logo: # /static/img/pandas.svg css: - /static/css/pandas.css navbar: - - name: "Install" - target: /install.html + - name: "About us" + target: + - name: "About pandas" + target: /about/index.html + - name: "Project roadmap" + target: /about/roadmap.html + - name: "Team" + target: /about/team.html + - name: "Sponsors" + target: /about/sponsors.html + - name: "Citing and logo" + target: /about/citing.html + - name: "Getting started" + target: /getting_started.html - name: "Documentation" target: - - name: "Getting started" - target: /docs/getting_started/index.html - name: "User guide" target: /docs/user_guide/index.html - name: "API reference" target: /docs/reference/index.html - - name: "Contributing to pandas" - target: /docs/development/index.html - name: "Release notes" target: /docs/whatsnew/index.html + - name: "Older versions" + target: https://pandas.pydata.org/pandas-docs/version/ - name: "Community" target: - - name: "About pandas" - target: /community/about.html - - name: "Project roadmap" - target: /community/roadmap.html - - name: "Ecosystem" - target: /community/ecosystem.html + - name: "Blog" + target: /community/blog.html - name: "Ask a question (StackOverflow)" target: 
https://stackoverflow.com/questions/tagged/pandas - - name: "Discuss (mailing list)" - target: https://groups.google.com/forum/#!forum/pydata - - name: "Team" - target: /community/team.html - - name: "Code of Conduct" + - name: "Discuss" + target: https://pandas.discourse.group + - name: "Code of conduct" target: /community/coc.html - - name: "Citing pandas" - target: /community/citing.html - # - name: "Blog" - # target: /blog.html - - name: "Donate" - target: /donate.html + - name: "Ecosystem" + target: /community/ecosystem.html + - name: "Contribute" + target: /contribute.html blog: num_posts: 8 feed: diff --git a/web/pandas/contribute.md b/web/pandas/contribute.md new file mode 100644 index 0000000000000..825a5870bf5a0 --- /dev/null +++ b/web/pandas/contribute.md @@ -0,0 +1,12 @@ +# Contribute to pandas + +_pandas_ is and always will be **free**. To make the development sustainable, we need _pandas_ users, corporate +or individual, to support the development by providing their time and money. + +You can find more information about current developers in the [team page](about/team.html), +and about current sponsors in the [sponsors page](about/sponsors.html). +Financial contributions will mainly be used to advance in the [pandas roadmap](about/roadmap.html). + +- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org) +- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page]({{ base_url }}/docs/development/index.html) +- If you want to support _pandas_ with a **donation**, please use the [donations page](donate.html). diff --git a/web/pandas/donate.md b/web/pandas/donate.md index 5f5b07fb8763c..69db7e4648e77 100644 --- a/web/pandas/donate.md +++ b/web/pandas/donate.md @@ -1,16 +1,5 @@ # Donate to pandas -_pandas_ is and always will be **free**. 
To make de development sustainable, we need _pandas_ users, corporate -or individual, to support the development by providing their time and money. - -You can find more information about current developers and supporters in the [team page](community/team.html). -Financial contributions will mainly be used to advance in the [pandas roadmap](community/roadmap.html). - -- If your **company or organization** is interested in helping make pandas better, please contact us at [info@numfocus.org](mailto:info@numfocus.org) -- If you want to contribute to _pandas_ with your **time**, please visit the [contributing page]({{ base_url }}/docs/development/index.html) -- If you want to support _pandas_ with a **donation**, please use the form below: - - <div id="salsalabs-donate-container"> </div> <script type="text/javascript" diff --git a/web/pandas/install.md b/web/pandas/getting_started.md similarity index 62% rename from web/pandas/install.md rename to web/pandas/getting_started.md index 84721b3d1d9a4..99a7a9f4b2d60 100644 --- a/web/pandas/install.md +++ b/web/pandas/getting_started.md @@ -1,4 +1,6 @@ -# Installation instructions +# Getting started + +## Installation instructions The next steps provides the easiest and recommended way to set up your environment to use pandas. Other installation options can be found in @@ -21,8 +23,29 @@ the [advanced installation page]({{ base_url}}/docs/install.html). <img class="img-fluid" alt="" src="{{ base_url }}/static/img/install/pandas_import_and_version.png"/> -5. Now you are ready to use pandas you can write your code in the next cells. +5. Now you are ready to use pandas, and you can write your code in the next cells. +## Tutorials You can learn more about pandas in the [tutorials](#), and more about JupyterLab in the [JupyterLab documentation](https://jupyterlab.readthedocs.io/en/stable/user/interface.html). 
+ +## Books + +The book we recommend to learn pandas is [Python for Data Analysis](https://amzn.to/2KI5JJw), +by [Wes McKinney](https://wesmckinney.com/), creator of pandas. + +<a href="https://amzn.to/2KI5JJw"> + <img alt="Python for Data Analysis" src="{{ base_url }}/static/img/pydata_book.gif"/> +</a> + +## Videos + +<iframe width="560" height="315" frameborder="0" +src="https://www.youtube.com/embed/_T8LGqJtuGc" +allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" +allowfullscreen></iframe> + +## Cheat sheet + +[pandas cheat sheet](https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf) diff --git a/web/pandas/index.html b/web/pandas/index.html index 9f1a0e9a64174..df6e5ab9a330b 100644 --- a/web/pandas/index.html +++ b/web/pandas/index.html @@ -10,7 +10,7 @@ <h1>pandas</h1> built on top of the <a href="http://www.python.org">Python</a> programming language. </p> <p> - <a class="btn btn-primary" href="{{ base_url }}/install.html">Install pandas now!</a> + <a class="btn btn-primary" href="{{ base_url }}/getting_started.html">Install pandas now!</a> </p> </section> @@ -19,7 +19,7 @@ <h1>pandas</h1> <h5>Getting started</h5> <ul> <!-- <li><a href="{{ base_url }}/try.html">Try pandas online</a></li> --> - <li><a href="{{ base_url }}/install.html">Install pandas</a></li> + <li><a href="{{ base_url }}/getting_started.html">Install pandas</a></li> <li><a href="{{ base_url }}/docs/getting_started/index.html">Getting started</a></li> </ul> </div> diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css index 5911de96b5fa9..0a227cf8d96c9 100644 --- a/web/pandas/static/css/pandas.css +++ b/web/pandas/static/css/pandas.css @@ -1,16 +1,54 @@ body { padding-top: 5em; - padding-bottom: 3em; + color: #444; +} +h1 { + font-size: 2.4rem; + font-weight: 700; + color: #130654; +} +h2 { + font-size: 1.45rem; + font-weight: 700; + color: black; +} +h3 { + font-size: 1.3rem; + font-weight: 600; + color: black; +} +a { + color: #130654; } 
code { white-space: pre; } +.fab { + font-size: 1.2rem; + color: #666; +} +.fab:hover { + color: #130654; +} a.navbar-brand img { max-height: 2em; } div.card { margin: 0 0 .2em .2em !important; } +div.card .card-title { + font-weight: 500; + color: #130654; +} .book { padding: 0 20%; } +.bg-dark { + background-color: #130654 !important; +} +.navbar-dark .navbar-nav .nav-link { + color: rgba(255, 255, 255, .9); +} +.navbar-dark .navbar-nav .nav-link:hover { + color: white; +}
xref #28519 closes #28520 You can see the result rendered in: https://datapythonista.github.io/pandas-web/ (the new logo is not in the PR). Been discussing with NumPy the navigation, and what is proposed here is with minimal differences agreed with them. See https://github.com/numpy/numpy.org/issues/43 I don't touch the home page here, I think we need to remove almost everything we've got so far (based on the current), and move more in the direction of Dask or Jupyter. Discussions about it better in #28168 than here.
https://api.github.com/repos/pandas-dev/pandas/pulls/28582
2019-09-23T18:33:21Z
2019-09-26T06:42:27Z
2019-09-26T06:42:27Z
2019-09-26T13:22:31Z
Fix typo
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c0b331f356c3c..4c39e18ce5002 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -320,7 +320,7 @@ class DataFrame(NDFrame): .. versionchanged :: 0.25.0 If data is a list of dicts, column order follows insertion-order - Python 3.6 and later. + for Python 3.6 and later. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28579
2019-09-23T14:47:24Z
2019-09-23T15:21:25Z
2019-09-23T15:21:25Z
2019-09-23T22:05:02Z
DOC: Add scaling to large datasets section
diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 0000000000000..e23892d6100e8 --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,4 @@ +data/ +timeseries.csv +timeseries.parquet +timeseries_wide.parquet diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index f5669626aa2b3..6ff42eee9dad2 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -83,6 +83,7 @@ See the :ref:`overview` for more detail about what's in the library. * :doc:`user_guide/style` * :doc:`user_guide/options` * :doc:`user_guide/enhancingperf` + * :doc:`user_guide/scale` * :doc:`user_guide/sparse` * :doc:`user_guide/gotchas` * :doc:`user_guide/cookbook` diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst index 05df83decbd7e..b86961a71433b 100644 --- a/doc/source/user_guide/index.rst +++ b/doc/source/user_guide/index.rst @@ -38,6 +38,7 @@ Further information on any specific method can be obtained in the style options enhancingperf + scale sparse gotchas cookbook diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst new file mode 100644 index 0000000000000..7b590a3a1fcc8 --- /dev/null +++ b/doc/source/user_guide/scale.rst @@ -0,0 +1,373 @@ +.. _scale: + +************************* +Scaling to large datasets +************************* + +Pandas provides data structures for in-memory analytics, which makes using pandas +to analyze datasets that are larger than memory datasets somewhat tricky. Even datasets +that are a sizable fraction of memory become unwieldy, as some pandas operations need +to make intermediate copies. + +This document provides a few recommendations for scaling your analysis to larger datasets. +It's a complement to :ref:`enhancingperf`, which focuses on speeding up analysis +for datasets that fit in memory. + +But first, it's worth considering *not using pandas*. Pandas isn't the right +tool for all situations. 
If you're working with very large datasets and a tool +like PostgreSQL fits your needs, then you should probably be using that. +Assuming you want or need the expressiveness and power of pandas, let's carry on. + +.. ipython:: python + + import pandas as pd + import numpy as np + +.. ipython:: python + :suppress: + + from pandas.util.testing import _make_timeseries + + # Make a random in-memory dataset + ts = _make_timeseries(freq="30S", seed=0) + ts.to_csv("timeseries.csv") + ts.to_parquet("timeseries.parquet") + + +Load less data +-------------- + +.. ipython:: python + :suppress: + + # make a similar dataset with many columns + timeseries = [ + _make_timeseries(freq="1T", seed=i).rename(columns=lambda x: f"{x}_{i}") + for i in range(10) + ] + ts_wide = pd.concat(timeseries, axis=1) + ts_wide.to_parquet("timeseries_wide.parquet") + +Suppose our raw dataset on disk has many columns:: + + id_0 name_0 x_0 y_0 id_1 name_1 x_1 ... name_8 x_8 y_8 id_9 name_9 x_9 y_9 + timestamp ... + 2000-01-01 00:00:00 1015 Michael -0.399453 0.095427 994 Frank -0.176842 ... Dan -0.315310 0.713892 1025 Victor -0.135779 0.346801 + 2000-01-01 00:01:00 969 Patricia 0.650773 -0.874275 1003 Laura 0.459153 ... Ursula 0.913244 -0.630308 1047 Wendy -0.886285 0.035852 + 2000-01-01 00:02:00 1016 Victor -0.721465 -0.584710 1046 Michael 0.524994 ... Ray -0.656593 0.692568 1064 Yvonne 0.070426 0.432047 + 2000-01-01 00:03:00 939 Alice -0.746004 -0.908008 996 Ingrid -0.414523 ... Jerry -0.958994 0.608210 978 Wendy 0.855949 -0.648988 + 2000-01-01 00:04:00 1017 Dan 0.919451 -0.803504 1048 Jerry -0.569235 ... Frank -0.577022 -0.409088 994 Bob -0.270132 0.335176 + ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... + 2000-12-30 23:56:00 999 Tim 0.162578 0.512817 973 Kevin -0.403352 ... Tim -0.380415 0.008097 1041 Charlie 0.191477 -0.599519 + 2000-12-30 23:57:00 970 Laura -0.433586 -0.600289 958 Oliver -0.966577 ... 
Zelda 0.971274 0.402032 1038 Ursula 0.574016 -0.930992 + 2000-12-30 23:58:00 1065 Edith 0.232211 -0.454540 971 Tim 0.158484 ... Alice -0.222079 -0.919274 1022 Dan 0.031345 -0.657755 + 2000-12-30 23:59:00 1019 Ingrid 0.322208 -0.615974 981 Hannah 0.607517 ... Sarah -0.424440 -0.117274 990 George -0.375530 0.563312 + 2000-12-31 00:00:00 937 Ursula -0.906523 0.943178 1018 Alice -0.564513 ... Jerry 0.236837 0.807650 985 Oliver 0.777642 0.783392 + + [525601 rows x 40 columns] + + +To load the columns we want, we have two options. +Option 1 loads in all the data and then filters to what we need. + +.. ipython:: python + + columns = ['id_0', 'name_0', 'x_0', 'y_0'] + + pd.read_parquet("timeseries_wide.parquet")[columns] + +Option 2 only loads the columns we request. + +.. ipython:: python + + pd.read_parquet("timeseries_wide.parquet", columns=columns) + +If we were to measure the memory usage of the two calls, we'd see that specifying +``columns`` uses about 1/10th the memory in this case. + +With :func:`pandas.read_csv`, you can specify ``usecols`` to limit the columns +read into memory. Not all file formats that can be read by pandas provide an option +to read a subset of columns. + +Use efficient datatypes +----------------------- + +The default pandas data types are not the most memory efficient. This is +especially true for high-cardinality text data (columns with relatively few +unique values). By using more efficient data types you can store larger datasets +in memory. + +.. ipython:: python + + ts = pd.read_parquet("timeseries.parquet") + ts + +Now, let's inspect the data types and memory usage to see where we should focus our +attention. + +.. ipython:: python + + ts.dtypes + +.. ipython:: python + + ts.memory_usage(deep=True) # memory usage in bytes + + +The ``name`` column is taking up much more memory than any other. It has just a +few unique values, so it's a good candidate for converting to a +:class:`Categorical`. 
With a Categorical, we store each unique name once and use +space-efficient integers to know which specific name is used in each row. + + +.. ipython:: python + + ts2 = ts.copy() + ts2['name'] = ts2['name'].astype('category') + ts2.memory_usage(deep=True) + +We can go a bit further and downcast the numeric columns to their smallest types +using :func:`pandas.to_numeric`. + +.. ipython:: python + + ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned') + ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float') + ts2.dtypes + +.. ipython:: python + + ts2.memory_usage(deep=True) + +.. ipython:: python + + reduction = (ts2.memory_usage(deep=True).sum() + / ts.memory_usage(deep=True).sum()) + print(f"{reduction:0.2f}") + +In all, we've reduced the in-memory footprint of this dataset to 1/5 of its +original size. + +See :ref:`categorical` for more on ``Categorical`` and :ref:`basics.dtypes` +for an overview of all of pandas' dtypes. + +Use chunking +------------ + +Some workloads can be achieved with chunking: splitting a large problem like "convert this +directory of CSVs to parquet" into a bunch of small problems ("convert this individual CSV +file into a Parquet file. Now repeat that for each file in this directory."). As long as each chunk +fits in memory, you can work with datasets that are much larger than memory. + +.. note:: + + Chunking works well when the operation you're performing requires zero or minimal + coordination between chunks. For more complicated workflows, you're better off + :ref:`using another library <scale.other_libraries>`. + +Suppose we have an even larger "logical dataset" on disk that's a directory of parquet +files. Each file in the directory represents a different year of the entire dataset. + +.. 
ipython:: python + :suppress: + + import pathlib + + N = 12 + starts = [f'20{i:>02d}-01-01' for i in range(N)] + ends = [f'20{i:>02d}-12-13' for i in range(N)] + + pathlib.Path("data/timeseries").mkdir(exist_ok=True) + + for i, (start, end) in enumerate(zip(starts, ends)): + ts = _make_timeseries(start=start, end=end, freq='1T', seed=i) + ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet") + + +:: + + data + └── timeseries + ├── ts-00.parquet + ├── ts-01.parquet + ├── ts-02.parquet + ├── ts-03.parquet + ├── ts-04.parquet + ├── ts-05.parquet + ├── ts-06.parquet + ├── ts-07.parquet + ├── ts-08.parquet + ├── ts-09.parquet + ├── ts-10.parquet + └── ts-11.parquet + +Now we'll implement an out-of-core ``value_counts``. The peak memory usage of this +workflow is the single largest chunk, plus a small series storing the unique value +counts up to this point. As long as each individual file fits in memory, this will +work for arbitrary-sized datasets. + +.. ipython:: python + + %%time + files = pathlib.Path("data/timeseries/").glob("ts*.parquet") + counts = pd.Series(dtype=int) + for path in files: + # Only one dataframe is in memory at a time... + df = pd.read_parquet(path) + # ... plus a small Series `counts`, which is updated. + counts = counts.add(df['name'].value_counts(), fill_value=0) + counts.astype(int) + +Some readers, like :meth:`pandas.read_csv`, offer parameters to control the +``chunksize`` when reading a single file. + +Manually chunking is an OK option for workflows that don't +require too sophisticated of operations. Some operations, like ``groupby``, are +much harder to do chunkwise. In these cases, you may be better switching to a +different library that implements these out-of-core algorithms for you. + +.. _scale.other_libraries: + +Use other libraries +------------------- + +Pandas is just one library offering a DataFrame API. Because of its popularity, +pandas' API has become something of a standard that other libraries implement. 
+The pandas documentation maintains a list of libraries implementing a DataFrame API +in :ref:`our ecosystem page <ecosystem.out-of-core>`. + +For example, `Dask`_, a parallel computing library, has `dask.dataframe`_, a +pandas-like API for working with larger than memory datasets in parallel. Dask +can use multiple threads or processes on a single machine, or a cluster of +machines to process data in parallel. + + +We'll import ``dask.dataframe`` and notice that the API feels similar to pandas. +We can use Dask's ``read_parquet`` function, but provide a globstring of files to read in. + +.. ipython:: python + + import dask.dataframe as dd + + ddf = dd.read_parquet("data/timeseries/ts*.parquet", engine="pyarrow") + ddf + +Inspecting the ``ddf`` object, we see a few things + +* There are familiar attributes like ``.columns`` and ``.dtypes`` +* There are familiar methods like ``.groupby``, ``.sum``, etc. +* There are new attributes like ``.npartitions`` and ``.divisions`` + +The partitions and divisions are how Dask parallizes computation. A **Dask** +DataFrame is made up of many **Pandas** DataFrames. A single method call on a +Dask DataFrame ends up making many pandas method calls, and Dask knows how to +coordinate everything to get the result. + +.. ipython:: python + + ddf.columns + ddf.dtypes + ddf.npartitions + +One major difference: the ``dask.dataframe`` API is *lazy*. If you look at the +repr above, you'll notice that the values aren't actually printed out; just the +column names and dtypes. That's because Dask hasn't actually read the data yet. +Rather than executing immediately, doing operations build up a **task graph**. + +.. ipython:: python + + ddf + ddf['name'] + ddf['name'].value_counts() + +Each of these calls is instant because the result isn't being computed yet. +We're just building up a list of computation to do when someone needs the +result. 
Dask knows that the return type of a ``pandas.Series.value_counts`` +is a pandas Series with a certain dtype and a certain name. So the Dask version +returns a Dask Series with the same dtype and the same name. + +To get the actual result you can call ``.compute()``. + +.. ipython:: python + + %time ddf['name'].value_counts().compute() + +At that point, you get back the same thing you'd get with pandas, in this case +a concrete pandas Series with the count of each ``name``. + +Calling ``.compute`` causes the full task graph to be executed. This includes +reading the data, selecting the columns, and doing the ``value_counts``. The +execution is done *in parallel* where possible, and Dask tries to keep the +overall memory footprint small. You can work with datasets that are much larger +than memory, as long as each partition (a regular pandas DataFrame) fits in memory. + +By default, ``dask.dataframe`` operations use a threadpool to do operations in +parallel. We can also connect to a cluster to distribute the work on many +machines. In this case we'll connect to a local "cluster" made up of several +processes on this single machine. + +.. code-block:: python + + >>> from dask.distributed import Client, LocalCluster + + >>> cluster = LocalCluster() + >>> client = Client(cluster) + >>> client + <Client: 'tcp://127.0.0.1:53349' processes=4 threads=8, memory=17.18 GB> + +Once this ``client`` is created, all of Dask's computation will take place on +the cluster (which is just processes in this case). + +Dask implements the most used parts of the pandas API. For example, we can do +a familiar groupby aggregation. + +.. ipython:: python + + %time ddf.groupby('name')[['x', 'y']].mean().compute().head() + +The grouping and aggregation is done out-of-core and in parallel. + +When Dask knows the ``divisions`` of a dataset, certain optimizations are +possible. When reading parquet datasets written by dask, the divisions will be +known automatically. 
In this case, since we created the parquet files manually, +we need to supply the divisions manually. + +.. ipython:: python + + N = 12 + starts = [f'20{i:>02d}-01-01' for i in range(N)] + ends = [f'20{i:>02d}-12-13' for i in range(N)] + + divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),) + ddf.divisions = divisions + ddf + +Now we can do things like fast random access with ``.loc``. + +.. ipython:: python + + ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute() + +Dask knows to just look in the 3rd partition for selecting values in `2002`. It +doesn't need to look at any other data. + +Many workflows involve a large amount of data and processing it in a way that +reduces the size to something that fits in memory. In this case, we'll resample +to daily frequency and take the mean. Once we've taken the mean, we know the +results will fit in memory, so we can safely call ``compute`` without running +out of memory. At that point it's just a regular pandas object. + +.. ipython:: python + + @savefig dask_resample.png + ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot() + +These Dask examples have all be done using multiple processes on a single +machine. Dask can be `deployed on a cluster +<https://docs.dask.org/en/latest/setup.html>`_ to scale up to even larger +datasets. + +You see more dask examples at https://examples.dask.org. + +.. _Dask: https://dask.org +.. _dask.dataframe: https://docs.dask.org/en/latest/dataframe.html diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a78bc07ac2715..a6abe39f24ac3 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -111,6 +111,13 @@ Other API changes - :meth:`MultiIndex.from_arrays` will no longer infer names from arrays if ``names=None`` is explicitly provided (:issue:`27292`) - +.. 
_whatsnew_1000.api.documentation: + +Documentation Improvements +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Added new section on :ref:`scale` (:issue:`28315`). + .. _whatsnew_1000.deprecations: Deprecations diff --git a/environment.yml b/environment.yml index 7629fa52e7829..7c3ec9064cba3 100644 --- a/environment.yml +++ b/environment.yml @@ -35,6 +35,12 @@ dependencies: - nbconvert>=5.4.1 - nbsphinx - pandoc + # Dask and its dependencies + - dask-core + - toolz>=0.7.3 + - fsspec>=0.5.1 + - partd>=0.3.10 + - cloudpickle>=0.2.1 # web (jinja2 is also needed, but it's also an optional pandas dependency) - markdown @@ -76,7 +82,7 @@ dependencies: - html5lib # pandas.read_html - lxml # pandas.read_html - openpyxl # pandas.read_excel, DataFrame.to_excel, pandas.ExcelWriter, pandas.ExcelFile - - pyarrow>=0.9.0 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather + - pyarrow>=0.13.1 # pandas.read_paquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - pyqt>=5.9.2 # pandas.read_clipboard - pytables>=3.4.2 # pandas.read_hdf, DataFrame.to_hdf - python-snappy # required by pyarrow diff --git a/pandas/util/testing.py b/pandas/util/testing.py index aee58f808d9e6..1c0a8dbc19ccd 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1651,6 +1651,87 @@ def makeMultiIndex(k=10, names=None, **kwargs): return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs) +_names = [ + "Alice", + "Bob", + "Charlie", + "Dan", + "Edith", + "Frank", + "George", + "Hannah", + "Ingrid", + "Jerry", + "Kevin", + "Laura", + "Michael", + "Norbert", + "Oliver", + "Patricia", + "Quinn", + "Ray", + "Sarah", + "Tim", + "Ursula", + "Victor", + "Wendy", + "Xavier", + "Yvonne", + "Zelda", +] + + +def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None): + """ + Make a DataFrame with a DatetimeIndex + + Parameters + ---------- + start : str or Timestamp, default "2000-01-01" + The start of the index. 
Passed to date_range with `freq`. + end : str or Timestamp, default "2000-12-31" + The end of the index. Passed to date_range with `freq`. + freq : str or Freq + The frequency to use for the DatetimeIndex + seed : int, optional + The random state seed. + + * name : object dtype with string names + * id : int dtype with + * x, y : float dtype + + Examples + -------- + >>> _make_timeseries() + id name x y + timestamp + 2000-01-01 982 Frank 0.031261 0.986727 + 2000-01-02 1025 Edith -0.086358 -0.032920 + 2000-01-03 982 Edith 0.473177 0.298654 + 2000-01-04 1009 Sarah 0.534344 -0.750377 + 2000-01-05 963 Zelda -0.271573 0.054424 + ... ... ... ... ... + 2000-12-27 980 Ingrid -0.132333 -0.422195 + 2000-12-28 972 Frank -0.376007 -0.298687 + 2000-12-29 1009 Ursula -0.865047 -0.503133 + 2000-12-30 1000 Hannah -0.063757 -0.507336 + 2000-12-31 972 Tim -0.869120 0.531685 + """ + index = pd.date_range(start=start, end=end, freq=freq, name="timestamp") + n = len(index) + state = np.random.RandomState(seed) + columns = { + "name": state.choice(_names, size=n), + "id": state.poisson(1000, size=n), + "x": state.rand(n) * 2 - 1, + "y": state.rand(n) * 2 - 1, + } + df = pd.DataFrame(columns, index=index, columns=sorted(columns)) + if df.index[-1] == end: + df = df.iloc[:-1] + return df + + def all_index_generator(k=10): """Generator which can be iterated over to get instances of all the various index classes. 
diff --git a/requirements-dev.txt b/requirements-dev.txt index fd8e6378240b4..698e4f3aea094 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -17,6 +17,11 @@ numpydoc>=0.9.0 nbconvert>=5.4.1 nbsphinx pandoc +dask-core +toolz>=0.7.3 +fsspec>=0.5.1 +partd>=0.3.10 +cloudpickle>=0.2.1 markdown feedparser pyyaml @@ -48,7 +53,7 @@ fastparquet>=0.2.1 html5lib lxml openpyxl -pyarrow>=0.9.0 +pyarrow>=0.13.1 pyqt5>=5.9.2 tables>=3.4.2 python-snappy diff --git a/scripts/generate_pip_deps_from_conda.py b/scripts/generate_pip_deps_from_conda.py index 29fe8bf84c12b..44fe50b99560a 100755 --- a/scripts/generate_pip_deps_from_conda.py +++ b/scripts/generate_pip_deps_from_conda.py @@ -20,7 +20,7 @@ import yaml EXCLUDE = {"python=3"} -RENAME = {"pytables": "tables", "pyqt": "pyqt5"} +RENAME = {"pytables": "tables", "pyqt": "pyqt5", "dask-core": "dask"} def conda_package_to_pip(package):
Closes https://github.com/pandas-dev/pandas/issues/28315
https://api.github.com/repos/pandas-dev/pandas/pulls/28577
2019-09-23T11:15:58Z
2019-10-01T11:59:04Z
2019-10-01T11:59:04Z
2019-10-01T12:09:30Z
DOC: Fixed PR08 docstring errors in pandas.tseries
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 82cbfa831bf32..4ebb4f353a8fd 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1007,9 +1007,9 @@ class CustomBusinessDay(_CustomMixin, BusinessDay): normalize : bool, default False Normalize start/end dates to midnight before generating date range weekmask : str, Default 'Mon Tue Wed Thu Fri' - weekmask of valid business days, passed to ``numpy.busdaycalendar`` + Weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list - list/array of dates to exclude from the set of valid business days, + List/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` calendar : pd.HolidayCalendar or np.busdaycalendar offset : timedelta, default timedelta(0) @@ -1671,16 +1671,19 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset): Parameters ---------- n : int - week : {0, 1, 2, 3, ...}, default 0 - 0 is 1st week of month, 1 2nd week, etc. - weekday : {0, 1, ..., 6}, default 0 - 0: Mondays - 1: Tuesdays - 2: Wednesdays - 3: Thursdays - 4: Fridays - 5: Saturdays - 6: Sundays + week : int {0, 1, 2, 3, ...}, default 0 + A specific integer for the week of the month. + e.g. 0 is 1st week of month, 1 is the 2nd week, etc. + weekday : int {0, 1, ..., 6}, default 0 + A specific integer for the day of the week. + + - 0 is Monday + - 1 is Tuesday + - 2 is Wednesday + - 3 is Thursday + - 4 is Friday + - 5 is Saturday + - 6 is Sunday """ _prefix = "WOM" @@ -1747,14 +1750,16 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): Parameters ---------- n : int, default 1 - weekday : {0, 1, ..., 6}, default 0 - 0: Mondays - 1: Tuesdays - 2: Wednesdays - 3: Thursdays - 4: Fridays - 5: Saturdays - 6: Sundays + weekday : int {0, 1, ..., 6}, default 0 + A specific integer for the day of the week. 
+ + - 0 is Monday + - 1 is Tuesday + - 2 is Wednesday + - 3 is Thursday + - 4 is Friday + - 5 is Saturday + - 6 is Sunday """ _prefix = "LWOM" @@ -2055,6 +2060,7 @@ class FY5253(DateOffset): http://en.wikipedia.org/wiki/4-4-5_calendar The year may either: + - end on the last X day of the Y month. - end on the last X day closest to the last day of the Y month. @@ -2064,17 +2070,25 @@ class FY5253(DateOffset): Parameters ---------- n : int - weekday : {0, 1, ..., 6} - 0: Mondays - 1: Tuesdays - 2: Wednesdays - 3: Thursdays - 4: Fridays - 5: Saturdays - 6: Sundays - startingMonth : The month in which fiscal years end. {1, 2, ... 12} - variation : str - {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth" + weekday : int {0, 1, ..., 6}, default 0 + A specific integer for the day of the week. + + - 0 is Monday + - 1 is Tuesday + - 2 is Wednesday + - 3 is Thursday + - 4 is Friday + - 5 is Saturday + - 6 is Sunday + + startingMonth : int {1, 2, ... 12}, default 1 + The month in which the fiscal year ends. + + variation : str, default "nearest" + Method of employing 4-4-5 calendar. There are two options: + + - "nearest" means year end is **weekday** closest to last day of month in year. + - "last" means year end is final **weekday** of the final month in fiscal year. """ _prefix = "RE" @@ -2258,6 +2272,7 @@ class FY5253Quarter(DateOffset): http://en.wikipedia.org/wiki/4-4-5_calendar The year may either: + - end on the last X day of the Y month. - end on the last X day closest to the last day of the Y month. @@ -2271,19 +2286,28 @@ class FY5253Quarter(DateOffset): Parameters ---------- n : int - weekday : {0, 1, ..., 6} - 0: Mondays - 1: Tuesdays - 2: Wednesdays - 3: Thursdays - 4: Fridays - 5: Saturdays - 6: Sundays - startingMonth : The month in which fiscal years end. {1, 2, ... 12} - qtr_with_extra_week : The quarter number that has the leap - or 14 week when needed. 
{1,2,3,4} - variation : str - {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth" + weekday : int {0, 1, ..., 6}, default 0 + A specific integer for the day of the week. + + - 0 is Monday + - 1 is Tuesday + - 2 is Wednesday + - 3 is Thursday + - 4 is Friday + - 5 is Saturday + - 6 is Sunday + + startingMonth : int {1, 2, ..., 12}, default 1 + The month in which fiscal years end. + + qtr_with_extra_week : int {1, 2, 3, 4}, default 1 + The quarter number that has the leap or 14 week when needed. + + variation : str, default "nearest" + Method of employing 4-4-5 calendar. There are two options: + + - "nearest" means year end is **weekday** closest to last day of month in year. + - "last" means year end is final **weekday** of the final month in fiscal year. """ _prefix = "REQ" @@ -2707,8 +2731,8 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()): Parameters ---------- - start : datetime (default None) - end : datetime (default None) + start : datetime, (default None) + end : datetime, (default None) periods : int, (default None) offset : DateOffset, (default BDay())
This relates to: [27977](https://github.com/pandas-dev/pandas/issues/27977). I have fixed the doc PR08 formatting issues for: ``` pandas.tseries.offsets.CustomBusinessDay: Parameter "weekmask" description should start with a capital letter pandas.tseries.offsets.CustomBusinessDay: Parameter "holidays" description should start with a capital letter pandas.tseries.offsets.WeekOfMonth: Parameter "week" description should start with a capital letter pandas.tseries.offsets.WeekOfMonth: Parameter "weekday" description should start with a capital letter pandas.tseries.offsets.LastWeekOfMonth: Parameter "weekday" description should start with a capital letter pandas.tseries.offsets.FY5253: Parameter "weekday" description should start with a capital letter pandas.tseries.offsets.FY5253: Parameter "variation" description should start with a capital letter pandas.tseries.offsets.FY5253Quarter: Parameter "weekday" description should start with a capital letter pandas.tseries.offsets.FY5253Quarter: Parameter "qtr_with_extra_week" description should start with a capital letter pandas.tseries.offsets.FY5253Quarter: Parameter "variation" description should start with a capital letter pandas.tseries.offsets.CDay: Parameter "weekmask" description should start with a capital letter pandas.tseries.offsets.CDay: Parameter "holidays" description should start with a capital letter ``` - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Will continue to work through all PR08 docstring errors.
https://api.github.com/repos/pandas-dev/pandas/pulls/28571
2019-09-22T23:41:47Z
2019-10-01T04:08:13Z
2019-10-01T04:08:13Z
2019-10-01T18:51:29Z
Bugfix/groupby datetime issue
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index eb4b72d01d59a..08bc333d926db 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -182,7 +182,7 @@ Datetimelike - Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`) - Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`) - Bug in :class:`DataFrame` arithmetic operations when operating with a :class:`Series` with dtype `'timedelta64[ns]'` (:issue:`28049`) -- +- Bug in :func:`pandas.core.groupby.generic.SeriesGroupBy.apply` raising ``ValueError`` when a column in the original DataFrame is a datetime and the column labels are not standard integers (:issue:`28247`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index f8f1455561c03..a4dc1613d8c80 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1905,7 +1905,9 @@ def _recast_datetimelike_result(result: DataFrame) -> DataFrame: result = result.copy() obj_cols = [ - idx for idx in range(len(result.columns)) if is_object_dtype(result.dtypes[idx]) + idx + for idx in range(len(result.columns)) + if is_object_dtype(result.dtypes.iloc[idx]) ] # See GH#26285 diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 76588549532b1..4d0063b773bc5 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -657,3 +657,22 @@ def test_apply_with_mixed_types(): result = g.apply(lambda x: x / x.sum()) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "group_column_dtlike", + [datetime.today(), datetime.today().date(), datetime.today().time()], +) +def test_apply_datetime_issue(group_column_dtlike): + # GH-28247 + # groupby-apply 
throws an error if one of the columns in the DataFrame + # is a datetime object and the column labels are different from + # standard int values in range(len(num_columns)) + + df = pd.DataFrame({"a": ["foo"], "b": [group_column_dtlike]}) + result = df.groupby("a").apply(lambda x: pd.Series(["spam"], index=[42])) + + expected = pd.DataFrame( + ["spam"], Index(["foo"], dtype="object", name="a"), columns=[42] + ) + tm.assert_frame_equal(result, expected)
- [x] closes #28247 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28569
2019-09-22T18:14:59Z
2019-10-03T17:25:04Z
2019-10-03T17:25:04Z
2020-01-18T00:29:08Z
DOC: update fixing unknown parameters errors (error code PR02)
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index fe6b339c2f4c8..8724382d9ec55 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -28,7 +28,7 @@ def hist_series( yrot=None, figsize=None, bins=10, - **kwds + **kwargs ): """ Draw histogram of the input series using matplotlib. @@ -56,7 +56,7 @@ def hist_series( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. - `**kwds` : keywords + **kwargs To be passed to the actual plotting function Returns @@ -80,7 +80,7 @@ def hist_series( yrot=yrot, figsize=figsize, bins=bins, - **kwds + **kwargs ) @@ -99,7 +99,7 @@ def hist_frame( figsize=None, layout=None, bins=10, - **kwds + **kwargs ): """ Make a histogram of the DataFrame's. @@ -151,7 +151,7 @@ def hist_frame( bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. - **kwds + **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. @@ -194,7 +194,7 @@ def hist_frame( figsize=figsize, layout=layout, bins=bins, - **kwds + **kwargs ) @@ -209,7 +209,7 @@ def boxplot( figsize=None, layout=None, return_type=None, - **kwds + **kwargs ): """ Make a box plot from DataFrame columns. @@ -260,7 +260,7 @@ def boxplot( If ``return_type`` is `None`, a NumPy array of axes with the same shape as ``layout`` is returned. - **kwds + **kwargs All other plotting keyword arguments to be passed to :func:`matplotlib.pyplot.boxplot`. 
@@ -385,7 +385,7 @@ def boxplot( figsize=figsize, layout=layout, return_type=return_type, - **kwds + **kwargs ) @@ -401,7 +401,7 @@ def boxplot_frame( figsize=None, layout=None, return_type=None, - **kwds + **kwargs ): plot_backend = _get_plot_backend() return plot_backend.boxplot_frame( @@ -415,7 +415,7 @@ def boxplot_frame( figsize=figsize, layout=layout, return_type=return_type, - **kwds + **kwargs ) @@ -431,7 +431,7 @@ def boxplot_frame_groupby( layout=None, sharex=False, sharey=True, - **kwds + **kwargs ): """ Make box plots from DataFrameGroupBy data. @@ -459,7 +459,7 @@ def boxplot_frame_groupby( Whether y-axes will be shared among subplots .. versionadded:: 0.23.1 - `**kwds` : Keyword Arguments + **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function @@ -495,7 +495,7 @@ def boxplot_frame_groupby( layout=layout, sharex=sharex, sharey=sharey, - **kwds + **kwargs ) @@ -586,7 +586,7 @@ class PlotAccessor(PandasObject): labels with "(right)" in the legend include_bool : bool, default is False If True, boolean values can be plotted. - `**kwds` : keywords + **kwargs Options to pass to matplotlib plotting method. Returns @@ -810,7 +810,7 @@ def line(self, x=None, y=None, **kwargs): The values to be plotted. Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -880,7 +880,7 @@ def bar(self, x=None, y=None, **kwargs): y : label or position, optional Allows plotting of one column versus another. If not specified, all numerical columns are used. - **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -963,7 +963,7 @@ def barh(self, x=None, y=None, **kwargs): Column to be used for categories. y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. 
- **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1049,7 +1049,7 @@ def box(self, by=None, **kwargs): ---------- by : str or sequence Column in the DataFrame to group by. - **kwds : optional + **kwargs Additional keywords are documented in :meth:`DataFrame.plot`. @@ -1092,7 +1092,7 @@ def hist(self, by=None, bins=10, **kwargs): Column in the DataFrame to group by. bins : int, default 10 Number of histogram bins to be used. - **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -1148,7 +1148,7 @@ def kde(self, bw_method=None, ind=None, **kwargs): 1000 equally spaced points are used. If `ind` is a NumPy array, the KDE is evaluated at the points passed. If `ind` is an integer, `ind` number of equally spaced points are used. - **kwds : optional + **kwargs Additional keyword arguments are documented in :meth:`pandas.%(this-datatype)s.plot`. @@ -1250,7 +1250,7 @@ def area(self, x=None, y=None, **kwargs): stacked : bool, default True Area plots are stacked by default. Set to False to create a unstacked plot. - **kwds : optional + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`. @@ -1322,7 +1322,7 @@ def pie(self, **kwargs): y : int or label, optional Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1404,7 +1404,7 @@ def scatter(self, x, y, s=None, c=None, **kwargs): - A column name or position whose values will be used to color the marker points according to a colormap. - **kwds + **kwargs Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns @@ -1476,7 +1476,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs): Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. 
- **kwds + **kwargs Additional keyword arguments are documented in :meth:`DataFrame.plot`.
- Documentation update fixing some of the methods with a PR-2 error code which involved updating **kwds to **kwargs - No tests required - The data frame.plot methods now have the docstring arguments updated from **kwds to **kwargs to match the method signature and exclude them from the PR02 errors:
https://api.github.com/repos/pandas-dev/pandas/pulls/28567
2019-09-22T10:36:22Z
2019-09-27T07:00:09Z
2019-09-27T07:00:09Z
2019-09-27T07:00:24Z
BENCH: Add rolling apply benchmarks
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index b42fa553b495c..493f96d46d5e7 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -25,6 +25,25 @@ def peakmem_rolling(self, constructor, window, dtype, method): getattr(self.roll, method)() +class Apply: + params = ( + ["DataFrame", "Series"], + [10, 1000], + ["int", "float"], + [sum, np.sum, lambda x: np.sum(x) + 5], + [True, False], + ) + param_names = ["contructor", "window", "dtype", "function", "raw"] + + def setup(self, constructor, window, dtype, function, raw): + N = 10 ** 5 + arr = (100 * np.random.random(N)).astype(dtype) + self.roll = getattr(pd, constructor)(arr).rolling(window) + + def time_rolling(self, constructor, window, dtype, function, raw): + self.roll.apply(function, raw=raw) + + class ExpandingMethods: params = (
- [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/28566
2019-09-22T03:19:01Z
2019-09-23T00:06:19Z
2019-09-23T00:06:19Z
2019-09-23T00:06:23Z
TST: un-xfail incorrectly xfailed tests for maybe_promote
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e31918c21c2ac..504eec8010ec6 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -358,6 +358,7 @@ def maybe_promote(dtype, fill_value=np.nan): fill_value = NaT elif is_extension_array_dtype(dtype) and isna(fill_value): fill_value = dtype.na_value + elif is_float(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.object_ @@ -366,6 +367,8 @@ def maybe_promote(dtype, fill_value=np.nan): elif is_bool(fill_value): if not issubclass(dtype.type, np.bool_): dtype = np.object_ + else: + fill_value = np.bool_(fill_value) elif is_integer(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.object_ @@ -374,6 +377,10 @@ def maybe_promote(dtype, fill_value=np.nan): arr = np.asarray(fill_value) if arr != arr.astype(dtype): dtype = arr.dtype + elif issubclass(dtype.type, np.floating): + # check if we can cast + if _check_lossless_cast(fill_value, dtype): + fill_value = dtype.type(fill_value) elif is_complex(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.object_ @@ -398,12 +405,31 @@ def maybe_promote(dtype, fill_value=np.nan): pass elif is_datetime64tz_dtype(dtype): pass - elif issubclass(np.dtype(dtype).type, str): + elif issubclass(np.dtype(dtype).type, (bytes, str)): dtype = np.object_ return dtype, fill_value +def _check_lossless_cast(value, dtype: np.dtype) -> bool: + """ + Check if we can cast the given value to the given dtype _losslesly_. + + Parameters + ---------- + value : object + dtype : np.dtype + + Returns + ------- + bool + """ + casted = dtype.type(value) + if casted == value: + return True + return False + + def infer_dtype_from(val, pandas_dtype=False): """ interpret the dtype from a scalar or array. 
This is a convenience diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index 44aebd4d277f2..211c550100018 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -23,6 +23,7 @@ is_timedelta64_dtype, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype, PandasExtensionDtype +from pandas.core.dtypes.missing import isna import pandas as pd @@ -95,6 +96,7 @@ def _safe_dtype_assert(left_dtype, right_dtype): """ Compare two dtypes without raising TypeError. """ + __tracebackhide__ = True if isinstance(right_dtype, PandasExtensionDtype): # switch order of equality check because numpy dtypes (e.g. if # left_dtype is np.object_) do not know some expected dtypes (e.g. @@ -157,20 +159,17 @@ def _check_promote( _safe_dtype_assert(result_dtype, expected_dtype) - # for equal values, also check type (relevant e.g. for int vs float, resp. - # for different datetimes and timedeltas) - match_value = ( - result_fill_value - == expected_fill_value - # disabled type check due to too many xfails; GH 23982/25425 - # and type(result_fill_value) == type(expected_fill_value) - ) + # GH#23982/25425 require the same type in addition to equality/NA-ness + res_type = type(result_fill_value) + ex_type = type(expected_fill_value) + assert res_type == ex_type + + match_value = result_fill_value == expected_fill_value + # Note: type check above ensures that we have the _same_ NA value # for missing values, None == None and iNaT == iNaT (which is checked # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT - match_missing = (result_fill_value is np.nan and expected_fill_value is np.nan) or ( - result_fill_value is NaT and expected_fill_value is NaT - ) + match_missing = isna(result_fill_value) and isna(expected_fill_value) assert match_value or match_missing @@ -251,7 +250,9 @@ def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced, box): if boxed and fill_dtype == bool: 
pytest.xfail("falsely upcasts to object") - if boxed and box_dtype is None and is_datetime_or_timedelta_dtype(fill_dtype): + if boxed and box_dtype is None and fill_dtype.kind == "M": + pytest.xfail("wrongly casts fill_value") + if boxed and box_dtype is None and fill_dtype.kind == "m": pytest.xfail("wrongly casts fill_value") # create array of given dtype; casts "1" to correct dtype @@ -282,7 +283,9 @@ def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced, box): pytest.xfail("falsely upcasts to object") if boxed and dtype not in (str, object) and box_dtype is None: pytest.xfail("falsely upcasts to object") - if not boxed and is_datetime_or_timedelta_dtype(dtype): + if not boxed and dtype.kind == "M": + pytest.xfail("raises error") + if not boxed and dtype.kind == "m": pytest.xfail("raises error") # filling anything but bool with bool casts to object @@ -393,9 +396,6 @@ def test_maybe_promote_datetimetz_with_any_numpy_dtype( fill_dtype = np.dtype(any_numpy_dtype_reduced) boxed, box_dtype = box # read from parametrized fixture - if box_dtype != object: - pytest.xfail("does not upcast correctly") - # create array of given dtype; casts "1" to correct dtype fill_value = np.array([1], dtype=fill_dtype)[0] @@ -430,8 +430,6 @@ def test_maybe_promote_datetimetz_with_datetimetz( pytest.xfail("Cannot process fill_value with this dtype, see GH 24310") if dtype.tz == fill_dtype.tz and boxed: pytest.xfail("falsely upcasts") - if dtype.tz != fill_dtype.tz and not boxed: - pytest.xfail("falsely upcasts") # create array of given dtype; casts "1" to correct dtype fill_value = pd.Series([10 ** 9], dtype=fill_dtype)[0] @@ -466,14 +464,10 @@ def test_maybe_promote_datetimetz_with_na(tz_aware_fixture, fill_value, box): dtype = DatetimeTZDtype(tz=tz_aware_fixture) boxed, box_dtype = box # read from parametrized fixture - if boxed and ( - box_dtype == object - or (box_dtype is None and (fill_value is None or fill_value is NaT)) - ): - pytest.xfail("false upcasts to object") # 
takes the opinion that DatetimeTZ should have single na-marker # using iNaT would lead to errors elsewhere -> NaT if not boxed and fill_value == iNaT: + # TODO: are we sure iNaT _should_ be cast to NaT? pytest.xfail("wrong missing value marker") expected_dtype = dtype @@ -509,8 +503,10 @@ def test_maybe_promote_any_numpy_dtype_with_datetimetz( fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture) boxed, box_dtype = box # read from parametrized fixture - if is_datetime_or_timedelta_dtype(dtype) and not boxed: + if dtype.kind == "m" and not boxed: pytest.xfail("raises error") + elif dtype.kind == "M" and not boxed: + pytest.xfail("Comes back as M8 instead of object") fill_value = pd.Series([fill_value], dtype=fill_dtype)[0] @@ -566,19 +562,6 @@ def test_maybe_promote_any_with_timedelta64( else: if boxed and box_dtype is None and is_timedelta64_dtype(type(fill_value)): pytest.xfail("does not upcast correctly") - if ( - not boxed - and is_timedelta64_dtype(type(fill_value)) - and ( - is_integer_dtype(dtype) - or is_float_dtype(dtype) - or is_complex_dtype(dtype) - or issubclass(dtype.type, np.bytes_) - ) - ): - pytest.xfail("does not upcast correctly") - if box_dtype == "td_dtype": - pytest.xfail("falsely upcasts") if not boxed and is_datetime64_dtype(dtype): pytest.xfail("raises error") @@ -612,7 +595,9 @@ def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype_reduced, bo fill_dtype = np.dtype(any_numpy_dtype_reduced) boxed, box_dtype = box # read from parametrized fixture - if boxed and box_dtype is None and is_datetime_or_timedelta_dtype(fill_dtype): + if boxed and box_dtype is None and fill_dtype.kind == "m": + pytest.xfail("wrong missing value marker") + if boxed and box_dtype is None and fill_dtype.kind == "M": pytest.xfail("wrong missing value marker") # create array of given dtype; casts "1" to correct dtype @@ -652,17 +637,6 @@ def test_maybe_promote_any_with_string(any_numpy_dtype_reduced, string_dtype, bo if is_datetime_or_timedelta_dtype(dtype) 
and box_dtype != object: pytest.xfail("does not upcast or raises") - if ( - boxed - and box_dtype in (None, "str") - and ( - is_integer_dtype(dtype) - or is_float_dtype(dtype) - or is_complex_dtype(dtype) - or issubclass(dtype.type, np.bytes_) - ) - ): - pytest.xfail("does not upcast correctly") # create array of given dtype fill_value = "abc" @@ -760,19 +734,6 @@ def test_maybe_promote_any_numpy_dtype_with_na( pytest.xfail("does not upcast to object") elif dtype == "uint64" and not boxed and fill_value == iNaT: pytest.xfail("does not upcast correctly") - elif is_datetime_or_timedelta_dtype(dtype) and boxed: - pytest.xfail("falsely upcasts to object") - elif ( - boxed - and ( - is_integer_dtype(dtype) or is_float_dtype(dtype) or is_complex_dtype(dtype) - ) - and fill_value is not NaT - and dtype != "uint64" - ): - pytest.xfail("falsely upcasts to object") - elif boxed and dtype == "uint64" and (fill_value is np.nan or fill_value is None): - pytest.xfail("falsely upcasts to object") # below: opinionated that iNaT should be interpreted as missing value elif ( not boxed
This sits on top of #28561.
https://api.github.com/repos/pandas-dev/pandas/pulls/28564
2019-09-21T14:57:53Z
2019-10-01T13:06:04Z
2019-10-01T13:06:04Z
2019-10-01T13:30:46Z
CLN: Assorted cleanups
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c0ed198e200f1..6e73e1636a75b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -39,7 +39,6 @@ is_period_dtype, is_scalar, is_signed_integer_dtype, - is_sparse, is_timedelta64_dtype, is_unsigned_integer_dtype, needs_i8_conversion, @@ -743,7 +742,7 @@ def value_counts( else: - if is_extension_array_dtype(values) or is_sparse(values): + if is_extension_array_dtype(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) @@ -1623,7 +1622,7 @@ def take_nd( out : ndarray or None, default None Optional output array, must be appropriate type to hold input and fill_value together, if indexer has any -1 value entries; call - _maybe_promote to determine this type for any fill_value + maybe_promote to determine this type for any fill_value fill_value : any, default np.nan Fill value to replace -1 values with mask_info : tuple of (ndarray, boolean) @@ -1644,9 +1643,7 @@ def take_nd( if is_extension_array_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) - if is_sparse(arr): - arr = arr.to_dense() - elif isinstance(arr, (ABCIndexClass, ABCSeries)): + if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values arr = np.asarray(arr) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 5bd2a2b69deb1..5e8b28267f24f 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -9,7 +9,7 @@ import numpy as np import numpy.ma as ma -from pandas._libs import lib, tslibs +from pandas._libs import lib from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime from pandas.core.dtypes.cast import ( @@ -36,7 +36,7 @@ is_timedelta64_ns_dtype, pandas_dtype, ) -from pandas.core.dtypes.dtypes import ExtensionDtype, registry +from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype, registry from pandas.core.dtypes.generic import ( ABCExtensionArray, 
ABCIndexClass, @@ -275,7 +275,7 @@ def array( if inferred_dtype == "period": try: return period_array(data, copy=copy) - except tslibs.IncompatibleFrequency: + except IncompatibleFrequency: # We may have a mixture of frequencies. # We choose to return an ndarray, rather than raising. pass @@ -365,7 +365,9 @@ def extract_array(obj, extract_numpy=False): return obj -def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): +def sanitize_array( + data, index, dtype=None, copy: bool = False, raise_cast_failure: bool = False +): """ Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified. @@ -486,13 +488,19 @@ def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False return subarr -def _try_cast(arr, dtype, copy, raise_cast_failure): +def _try_cast( + arr, + dtype: Optional[Union[np.dtype, "ExtensionDtype"]], + copy: bool, + raise_cast_failure: bool, +): """ Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- - arr : array-like + arr : ndarray, list, tuple, iterator (catchall) + Excludes: ExtensionArray, Series, Index. dtype : np.dtype, ExtensionDtype or None copy : bool If False, don't copy the data if not needed. @@ -528,11 +536,13 @@ def _try_cast(arr, dtype, copy, raise_cast_failure): if is_categorical_dtype(dtype): # We *do* allow casting to categorical, since we know # that Categorical is the only array type for 'category'. 
+ dtype = cast(CategoricalDtype, dtype) subarr = dtype.construct_array_type()( arr, dtype.categories, ordered=dtype._ordered ) elif is_extension_array_dtype(dtype): # create an extension array from its dtype + dtype = cast(ExtensionDtype, dtype) array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) elif dtype is not None and raise_cast_failure: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e31918c21c2ac..b59660056aadb 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1311,9 +1311,8 @@ def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False): >>> np.array([1.0, 2.0, None], dtype='str') array(['1.0', '2.0', 'None'], dtype='<U4') - >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str') - - + >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str')) + array(['1.0', '2.0', None], dtype=object) """ subarr = np.array(values, dtype=dtype, copy=copy) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 4ea649a2a6faf..41677af7b1721 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -888,7 +888,8 @@ def is_dtype_equal(source, target): def is_any_int_dtype(arr_or_dtype) -> bool: - """Check whether the provided array or dtype is of an integer dtype. + """ + Check whether the provided array or dtype is of an integer dtype. In this function, timedelta64 instances are also considered "any-integer" type objects and will return True. diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 01399a23e810e..04c3b2b7714ef 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2128,7 +2128,8 @@ def _can_hold_na(self): return True def _maybe_coerce_values(self, values): - """Input validation for values passed to __init__. Ensure that + """ + Input validation for values passed to __init__. 
Ensure that we have datetime64ns, coercing if necessary. Parameters
https://api.github.com/repos/pandas-dev/pandas/pulls/28563
2019-09-21T14:41:43Z
2019-09-23T11:58:41Z
2019-09-23T11:58:41Z
2019-09-23T12:44:50Z
TST/CLN: parametrize and clean test_expressions, test_nanops
diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index b11698bf89cda..6edd3125331b9 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -54,14 +54,12 @@ def run_arithmetic(self, df, other): operations = ["add", "sub", "mul", "mod", "truediv", "floordiv"] for test_flex in [True, False]: for arith in operations: - - operator_name = arith - + # TODO: share with run_binary if test_flex: op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: - op = getattr(operator, operator_name) + op = getattr(operator, arith) expr.set_use_numexpr(False) expected = op(df, other) expr.set_use_numexpr(True) @@ -87,13 +85,14 @@ def run_binary(self, df, other): for test_flex in [True, False]: for arith in operations: if test_flex: - op = lambda x, y: getattr(df, arith)(y) + op = lambda x, y: getattr(x, arith)(y) op.__name__ = arith else: op = getattr(operator, arith) expr.set_use_numexpr(False) expected = op(df, other) expr.set_use_numexpr(True) + expr.get_test_result() result = op(df, other) used_numexpr = expr.get_test_result() @@ -167,29 +166,29 @@ def test_invalid(self): "opname,op_str", [("add", "+"), ("sub", "-"), ("mul", "*"), ("truediv", "/"), ("pow", "**")], ) - def test_binary_ops(self, opname, op_str): + @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)]) + def test_binary_ops(self, opname, op_str, left, right): def testit(): - for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]: + if opname == "pow": + # TODO: get this working + return - if opname == "pow": - continue + op = getattr(operator, opname) - op = getattr(operator, opname) + result = expr._can_use_numexpr(op, op_str, left, left, "evaluate") + assert result != left._is_mixed_type - result = expr._can_use_numexpr(op, op_str, f, f, "evaluate") - assert result != f._is_mixed_type + result = expr.evaluate(op, op_str, left, left, use_numexpr=True) + expected = expr.evaluate(op, op_str, left, left, 
use_numexpr=False) - result = expr.evaluate(op, op_str, f, f, use_numexpr=True) - expected = expr.evaluate(op, op_str, f, f, use_numexpr=False) + if isinstance(result, DataFrame): + tm.assert_frame_equal(result, expected) + else: + tm.assert_numpy_array_equal(result, expected.values) - if isinstance(result, DataFrame): - tm.assert_frame_equal(result, expected) - else: - tm.assert_numpy_array_equal(result, expected.values) - - result = expr._can_use_numexpr(op, op_str, f2, f2, "evaluate") - assert not result + result = expr._can_use_numexpr(op, op_str, right, right, "evaluate") + assert not result expr.set_use_numexpr(False) testit() @@ -210,30 +209,26 @@ def testit(): ("ne", "!="), ], ) - def test_comparison_ops(self, opname, op_str): + @pytest.mark.parametrize("left,right", [(_frame, _frame2), (_mixed, _mixed2)]) + def test_comparison_ops(self, opname, op_str, left, right): def testit(): - for f, f2 in [(self.frame, self.frame2), (self.mixed, self.mixed2)]: - - f11 = f - f12 = f + 1 + f12 = left + 1 + f22 = right + 1 - f21 = f2 - f22 = f2 + 1 + op = getattr(operator, opname) - op = getattr(operator, opname) + result = expr._can_use_numexpr(op, op_str, left, f12, "evaluate") + assert result != left._is_mixed_type - result = expr._can_use_numexpr(op, op_str, f11, f12, "evaluate") - assert result != f11._is_mixed_type + result = expr.evaluate(op, op_str, left, f12, use_numexpr=True) + expected = expr.evaluate(op, op_str, left, f12, use_numexpr=False) + if isinstance(result, DataFrame): + tm.assert_frame_equal(result, expected) + else: + tm.assert_numpy_array_equal(result, expected.values) - result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True) - expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False) - if isinstance(result, DataFrame): - tm.assert_frame_equal(result, expected) - else: - tm.assert_numpy_array_equal(result, expected.values) - - result = expr._can_use_numexpr(op, op_str, f21, f22, "evaluate") - assert not result + result = 
expr._can_use_numexpr(op, op_str, right, f22, "evaluate") + assert not result expr.set_use_numexpr(False) testit() @@ -244,15 +239,14 @@ def testit(): testit() @pytest.mark.parametrize("cond", [True, False]) - def test_where(self, cond): + @pytest.mark.parametrize("df", [_frame, _frame2, _mixed, _mixed2]) + def test_where(self, cond, df): def testit(): - for f in [self.frame, self.frame2, self.mixed, self.mixed2]: - - c = np.empty(f.shape, dtype=np.bool_) - c.fill(cond) - result = expr.where(c, f.values, f.values + 1) - expected = np.where(c, f.values, f.values + 1) - tm.assert_numpy_array_equal(result, expected) + c = np.empty(df.shape, dtype=np.bool_) + c.fill(cond) + result = expr.where(c, df.values, df.values + 1) + expected = np.where(c, df.values, df.values + 1) + tm.assert_numpy_array_equal(result, expected) expr.set_use_numexpr(False) testit() @@ -263,7 +257,7 @@ def testit(): testit() @pytest.mark.parametrize( - "op_str,opname", list(zip(["/", "//", "**"], ["truediv", "floordiv", "pow"])) + "op_str,opname", [("/", "truediv"), ("//", "floordiv"), ("**", "pow")] ) def test_bool_ops_raise_on_arithmetic(self, op_str, opname): df = DataFrame({"a": np.random.rand(10) > 0.5, "b": np.random.rand(10) > 0.5}) @@ -291,7 +285,7 @@ def test_bool_ops_raise_on_arithmetic(self, op_str, opname): f(df, True) @pytest.mark.parametrize( - "op_str,opname", list(zip(["+", "*", "-"], ["add", "mul", "sub"])) + "op_str,opname", [("+", "add"), ("*", "mul"), ("-", "sub")] ) def test_bool_ops_warn_on_arithmetic(self, op_str, opname): n = 10 diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 41b27f030d80f..49d1777df0751 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1,4 +1,5 @@ from functools import partial +import operator import warnings import numpy as np @@ -15,6 +16,7 @@ import pandas.util.testing as tm use_bn = nanops._USE_BOTTLENECK +has_c16 = hasattr(np, "complex128") class TestnanopsDataFrame: @@ -131,14 +133,9 @@ def 
_coerce_tds(targ, res): if targ.dtype.kind != "O": res = res.astype(targ.dtype) else: - try: - res = res.astype("c16") - except RuntimeError: - res = res.astype("f8") - try: - targ = targ.astype("c16") - except RuntimeError: - targ = targ.astype("f8") + cast_dtype = "c16" if has_c16 else "f8" + res = res.astype(cast_dtype) + targ = targ.astype(cast_dtype) # there should never be a case where numpy returns an object # but nanops doesn't, so make that an exception elif targ.dtype.kind == "O": @@ -152,14 +149,13 @@ def check_fun_data( targfunc, testarval, targarval, - targarnanval, check_dtype=True, empty_targfunc=None, **kwargs ): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: - targartempval = targarval if skipna else targarnanval + targartempval = targarval if skipna else testarval if skipna and empty_targfunc and isna(targartempval).all(): targ = empty_targfunc(targartempval, axis=axis, **kwargs) else: @@ -180,46 +176,32 @@ def check_fun_data( if testarval.ndim <= 1: return - try: - testarval2 = np.take(testarval, 0, axis=-1) - targarval2 = np.take(targarval, 0, axis=-1) - targarnanval2 = np.take(targarnanval, 0, axis=-1) - except ValueError: - return + # Recurse on lower-dimension + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) self.check_fun_data( testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, empty_targfunc=empty_targfunc, **kwargs ) - def check_fun( - self, - testfunc, - targfunc, - testar, - targar=None, - targarnan=None, - empty_targfunc=None, - **kwargs - ): - if targar is None: - targar = testar - if targarnan is None: - targarnan = testar + def check_fun(self, testfunc, targfunc, testar, empty_targfunc=None, **kwargs): + + targar = testar + if testar.endswith("_nan") and hasattr(self, testar[:-4]): + targar = testar[:-4] + testarval = getattr(self, testar) targarval = getattr(self, targar) - targarnanval = getattr(self, targarnan) 
self.check_fun_data( testfunc, targfunc, testarval, targarval, - targarnanval, empty_targfunc=empty_targfunc, **kwargs ) @@ -230,14 +212,13 @@ def check_funs( targfunc, allow_complex=True, allow_all_nan=True, - allow_str=True, allow_date=True, allow_tdelta=True, allow_obj=True, **kwargs ): self.check_fun(testfunc, targfunc, "arr_float", **kwargs) - self.check_fun(testfunc, targfunc, "arr_float_nan", "arr_float", **kwargs) + self.check_fun(testfunc, targfunc, "arr_float_nan", **kwargs) self.check_fun(testfunc, targfunc, "arr_int", **kwargs) self.check_fun(testfunc, targfunc, "arr_bool", **kwargs) objs = [ @@ -251,26 +232,15 @@ def check_funs( if allow_complex: self.check_fun(testfunc, targfunc, "arr_complex", **kwargs) - self.check_fun( - testfunc, targfunc, "arr_complex_nan", "arr_complex", **kwargs - ) + self.check_fun(testfunc, targfunc, "arr_complex_nan", **kwargs) if allow_all_nan: self.check_fun(testfunc, targfunc, "arr_nan_nanj", **kwargs) objs += [self.arr_complex.astype("O")] - if allow_str: - self.check_fun(testfunc, targfunc, "arr_str", **kwargs) - self.check_fun(testfunc, targfunc, "arr_utf", **kwargs) - objs += [self.arr_str.astype("O"), self.arr_utf.astype("O")] - if allow_date: - try: - targfunc(self.arr_date) - except TypeError: - pass - else: - self.check_fun(testfunc, targfunc, "arr_date", **kwargs) - objs += [self.arr_date.astype("O")] + targfunc(self.arr_date) + self.check_fun(testfunc, targfunc, "arr_date", **kwargs) + objs += [self.arr_date.astype("O")] if allow_tdelta: try: @@ -300,33 +270,20 @@ def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): value = value.astype("f8") return func(value, **kwargs) - def test_nanany(self): - self.check_funs( - nanops.nanany, - np.any, - allow_all_nan=False, - allow_str=False, - allow_date=False, - allow_tdelta=False, - ) - - def test_nanall(self): + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)] + ) + def test_nan_funcs(self, nan_op, np_op): + # 
TODO: allow tdelta, doesn't break tests self.check_funs( - nanops.nanall, - np.all, - allow_all_nan=False, - allow_str=False, - allow_date=False, - allow_tdelta=False, + nan_op, np_op, allow_all_nan=False, allow_date=False, allow_tdelta=False ) def test_nansum(self): self.check_funs( nanops.nansum, np.sum, - allow_str=False, allow_date=False, - allow_tdelta=True, check_dtype=False, empty_targfunc=np.nansum, ) @@ -335,11 +292,9 @@ def test_nanmean(self): self.check_funs( nanops.nanmean, np.mean, - allow_complex=False, + allow_complex=False, # TODO: allow this, doesn't break test allow_obj=False, - allow_str=False, allow_date=False, - allow_tdelta=True, ) def test_nanmean_overflow(self): @@ -355,22 +310,31 @@ def test_nanmean_overflow(self): assert result == np_result assert result.dtype == np.float64 - def test_returned_dtype(self): - - dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64] - if hasattr(np, "float128"): - dtypes.append(np.float128) + @pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], + ) + def test_returned_dtype(self, dtype): + if dtype is None: + # no float128 available + return - for dtype in dtypes: - s = Series(range(10), dtype=dtype) - group_a = ["mean", "std", "var", "skew", "kurt"] - group_b = ["min", "max"] - for method in group_a + group_b: - result = getattr(s, method)() - if is_integer_dtype(dtype) and method in group_a: - assert result.dtype == np.float64 - else: - assert result.dtype == dtype + s = Series(range(10), dtype=dtype) + group_a = ["mean", "std", "var", "skew", "kurt"] + group_b = ["min", "max"] + for method in group_a + group_b: + result = getattr(s, method)() + if is_integer_dtype(dtype) and method in group_a: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype def test_nanmedian(self): with warnings.catch_warnings(record=True): @@ -379,9 +343,7 @@ def test_nanmedian(self): nanops.nanmedian, np.median, 
allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=True, allow_obj="convert", ) @@ -391,9 +353,7 @@ def test_nanvar(self, ddof): nanops.nanvar, np.var, allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=True, allow_obj="convert", ddof=ddof, ) @@ -404,9 +364,7 @@ def test_nanstd(self, ddof): nanops.nanstd, np.std, allow_complex=False, - allow_str=False, allow_date=False, - allow_tdelta=True, allow_obj="convert", ddof=ddof, ) @@ -421,32 +379,19 @@ def test_nansem(self, ddof): nanops.nansem, sem, allow_complex=False, - allow_str=False, allow_date=False, allow_tdelta=False, allow_obj="convert", ddof=ddof, ) - def _minmax_wrap(self, value, axis=None, func=None): - - # numpy warns if all nan - res = func(value, axis) - if res.dtype.kind == "m": - res = np.atleast_1d(res) - return res - - def test_nanmin(self): + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)] + ) + def test_nanops_with_warnings(self, nan_op, np_op): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) - func = partial(self._minmax_wrap, func=np.min) - self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False) - - def test_nanmax(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", RuntimeWarning) - func = partial(self._minmax_wrap, func=np.max) - self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False) + self.check_funs(nan_op, np_op, allow_obj=False) def _argminmax_wrap(self, value, axis=None, func=None): res = func(value, axis) @@ -467,20 +412,13 @@ def test_nanargmax(self): with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) func = partial(self._argminmax_wrap, func=np.argmax) - self.check_funs( - nanops.nanargmax, - func, - allow_str=False, - allow_obj=False, - allow_date=True, - allow_tdelta=True, - ) + self.check_funs(nanops.nanargmax, func, allow_obj=False) def test_nanargmin(self): 
with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) func = partial(self._argminmax_wrap, func=np.argmin) - self.check_funs(nanops.nanargmin, func, allow_str=False, allow_obj=False) + self.check_funs(nanops.nanargmin, func, allow_obj=False) def _skew_kurt_wrap(self, values, axis=None, func=None): if not isinstance(values.dtype.type, np.floating): @@ -504,7 +442,6 @@ def test_nanskew(self): nanops.nanskew, func, allow_complex=False, - allow_str=False, allow_date=False, allow_tdelta=False, ) @@ -520,7 +457,6 @@ def test_nankurt(self): nanops.nankurt, func, allow_complex=False, - allow_str=False, allow_date=False, allow_tdelta=False, ) @@ -529,7 +465,6 @@ def test_nanprod(self): self.check_funs( nanops.nanprod, np.prod, - allow_str=False, allow_date=False, allow_tdelta=False, empty_targfunc=np.nanprod, @@ -695,45 +630,34 @@ def check_nancomp(self, checkfun, targ0): res2 = checkfun(arr_float_nan, arr_nan_float1) tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) - try: - arr_float = np.take(arr_float, 0, axis=-1) - arr_float1 = np.take(arr_float1, 0, axis=-1) - arr_nan = np.take(arr_nan, 0, axis=-1) - arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) - arr_float_nan = np.take(arr_float_nan, 0, axis=-1) - arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) - arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) - targ0 = np.take(targ0, 0, axis=-1) - except ValueError: - break - - def test_nangt(self): - targ0 = self.arr_float > self.arr_float1 - self.check_nancomp(nanops.nangt, targ0) - - def test_nange(self): - targ0 = self.arr_float >= self.arr_float1 - self.check_nancomp(nanops.nange, targ0) - - def test_nanlt(self): - targ0 = self.arr_float < self.arr_float1 - self.check_nancomp(nanops.nanlt, targ0) - - def test_nanle(self): - targ0 = self.arr_float <= self.arr_float1 - self.check_nancomp(nanops.nanle, targ0) - - def test_naneq(self): - targ0 = self.arr_float == self.arr_float1 - self.check_nancomp(nanops.naneq, targ0) - - 
def test_nanne(self): - targ0 = self.arr_float != self.arr_float1 - self.check_nancomp(nanops.nanne, targ0) - - def check_bool(self, func, value, correct, *args, **kwargs): + # Lower dimension for next step in the loop + arr_float = np.take(arr_float, 0, axis=-1) + arr_float1 = np.take(arr_float1, 0, axis=-1) + arr_nan = np.take(arr_nan, 0, axis=-1) + arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1) + arr_float_nan = np.take(arr_float_nan, 0, axis=-1) + arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1) + arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1) + targ0 = np.take(targ0, 0, axis=-1) + + @pytest.mark.parametrize( + "op,nanop", + [ + (operator.eq, nanops.naneq), + (operator.ne, nanops.nanne), + (operator.gt, nanops.nangt), + (operator.ge, nanops.nange), + (operator.lt, nanops.nanlt), + (operator.le, nanops.nanle), + ], + ) + def test_nan_comparison(self, op, nanop): + targ0 = op(self.arr_float, self.arr_float1) + self.check_nancomp(nanop, targ0) + + def check_bool(self, func, value, correct): while getattr(value, "ndim", True): - res0 = func(value, *args, **kwargs) + res0 = func(value) if correct: assert res0 else: @@ -741,10 +665,9 @@ def check_bool(self, func, value, correct, *args, **kwargs): if not hasattr(value, "ndim"): break - try: - value = np.take(value, 0, axis=-1) - except ValueError: - break + + # Reduce dimension for next step in the loop + value = np.take(value, 0, axis=-1) def test__has_infs(self): pairs = [
These are each going to need at least one more pass after this.
https://api.github.com/repos/pandas-dev/pandas/pulls/28553
2019-09-20T19:07:57Z
2019-09-23T12:01:39Z
2019-09-23T12:01:39Z
2019-09-23T12:47:18Z
Backport PR #28524: COMPAT: ensure no warnings on tab completion with Jedi 0.15
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 1cdf213d81a74..76c7ad208865d 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -100,7 +100,8 @@ Other ^^^^^ - Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`) -- +- Fix to ensure that tab-completion in an IPython console does not raise + warnings for deprecated attributes (:issue:`27900`). .. _whatsnew_0.252.contributors: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9aced760725be..400d8647ced92 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -150,7 +150,7 @@ class NDFrame(PandasObject, SelectionMixin): _internal_names_set = set(_internal_names) # type: Set[str] _accessors = set() # type: Set[str] _deprecations = frozenset( - ["as_blocks", "blocks", "is_copy"] + ["as_blocks", "blocks", "is_copy", "ftypes", "ix"] ) # type: FrozenSet[str] _metadata = [] # type: List[str] _is_copy = None diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 680976f44ee1e..0e4c9ffcc5858 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -225,7 +225,7 @@ class Index(IndexOpsMixin, PandasObject): """ # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = DirNamesMixin._deprecations | frozenset(["tolist"]) + _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"]) # To hand over control to subclasses _join_precedence = 1 diff --git a/pandas/core/series.py b/pandas/core/series.py index 9f31e185fe41a..8394766fb0286 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -56,7 +56,7 @@ import pandas as pd from pandas.core import algorithms, base, generic, nanops, ops -from pandas.core.accessor import CachedAccessor +from pandas.core.accessor import CachedAccessor, DirNamesMixin from pandas.core.arrays import ExtensionArray, SparseArray from pandas.core.arrays.categorical import Categorical, 
CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor @@ -178,8 +178,11 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _metadata = ["name"] _accessors = {"dt", "cat", "str", "sparse"} # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = generic.NDFrame._deprecations | frozenset( - ["asobject", "reshape", "get_value", "set_value", "valid", "tolist"] + _deprecations = ( + generic.NDFrame._deprecations + | DirNamesMixin._deprecations + | frozenset(["asobject", "reshape", "get_value", "set_value", "valid"]) + | frozenset(["ftype", "real", "imag", "tolist"]) ) # Override cache_readonly bc Series is mutable
Backport for #28524
https://api.github.com/repos/pandas-dev/pandas/pulls/28550
2019-09-20T15:55:55Z
2019-09-21T11:36:03Z
2019-09-21T11:36:03Z
2019-09-21T11:36:06Z
CLN: Exception and BaseException in test_nanops
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index eb39f01657b90..41b27f030d80f 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -165,25 +165,17 @@ def check_fun_data( else: targ = targfunc(targartempval, axis=axis, **kwargs) - try: - res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna: + res = testfunc(testarval, axis=axis, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if axis is None: + res = testfunc(testarval, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna and axis is None: + res = testfunc(testarval, **kwargs) self.check_results(targ, res, axis, check_dtype=check_dtype) - if skipna: - res = testfunc(testarval, axis=axis, **kwargs) - self.check_results(targ, res, axis, check_dtype=check_dtype) - if axis is None: - res = testfunc(testarval, skipna=skipna, **kwargs) - self.check_results(targ, res, axis, check_dtype=check_dtype) - if skipna and axis is None: - res = testfunc(testarval, **kwargs) - self.check_results(targ, res, axis, check_dtype=check_dtype) - except BaseException as exc: - exc.args += ( - "axis: {axis} of {of}".format(axis=axis, of=testarval.ndim - 1), - "skipna: {skipna}".format(skipna=skipna), - "kwargs: {kwargs}".format(kwargs=kwargs), - ) - raise if testarval.ndim <= 1: return @@ -222,23 +214,15 @@ def check_fun( testarval = getattr(self, testar) targarval = getattr(self, targar) targarnanval = getattr(self, targarnan) - try: - self.check_fun_data( - testfunc, - targfunc, - testarval, - targarval, - targarnanval, - empty_targfunc=empty_targfunc, - **kwargs - ) - except BaseException as exc: - exc.args += ( - "testar: {testar}".format(testar=testar), - "targar: {targar}".format(targar=targar), - "targarnan: {targarnan}".format(targarnan=targarnan), - ) - raise + 
self.check_fun_data( + testfunc, + targfunc, + testarval, + targarval, + targarnanval, + empty_targfunc=empty_targfunc, + **kwargs + ) def check_funs( self, @@ -697,23 +681,19 @@ def check_nancomp(self, checkfun, targ0): arr_nan_float1 = self.arr_nan_float1 while targ0.ndim: - try: - res0 = checkfun(arr_float, arr_float1) - tm.assert_almost_equal(targ0, res0) + res0 = checkfun(arr_float, arr_float1) + tm.assert_almost_equal(targ0, res0) - if targ0.ndim > 1: - targ1 = np.vstack([targ0, arr_nan]) - else: - targ1 = np.hstack([targ0, arr_nan]) - res1 = checkfun(arr_float_nan, arr_float1_nan) - tm.assert_numpy_array_equal(targ1, res1, check_dtype=False) - - targ2 = arr_nan_nan - res2 = checkfun(arr_float_nan, arr_nan_float1) - tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) - except Exception as exc: - exc.args += ("ndim: {arr_float.ndim}".format(arr_float=arr_float),) - raise + if targ0.ndim > 1: + targ1 = np.vstack([targ0, arr_nan]) + else: + targ1 = np.hstack([targ0, arr_nan]) + res1 = checkfun(arr_float_nan, arr_float1_nan) + tm.assert_numpy_array_equal(targ1, res1, check_dtype=False) + + targ2 = arr_nan_nan + res2 = checkfun(arr_float_nan, arr_nan_float1) + tm.assert_numpy_array_equal(targ2, res2, check_dtype=False) try: arr_float = np.take(arr_float, 0, axis=-1) @@ -753,15 +733,12 @@ def test_nanne(self): def check_bool(self, func, value, correct, *args, **kwargs): while getattr(value, "ndim", True): - try: - res0 = func(value, *args, **kwargs) - if correct: - assert res0 - else: - assert not res0 - except BaseException as exc: - exc.args += ("dim: {}".format(getattr(value, "ndim", value)),) - raise + res0 = func(value, *args, **kwargs) + if correct: + assert res0 + else: + assert not res0 + if not hasattr(value, "ndim"): break try: @@ -796,21 +773,13 @@ def test__has_infs(self): for arr, correct in pairs: val = getattr(self, arr) - try: - self.check_bool(nanops._has_infs, val, correct) - except BaseException as exc: - exc.args += (arr,) - raise + 
self.check_bool(nanops._has_infs, val, correct) for arr, correct in pairs_float: val = getattr(self, arr) - try: - self.check_bool(nanops._has_infs, val, correct) - self.check_bool(nanops._has_infs, val.astype("f4"), correct) - self.check_bool(nanops._has_infs, val.astype("f2"), correct) - except BaseException as exc: - exc.args += (arr,) - raise + self.check_bool(nanops._has_infs, val, correct) + self.check_bool(nanops._has_infs, val.astype("f4"), correct) + self.check_bool(nanops._has_infs, val.astype("f2"), correct) def test__isfinite(self): pairs = [ @@ -844,21 +813,13 @@ def test__isfinite(self): for arr, correct in pairs: val = getattr(self, arr) - try: - self.check_bool(func1, val, correct) - except BaseException as exc: - exc.args += (arr,) - raise + self.check_bool(func1, val, correct) for arr, correct in pairs_float: val = getattr(self, arr) - try: - self.check_bool(func1, val, correct) - self.check_bool(func1, val.astype("f4"), correct) - self.check_bool(func1, val.astype("f2"), correct) - except BaseException as exc: - exc.args += (arr,) - raise + self.check_bool(func1, val, correct) + self.check_bool(func1, val.astype("f4"), correct) + self.check_bool(func1, val.astype("f2"), correct) def test__bn_ok_dtype(self): assert nanops._bn_ok_dtype(self.arr_float.dtype, "test")
AFAICT these are artifacts of pre-pytest usage where we needed to manually add info to the traceback. Adds a code_check to make sure we aren't catching BaseException anywhere. BaseException includes KeyboardInterrupt, which we shouldn't be catching in general. Saving parametrization in test_nanops for a separate pass, as the diff will get big.
https://api.github.com/repos/pandas-dev/pandas/pulls/28544
2019-09-19T21:44:32Z
2019-09-20T06:14:59Z
2019-09-20T06:14:59Z
2019-09-20T14:18:34Z
WEB/CI: Fixing target path of the web build
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5b3d4e91c1e02..62c46b6970969 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -121,7 +121,7 @@ jobs: - script: | source activate pandas-dev - python web/pandas_web.py web/pandas + python web/pandas_web.py web/pandas --target-path=web/build displayName: 'Build website' - script: |
http://dev,pandas.io is currently not serving the website (http://dev,pandas.io/docs) In the deployment of the new website, when building the web, the target default path `build` is not working as expected. The path is later expected to be `web/build`, but since the script is called from the root and not from inside web, the target path is `build/`, and the website is not copied. This PR fixes the target path to be the correct one, and should fix http://dev,pandas.io @TomAugspurger if you don't mind having a quick look.
https://api.github.com/repos/pandas-dev/pandas/pulls/28543
2019-09-19T21:25:40Z
2019-09-19T22:24:48Z
2019-09-19T22:24:48Z
2019-09-19T23:23:40Z
ENH: Add dta 119 reading to StataReader
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 54a6171f623f6..2b6fc46311ea7 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -37,7 +37,7 @@ Other enhancements pandas (so it will become an integer or float dtype depending on the presence of missing data). (:issue:`28368`) - :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`) - +- :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`) Build Changes ^^^^^^^^^^^^^ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 31fdaa5cc6735..c67106e897727 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1139,13 +1139,17 @@ def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. self.path_or_buf.read(27) # stata_dta><header><release> self.format_version = int(self.path_or_buf.read(3)) - if self.format_version not in [117, 118]: + if self.format_version not in [117, 118, 119]: raise ValueError(_version_error) self._set_encoding() self.path_or_buf.read(21) # </release><byteorder> self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<" self.path_or_buf.read(15) # </byteorder><K> - self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0] + nvar_type = "H" if self.format_version <= 118 else "I" + nvar_size = 2 if self.format_version <= 118 else 4 + self.nvar = struct.unpack( + self.byteorder + nvar_type, self.path_or_buf.read(nvar_size) + )[0] self.path_or_buf.read(7) # </K><N> self.nobs = self._get_nobs() @@ -1207,7 +1211,7 @@ def _read_new_header(self, first_char): self.path_or_buf.seek(self._seek_variable_labels) self._variable_labels = self._get_variable_labels() - # Get data type information, works for versions 117-118. + # Get data type information, works for versions 117-119. 
def _get_dtypes(self, seek_vartypes): self.path_or_buf.seek(seek_vartypes) @@ -1241,14 +1245,14 @@ def f(typ): def _get_varlist(self): if self.format_version == 117: b = 33 - elif self.format_version == 118: + elif self.format_version >= 118: b = 129 return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)] # Returns the format list def _get_fmtlist(self): - if self.format_version == 118: + if self.format_version >= 118: b = 57 elif self.format_version > 113: b = 49 @@ -1270,7 +1274,7 @@ def _get_lbllist(self): return [self._decode(self.path_or_buf.read(b)) for i in range(self.nvar)] def _get_variable_labels(self): - if self.format_version == 118: + if self.format_version >= 118: vlblist = [ self._decode(self.path_or_buf.read(321)) for i in range(self.nvar) ] @@ -1285,13 +1289,13 @@ def _get_variable_labels(self): return vlblist def _get_nobs(self): - if self.format_version == 118: + if self.format_version >= 118: return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0] else: return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0] def _get_data_label(self): - if self.format_version == 118: + if self.format_version >= 118: strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0] return self._decode(self.path_or_buf.read(strlen)) elif self.format_version == 117: @@ -1303,7 +1307,7 @@ def _get_data_label(self): return self._decode(self.path_or_buf.read(32)) def _get_time_stamp(self): - if self.format_version == 118: + if self.format_version >= 118: strlen = struct.unpack("b", self.path_or_buf.read(1))[0] return self.path_or_buf.read(strlen).decode("utf-8") elif self.format_version == 117: @@ -1321,7 +1325,7 @@ def _get_seek_variable_labels(self): # a work around that uses the previous label, 33 bytes for each # variable, 20 for the closing tag and 17 for the opening tag return self._seek_value_label_names + (33 * self.nvar) + 20 + 17 - elif self.format_version == 118: + elif self.format_version >= 118: 
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17 else: raise ValueError() @@ -1519,10 +1523,12 @@ def _read_strls(self): else: buf = self.path_or_buf.read(12) # Only tested on little endian file on little endian machine. + v_size = 2 if self.format_version == 118 else 3 if self.byteorder == "<": - buf = buf[0:2] + buf[4:10] + buf = buf[0:v_size] + buf[4 : 12 - v_size] else: - buf = buf[0:2] + buf[6:] + # This path may not be correct, impossible to test + buf = buf[0:v_size] + buf[4 + v_size :] v_o = struct.unpack("Q", buf)[0] typ = struct.unpack("B", self.path_or_buf.read(1))[0] length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0] diff --git a/pandas/tests/io/data/stata1_119.dta.gz b/pandas/tests/io/data/stata1_119.dta.gz new file mode 100644 index 0000000000000..0f75d8b92db14 Binary files /dev/null and b/pandas/tests/io/data/stata1_119.dta.gz differ diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 1e7d568602656..a0ec06a2197ae 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -101,6 +101,8 @@ def setup_method(self, datapath): self.dta24_111 = os.path.join(self.dirpath, "stata7_111.dta") self.dta25_118 = os.path.join(self.dirpath, "stata16_118.dta") + self.dta26_119 = os.path.join(self.dirpath, "stata1_119.dta.gz") + self.stata_dates = os.path.join(self.dirpath, "stata13_dates.dta") def read_dta(self, file): @@ -1780,3 +1782,14 @@ def test_encoding_latin1_118(self): expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"]) tm.assert_frame_equal(encoded, expected) + + @pytest.mark.slow + def test_stata_119(self): + # Gzipped since contains 32,999 variables and uncompressed is 20MiB + with gzip.open(self.dta26_119, "rb") as gz: + df = read_stata(gz) + assert df.shape == (1, 32999) + assert df.iloc[0, 6] == "A" * 3000 + assert df.iloc[0, 7] == 3.14 + assert df.iloc[0, -1] == 1 + assert df.iloc[0, 0] == pd.Timestamp(datetime(2012, 12, 21, 21, 12, 
21))
Add requirements for reading 119 format files - [X] closes #28250 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28542
2019-09-19T21:09:13Z
2019-09-20T12:40:13Z
2019-09-20T12:40:12Z
2019-12-19T22:29:34Z
Pandas.io.formats.style.Styler docstring PR02
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 033d93d1456c8..95e1084747aa3 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -645,7 +645,7 @@ def apply(self, func, axis=0, subset=None, **kwargs): subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice - kwargs : dict + **kwargs : dict pass along to ``func`` Returns @@ -697,7 +697,7 @@ def applymap(self, func, subset=None, **kwargs): subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice - kwargs : dict + **kwargs : dict pass along to ``func`` Returns @@ -732,7 +732,7 @@ def where(self, cond, value, other=None, subset=None, **kwargs): subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice - kwargs : dict + **kwargs : dict pass along to ``cond`` Returns @@ -965,8 +965,10 @@ def background_gradient( ---------- cmap : str or colormap matplotlib colormap - low, high : float - compress the range by these values. + low : float + compress the range by the low. + high : float + compress the range by the high. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once @@ -1078,7 +1080,7 @@ def set_properties(self, subset=None, **kwargs): ---------- subset : IndexSlice a valid slice for ``data`` to limit the style application to - kwargs : dict + **kwargs : dict property: value pairs to be set for each cell Returns @@ -1350,8 +1352,10 @@ def pipe(self, func, *args, **kwargs): Function to apply to the Styler. Alternatively, a ``(callable, keyword)`` tuple where ``keyword`` is a string indicating the keyword of ``callable`` that expects the Styler. - *args, **kwargs : + *args : optional Arguments passed to `func`. 
+ **kwargs : optional + A dictionary of keyword arguments passed into ``func``. Returns -------
Solves: - Unknown parameters {kwargs} in apply method in Styler class - Unknown parameters {kwargs} in applymap method in Styler class - Unknown parameters {kwargs} in where method in Styler class - Unknown parameters {low, high} in background_gradient method in Styler class - Unknown parameters {kwargs} in set_properties method in Styler class - Unknown parameters {*args, **kwargs :} in pipe method in Styler class for issues: pandas.io.formats.style.Styler.apply: Unknown parameters {kwargs} pandas.io.formats.style.Styler.applymap: Unknown parameters {kwargs} pandas.io.formats.style.Styler.where: Unknown parameters {kwargs} pandas.io.formats.style.Styler.set_properties: Unknown parameters {kwargs} pandas.io.formats.style.Styler.pipe: Unknown parameters {*args, **kwargs :} pandas.io.formats.style.Styler.background_gradient: Unknown parameters {low, high} all in #27976
https://api.github.com/repos/pandas-dev/pandas/pulls/28539
2019-09-19T20:27:30Z
2019-09-20T12:45:27Z
2019-09-20T12:45:27Z
2019-09-20T12:46:28Z
REF: Parametrize value_counts tests
diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index c7b28822092a8..f8bd8843ab7e3 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -52,29 +52,30 @@ def seed_df(seed_nans, n, m): @pytest.mark.slow @pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids) -def test_series_groupby_value_counts(df, keys, bins, n, m): +@pytest.mark.parametrize("isort", [True, False]) +@pytest.mark.parametrize("normalize", [True, False]) +@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("ascending", [True, False]) +@pytest.mark.parametrize("dropna", [True, False]) +def test_series_groupby_value_counts( + df, keys, bins, n, m, isort, normalize, sort, ascending, dropna +): def rebuild_index(df): arr = list(map(df.index.get_level_values, range(df.index.nlevels))) df.index = MultiIndex.from_arrays(arr, names=df.index.names) return df - for isort, normalize, sort, ascending, dropna in product((False, True), repeat=5): - - kwargs = dict( - normalize=normalize, - sort=sort, - ascending=ascending, - dropna=dropna, - bins=bins, - ) + kwargs = dict( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna, bins=bins + ) - gr = df.groupby(keys, sort=isort) - left = gr["3rd"].value_counts(**kwargs) + gr = df.groupby(keys, sort=isort) + left = gr["3rd"].value_counts(**kwargs) - gr = df.groupby(keys, sort=isort) - right = gr["3rd"].apply(Series.value_counts, **kwargs) - right.index.names = right.index.names[:-1] + ["3rd"] + gr = df.groupby(keys, sort=isort) + right = gr["3rd"].apply(Series.value_counts, **kwargs) + right.index.names = right.index.names[:-1] + ["3rd"] - # have to sort on index because of unstable sort on values - left, right = map(rebuild_index, (left, right)) # xref GH9212 - tm.assert_series_equal(left.sort_index(), right.sort_index()) + # have to sort on index because of unstable sort on values + left, right = map(rebuild_index, 
(left, right)) # xref GH9212 + tm.assert_series_equal(left.sort_index(), right.sort_index())
Parametrizes the for loop in `test_series_groupby_value_counts`. As a side note, this test seems to run for a pretty long time (a minute and a half); should it operate on less data perhaps?
https://api.github.com/repos/pandas-dev/pandas/pulls/28537
2019-09-19T20:03:19Z
2019-09-20T14:27:59Z
2019-09-20T14:27:59Z
2019-09-20T14:42:03Z
CLN+TST: Catch specific exception in equals
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5dff1f93264c3..0335058a69c63 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1918,6 +1918,9 @@ def sequence_to_dt64ns( tz = validate_tz_from_dtype(dtype, tz) if isinstance(data, ABCIndexClass): + if data.nlevels > 1: + # Without this check, data._data below is None + raise TypeError("Cannot create a DatetimeArray from a MultiIndex.") data = data._data # By this point we are assured to have either a numpy array or Index diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c7664d9777c71..bf89bbbdf2b79 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -192,7 +192,11 @@ def equals(self, other): elif not isinstance(other, type(self)): try: other = type(self)(other) - except Exception: + except (ValueError, TypeError, OverflowError): + # e.g. + # ValueError -> cannot parse str entry, or OutOfBoundsDatetime + # TypeError -> trying to convert IntervalIndex to DatetimeIndex + # OverflowError -> Index([very_large_timedeltas]) return False if not is_dtype_equal(self.dtype, other.dtype): diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index d749d9bb47d25..c3cda22497ecb 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -15,6 +15,11 @@ class TestDatetimeArrayConstructor: + def test_from_sequence_invalid_type(self): + mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)]) + with pytest.raises(TypeError, match="Cannot create a DatetimeArray"): + DatetimeArray._from_sequence(mi) + def test_only_1dim_accepted(self): arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]") diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index d4dff2cbce89b..2ec267c66091b 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ 
b/pandas/tests/indexes/datetimes/test_ops.py @@ -393,6 +393,18 @@ def test_equals(self): assert not idx.equals(list(idx3)) assert not idx.equals(pd.Series(idx3)) + # check that we do not raise when comparing with OutOfBounds objects + oob = pd.Index([datetime(2500, 1, 1)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + assert not idx3.equals(oob) + + # check that we do not raise when comparing with OutOfBounds dt64 + oob2 = oob.map(np.datetime64) + assert not idx.equals(oob2) + assert not idx2.equals(oob2) + assert not idx3.equals(oob2) + @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []]) @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)]) @pytest.mark.parametrize("tz", [None, "US/Eastern"]) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index d7d8b10347861..54ed5058b5253 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,3 +1,5 @@ +from datetime import timedelta + import numpy as np import pytest @@ -266,6 +268,17 @@ def test_equals(self): assert not idx.equals(list(idx2)) assert not idx.equals(pd.Series(idx2)) + # Check that we dont raise OverflowError on comparisons outside the + # implementation range + oob = pd.Index([timedelta(days=10 ** 6)] * 3, dtype=object) + assert not idx.equals(oob) + assert not idx2.equals(oob) + + # FIXME: oob.apply(np.timedelta64) incorrectly overflows + oob2 = pd.Index([np.timedelta64(x) for x in oob], dtype=object) + assert not idx.equals(oob2) + assert not idx2.equals(oob2) + @pytest.mark.parametrize("values", [["0 days", "2 days", "4 days"], []]) @pytest.mark.parametrize("freq", ["2D", Day(2), "48H", Hour(48)]) def test_freq_setter(self, values, freq):
https://api.github.com/repos/pandas-dev/pandas/pulls/28532
2019-09-19T17:21:02Z
2019-09-26T15:12:02Z
2019-09-26T15:12:02Z
2019-09-26T15:13:41Z
TST: Call tests just once with --dist=loadscope
diff --git a/.travis.yml b/.travis.yml index 398dd07089ef9..048736e4bf1d0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -85,15 +85,6 @@ install: - ci/submit_cython_cache.sh - echo "install done" - -before_script: - # display server (for clipboard functionality) needs to be started here, - # does not work if done in install:setup_env.sh (GH-26103) - - export DISPLAY=":99.0" - - echo "sh -e /etc/init.d/xvfb start" - - if [ "$JOB" != "3.8-dev" ]; then sh -e /etc/init.d/xvfb start; fi - - sleep 3 - script: - echo "script start" - echo "$JOB" diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index d6afb263b447f..66960ca2c6c10 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -73,33 +73,16 @@ jobs: - task: PublishTestResults@2 inputs: - testResultsFiles: 'test-data-*.xml' + testResultsFiles: 'test-data.xml' testRunTitle: ${{ format('{0}-$(CONDA_PY)', parameters.name) }} displayName: 'Publish test results' - powershell: | - $junitXml = "test-data-single.xml" - $(Get-Content $junitXml | Out-String) -match 'failures="(.*?)"' - if ($matches[1] -eq 0) - { - Write-Host "No test failures in test-data-single" - } - else - { - # note that this will produce $LASTEXITCODE=1 - Write-Error "$($matches[1]) tests failed" - } - - $junitXmlMulti = "test-data-multiple.xml" - $(Get-Content $junitXmlMulti | Out-String) -match 'failures="(.*?)"' - if ($matches[1] -eq 0) - { - Write-Host "No test failures in test-data-multi" - } - else - { - # note that this will produce $LASTEXITCODE=1 - Write-Error "$($matches[1]) tests failed" + $(Get-Content "test-data.xml" | Out-String) -match 'failures="(.*?)"' + if ($matches[1] -eq 0) { + Write-Host "No test failures in test-data" + } else { + Write-Error "$($matches[1]) tests failed" # will produce $LASTEXITCODE=1 } displayName: 'Check for test failures' diff --git a/ci/print_skipped.py b/ci/print_skipped.py index e99e789a71fe8..51a2460e05fab 100755 --- a/ci/print_skipped.py +++ b/ci/print_skipped.py @@ -27,14 +27,13 @@ def 
main(filename): if __name__ == "__main__": print("SKIPPED TESTS:") i = 1 - for file_type in ("-single", "-multiple", ""): - for test_data in main("test-data{}.xml".format(file_type)): - if test_data is None: - print("-" * 80) - else: - print( - "#{i} {class_name}.{test_name}: {message}".format( - **dict(test_data, i=i) - ) + for test_data in main("test-data.xml"): + if test_data is None: + print("-" * 80) + else: + print( + "#{i} {class_name}.{test_name}: {message}".format( + **dict(test_data, i=i) ) - i += 1 + ) + i += 1 diff --git a/ci/run_tests.sh b/ci/run_tests.sh index d1a9447c97d4e..b91cfb3bed8cc 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -15,37 +15,29 @@ if [ -n "$LOCALE_OVERRIDE" ]; then # exit 1 fi fi + if [[ "not network" == *"$PATTERN"* ]]; then export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4; fi - -if [ -n "$PATTERN" ]; then - PATTERN=" and $PATTERN" +if [ "$COVERAGE" ]; then + COVERAGE_FNAME="/tmp/test_coverage.xml" + COVERAGE="-s --cov=pandas --cov-report=xml:$COVERAGE_FNAME" fi -for TYPE in single multiple -do - if [ "$COVERAGE" ]; then - COVERAGE_FNAME="/tmp/coc-$TYPE.xml" - COVERAGE="-s --cov=pandas --cov-report=xml:$COVERAGE_FNAME" - fi +PYTEST_CMD="pytest -m \"$PATTERN\" -n auto --dist=loadfile -s --strict --durations=10 --junitxml=test-data.xml $TEST_ARGS $COVERAGE pandas" - TYPE_PATTERN=$TYPE - NUM_JOBS=1 - if [[ "$TYPE_PATTERN" == "multiple" ]]; then - TYPE_PATTERN="not single" - NUM_JOBS=2 - fi +# Travis does not have have an X server +if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then + DISPLAY=DISPLAY=:99.0 + PYTEST_CMD="xvfb-run -e /dev/stdout $PYTEST_CMD" +fi - PYTEST_CMD="pytest -m \"$TYPE_PATTERN$PATTERN\" -n $NUM_JOBS -s --strict --durations=10 --junitxml=test-data-$TYPE.xml $TEST_ARGS $COVERAGE pandas" - echo $PYTEST_CMD - # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code - sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || 
exit \$ret" +echo $PYTEST_CMD +sh -c "$PYTEST_CMD" - if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then - echo "uploading coverage for $TYPE tests" - echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME - fi -done +if [[ "$COVERAGE" && $? == 0 && "$TRAVIS_BRANCH" == "master" ]]; then + echo "uploading coverage" + echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" + bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME +fi diff --git a/environment.yml b/environment.yml index a3582c56ee9d2..bbf3c036f65c4 100644 --- a/environment.yml +++ b/environment.yml @@ -53,7 +53,7 @@ dependencies: - moto # mock S3 - pytest>=4.0.2 - pytest-cov - - pytest-xdist + - pytest-xdist>=1.21 - seaborn - statsmodels diff --git a/requirements-dev.txt b/requirements-dev.txt index 6235b61d92f29..5633a58f254ca 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,7 +32,7 @@ hypothesis>=3.82 moto pytest>=4.0.2 pytest-cov -pytest-xdist +pytest-xdist>=1.21 seaborn statsmodels ipywidgets
Another try to what was tried in #26949. Before this PR the tests are called twice, once in a core for the tests that affect shared results, and once in parallel for the rest. This PR makes a single call, and tests in the same scope (class or module) are granted to run in the same core, so no shared data problems should happen. The tests being very slow was possibly caused by the proxy env variables, and not `--dist=loadscope`. But please check how long tests took before merging.
https://api.github.com/repos/pandas-dev/pandas/pulls/28531
2019-09-19T15:32:56Z
2019-11-17T14:20:08Z
2019-11-17T14:20:08Z
2019-11-17T18:08:03Z
BUG: wrong exception raised by Week+Day
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a5af4e727391a..3beaa2dfa788a 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -147,7 +147,7 @@ Datetimelike - Bug in :class:`Timestamp` subtraction when subtracting a :class:`Timestamp` from a ``np.datetime64`` object incorrectly raising ``TypeError`` (:issue:`28286`) - Addition and subtraction of integer or integer-dtype arrays with :class:`Timestamp` will now raise ``NullFrequencyError`` instead of ``ValueError`` (:issue:`28268`) - Bug in :class:`Series` and :class:`DataFrame` with integer dtype failing to raise ``TypeError`` when adding or subtracting a ``np.datetime64`` object (:issue:`28080`) -- +- Bug in :class:`Week` with ``weekday`` incorrectly raising ``AttributeError`` instead of ``TypeError`` when adding or subtracting an invalid type (:issue:`28530`) Timedelta diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 3ed25b8d3edd5..ddf2c6e65b474 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -4348,3 +4348,12 @@ def test_last_week_of_month_on_offset(): slow = (ts + offset) - offset == ts fast = offset.onOffset(ts) assert fast == slow + + +def test_week_add_invalid(): + # Week with weekday should raise TypeError and _not_ AttributeError + # when adding invalid offset + offset = Week(weekday=1) + other = Day() + with pytest.raises(TypeError, match="Cannot add"): + offset + other diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index dfe91b514bbe1..4491e6ad9ac7e 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -138,7 +138,7 @@ def to_offset(freq): delta = offset else: delta = delta + offset - except Exception: + except ValueError: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) else: @@ -170,7 +170,7 @@ def to_offset(freq): delta = offset else: delta = 
delta + offset - except Exception: + except (ValueError, TypeError): raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) if delta is None: diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index edf58ba3850a1..82cbfa831bf32 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -605,7 +605,7 @@ def apply(self, other): return BDay(self.n, offset=self.offset + other, normalize=self.normalize) else: raise ApplyTypeError( - "Only know how to combine business day with " "datetime or timedelta." + "Only know how to combine business day with datetime or timedelta." ) @apply_index_wraps @@ -1545,6 +1545,13 @@ def apply(self, other): if self.weekday is None: return other + self.n * self._inc + if not isinstance(other, datetime): + raise TypeError( + "Cannot add {typ} to {cls}".format( + typ=type(other).__name__, cls=type(self).__name__ + ) + ) + k = self.n otherDay = other.weekday() if otherDay != self.weekday:
With that fixed, we can clean up an Exception in tseries.frequencies.
https://api.github.com/repos/pandas-dev/pandas/pulls/28530
2019-09-19T15:08:46Z
2019-09-19T20:33:52Z
2019-09-19T20:33:52Z
2019-09-19T21:10:10Z
WEB: Fix deployment of the website
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index ba7a3bfb6ae36..5b3d4e91c1e02 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -171,7 +171,7 @@ jobs: eq(variables['Build.SourceBranch'], 'refs/heads/master')) - script: | - cd doc/build/html + cd to_deploy git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git git push -f origin master displayName: 'Publish web and docs to GitHub pages'
The master build is broken [1], because one directory wasn't updated in #28497 (the step only runs in master builds and not PR builds, so couldn't be detected). This updates the directory, the deployment should work again. 1. https://dev.azure.com/pandas-dev/pandas/_build/results?buildId=17721
https://api.github.com/repos/pandas-dev/pandas/pulls/28529
2019-09-19T15:03:22Z
2019-09-19T15:58:39Z
2019-09-19T15:58:39Z
2019-09-19T15:58:39Z
COMPAT: ensure no warnings on tab completion with Jedi 0.15
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index de411ef63680a..14682b706f924 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -100,7 +100,8 @@ Other ^^^^^ - Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`) -- +- Fix to ensure that tab-completion in an IPython console does not raise + warnings for deprecated attributes (:issue:`27900`). .. _whatsnew_0.252.contributors: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7e77c56fefe04..152983451bc38 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -166,7 +166,7 @@ class NDFrame(PandasObject, SelectionMixin): _internal_names_set = set(_internal_names) # type: Set[str] _accessors = set() # type: Set[str] _deprecations = frozenset( - ["as_blocks", "blocks", "is_copy"] + ["as_blocks", "blocks", "is_copy", "ftypes", "ix"] ) # type: FrozenSet[str] _metadata = [] # type: List[str] _is_copy = None diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6ef9d78ff9e97..f5f7056d8bbcf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -205,7 +205,7 @@ class Index(IndexOpsMixin, PandasObject): """ # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = DirNamesMixin._deprecations | frozenset(["tolist"]) + _deprecations = DirNamesMixin._deprecations | frozenset(["tolist", "dtype_str"]) # To hand over control to subclasses _join_precedence = 1 diff --git a/pandas/core/series.py b/pandas/core/series.py index b0616c053df6d..2431bfcfd0356 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,7 +54,7 @@ import pandas as pd from pandas.core import algorithms, base, generic, nanops, ops -from pandas.core.accessor import CachedAccessor +from pandas.core.accessor import CachedAccessor, DirNamesMixin from pandas.core.arrays import ExtensionArray from pandas.core.arrays.categorical import Categorical, 
CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor @@ -176,8 +176,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _metadata = ["name"] _accessors = {"dt", "cat", "str", "sparse"} # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = generic.NDFrame._deprecations | frozenset( - ["asobject", "reshape", "valid", "tolist"] + _deprecations = ( + generic.NDFrame._deprecations + | DirNamesMixin._deprecations + | frozenset(["asobject", "reshape", "valid", "tolist", "ftype", "real", "imag"]) ) # Override cache_readonly bc Series is mutable
Closes https://github.com/pandas-dev/pandas/issues/27900 I didn't yet add any tests, because I am not fully sure how to write them (the IPython tests we already have clearly don't work, I suppose the programmatic `ip.Completer.completions(..)` doesn't go through jedi)
https://api.github.com/repos/pandas-dev/pandas/pulls/28524
2019-09-19T12:01:09Z
2019-09-20T12:43:54Z
2019-09-20T12:43:53Z
2019-09-20T15:56:19Z
DEV: skip pandas/__init__.py in isort's pre-commit hook
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b79f0f71dac23..3f98273a336cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,3 +15,4 @@ repos: hooks: - id: isort language: python_venv + exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$
I noticed that when you modified `pandas/__init__.py`, isort actually completely reordered it when using pre-commit hook. Apparantly, isort ignores the skip config when you explicitly pass a path to isort (pre-commit basically does `isort pandas/__init__.py` when that file changed). See https://github.com/pre-commit/mirrors-isort/issues/9, which suggested to add this exclude to the pre-commit config (although that duplicates the "skip" information from setup.cfg)
https://api.github.com/repos/pandas-dev/pandas/pulls/28517
2019-09-19T07:41:39Z
2019-09-19T14:17:25Z
2019-09-19T14:17:25Z
2019-09-19T14:17:29Z
CLN: clean-up internal sparse imports + restructure sparse submodule
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index a5af4e727391a..b890278d9ca30 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -96,9 +96,10 @@ Deprecations Removed SparseSeries and SparseDataFrame ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``SparseSeries`` and ``SparseDataFrame`` have been removed (:issue:`28425`). -We recommend using a ``Series`` or ``DataFrame`` with sparse values instead. -See :ref:`sparse.migration` for help with migrating existing code. +``SparseSeries``, ``SparseDataFrame`` and the ``DataFrame.to_sparse`` method +have been removed (:issue:`28425`). We recommend using a ``Series`` or +``DataFrame`` with sparse values instead. See :ref:`sparse.migration` for help +with migrating existing code. Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/__init__.py b/pandas/__init__.py index 59ecc7f609ae9..6d0c55a45ed46 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -114,7 +114,7 @@ DataFrame, ) -from pandas.core.sparse.api import SparseArray, SparseDtype +from pandas.core.arrays.sparse import SparseArray, SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py new file mode 100644 index 0000000000000..75f3819fb19fd --- /dev/null +++ b/pandas/core/arrays/sparse/__init__.py @@ -0,0 +1,5 @@ +# flake8: noqa: F401 + +from .accessor import SparseAccessor, SparseFrameAccessor +from .array import BlockIndex, IntIndex, SparseArray, _make_index +from .dtype import SparseDtype diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py new file mode 100644 index 0000000000000..57fd6d284af31 --- /dev/null +++ b/pandas/core/arrays/sparse/accessor.py @@ -0,0 +1,336 @@ +"""Sparse accessor""" + +import numpy as np + +from pandas.compat._optional import 
import_optional_dependency + +from pandas.core.dtypes.cast import find_common_type + +from pandas.core.accessor import PandasDelegate, delegate_names + +from .array import SparseArray +from .dtype import SparseDtype + + +class BaseAccessor: + _validation_msg = "Can only use the '.sparse' accessor with Sparse data." + + def __init__(self, data=None): + self._parent = data + self._validate(data) + + def _validate(self, data): + raise NotImplementedError + + +@delegate_names( + SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property" +) +class SparseAccessor(BaseAccessor, PandasDelegate): + """ + Accessor for SparseSparse from other sparse matrix data types. + """ + + def _validate(self, data): + if not isinstance(data.dtype, SparseDtype): + raise AttributeError(self._validation_msg) + + def _delegate_property_get(self, name, *args, **kwargs): + return getattr(self._parent.array, name) + + def _delegate_method(self, name, *args, **kwargs): + if name == "from_coo": + return self.from_coo(*args, **kwargs) + elif name == "to_coo": + return self.to_coo(*args, **kwargs) + else: + raise ValueError + + @classmethod + def from_coo(cls, A, dense_index=False): + """ + Create a Series with sparse values from a scipy.sparse.coo_matrix. + + Parameters + ---------- + A : scipy.sparse.coo_matrix + dense_index : bool, default False + If False (default), the SparseSeries index consists of only the + coords of the non-null entries of the original coo_matrix. + If True, the SparseSeries index consists of the full sorted + (row, col) coordinates of the coo_matrix. + + Returns + ------- + s : Series + A Series with sparse values. 
+ + Examples + -------- + >>> from scipy import sparse + >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), + shape=(3, 4)) + >>> A + <3x4 sparse matrix of type '<class 'numpy.float64'>' + with 3 stored elements in COOrdinate format> + >>> A.todense() + matrix([[ 0., 0., 1., 2.], + [ 3., 0., 0., 0.], + [ 0., 0., 0., 0.]]) + >>> ss = pd.Series.sparse.from_coo(A) + >>> ss + 0 2 1 + 3 2 + 1 0 3 + dtype: float64 + BlockIndex + Block locations: array([0], dtype=int32) + Block lengths: array([3], dtype=int32) + """ + from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series + from pandas import Series + + result = _coo_to_sparse_series(A, dense_index=dense_index) + result = Series(result.array, index=result.index, copy=False) + + return result + + def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False): + """ + Create a scipy.sparse.coo_matrix from a Series with MultiIndex. + + Use row_levels and column_levels to determine the row and column + coordinates respectively. row_levels and column_levels are the names + (labels) or numbers of the levels. {row_levels, column_levels} must be + a partition of the MultiIndex level names (or numbers). + + Parameters + ---------- + row_levels : tuple/list + column_levels : tuple/list + sort_labels : bool, default False + Sort the row and column labels before forming the sparse matrix. + + Returns + ------- + y : scipy.sparse.coo_matrix + rows : list (row labels) + columns : list (column labels) + + Examples + -------- + >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) + >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0), + (1, 2, 'a', 1), + (1, 1, 'b', 0), + (1, 1, 'b', 1), + (2, 1, 'b', 0), + (2, 1, 'b', 1)], + names=['A', 'B', 'C', 'D']) + >>> ss = s.astype("Sparse") + >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'], + ... column_levels=['C', 'D'], + ... 
sort_labels=True) + >>> A + <3x4 sparse matrix of type '<class 'numpy.float64'>' + with 3 stored elements in COOrdinate format> + >>> A.todense() + matrix([[ 0., 0., 1., 3.], + [ 3., 0., 0., 0.], + [ 0., 0., 0., 0.]]) + >>> rows + [(1, 1), (1, 2), (2, 1)] + >>> columns + [('a', 0), ('a', 1), ('b', 0), ('b', 1)] + """ + from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo + + A, rows, columns = _sparse_series_to_coo( + self._parent, row_levels, column_levels, sort_labels=sort_labels + ) + return A, rows, columns + + def to_dense(self): + """ + Convert a Series from sparse values to dense. + + .. versionadded:: 0.25.0 + + Returns + ------- + Series: + A Series with the same values, stored as a dense array. + + Examples + -------- + >>> series = pd.Series(pd.SparseArray([0, 1, 0])) + >>> series + 0 0 + 1 1 + 2 0 + dtype: Sparse[int64, 0] + + >>> series.sparse.to_dense() + 0 0 + 1 1 + 2 0 + dtype: int64 + """ + from pandas import Series + + return Series( + self._parent.array.to_dense(), + index=self._parent.index, + name=self._parent.name, + ) + + +class SparseFrameAccessor(BaseAccessor, PandasDelegate): + """ + DataFrame accessor for sparse data. + + .. versionadded:: 0.25.0 + """ + + def _validate(self, data): + dtypes = data.dtypes + if not all(isinstance(t, SparseDtype) for t in dtypes): + raise AttributeError(self._validation_msg) + + @classmethod + def from_spmatrix(cls, data, index=None, columns=None): + """ + Create a new DataFrame from a scipy sparse matrix. + + .. versionadded:: 0.25.0 + + Parameters + ---------- + data : scipy.sparse.spmatrix + Must be convertible to csc format. + index, columns : Index, optional + Row and column labels to use for the resulting DataFrame. + Defaults to a RangeIndex. + + Returns + ------- + DataFrame + Each column of the DataFrame is stored as a + :class:`SparseArray`. 
+ + Examples + -------- + >>> import scipy.sparse + >>> mat = scipy.sparse.eye(3) + >>> pd.DataFrame.sparse.from_spmatrix(mat) + 0 1 2 + 0 1.0 0.0 0.0 + 1 0.0 1.0 0.0 + 2 0.0 0.0 1.0 + """ + from pandas import DataFrame + + data = data.tocsc() + index, columns = cls._prep_index(data, index, columns) + sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])] + data = dict(enumerate(sparrays)) + result = DataFrame(data, index=index) + result.columns = columns + return result + + def to_dense(self): + """ + Convert a DataFrame with sparse values to dense. + + .. versionadded:: 0.25.0 + + Returns + ------- + DataFrame + A DataFrame with the same values stored as dense arrays. + + Examples + -------- + >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])}) + >>> df.sparse.to_dense() + A + 0 0 + 1 1 + 2 0 + """ + from pandas import DataFrame + + data = {k: v.array.to_dense() for k, v in self._parent.items()} + return DataFrame(data, index=self._parent.index, columns=self._parent.columns) + + def to_coo(self): + """ + Return the contents of the frame as a sparse SciPy COO matrix. + + .. versionadded:: 0.25.0 + + Returns + ------- + coo_matrix : scipy.sparse.spmatrix + If the caller is heterogeneous and contains booleans or objects, + the result will be of dtype=object. See Notes. + + Notes + ----- + The dtype will be the lowest-common-denominator type (implicit + upcasting); that is to say if the dtypes (even of numeric types) + are mixed, the one that accommodates all will be chosen. + + e.g. If the dtypes are float16 and float32, dtype will be upcast to + float32. By numpy.find_common_type convention, mixing int64 and + and uint64 will result in a float64 dtype. 
+ """ + import_optional_dependency("scipy") + from scipy.sparse import coo_matrix + + dtype = find_common_type(self._parent.dtypes) + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + + cols, rows, datas = [], [], [] + for col, name in enumerate(self._parent): + s = self._parent[name] + row = s.array.sp_index.to_int_index().indices + cols.append(np.repeat(col, len(row))) + rows.append(row) + datas.append(s.array.sp_values.astype(dtype, copy=False)) + + cols = np.concatenate(cols) + rows = np.concatenate(rows) + datas = np.concatenate(datas) + return coo_matrix((datas, (rows, cols)), shape=self._parent.shape) + + @property + def density(self) -> float: + """ + Ratio of non-sparse points to total (dense) data points + represented in the DataFrame. + """ + return np.mean([column.array.density for _, column in self._parent.items()]) + + @staticmethod + def _prep_index(data, index, columns): + import pandas.core.indexes.base as ibase + + N, K = data.shape + if index is None: + index = ibase.default_index(N) + if columns is None: + columns = ibase.default_index(K) + + if len(columns) != K: + raise ValueError( + "Column length mismatch: {columns} vs. {K}".format( + columns=len(columns), K=K + ) + ) + if len(index) != N: + raise ValueError( + "Index length mismatch: {index} vs. 
{N}".format(index=len(index), N=N) + ) + return index, columns diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse/array.py similarity index 71% rename from pandas/core/arrays/sparse.py rename to pandas/core/arrays/sparse/array.py index c88289c3a4592..5acc922734529 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse/array.py @@ -4,7 +4,6 @@ from collections import abc import numbers import operator -import re from typing import Any, Callable import warnings @@ -15,11 +14,9 @@ from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex from pandas._libs.tslibs import NaT import pandas.compat as compat -from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning -from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import ( astype_nansafe, construct_1d_arraylike_from_scalar, @@ -37,7 +34,6 @@ is_string_dtype, pandas_dtype, ) -from pandas.core.dtypes.dtypes import register_extension_dtype from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndexClass, @@ -46,8 +42,6 @@ ) from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna -from pandas._typing import Dtype -from pandas.core.accessor import PandasDelegate, delegate_names import pandas.core.algorithms as algos from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin from pandas.core.base import PandasObject @@ -58,329 +52,7 @@ import pandas.io.formats.printing as printing - -# ---------------------------------------------------------------------------- -# Dtype -@register_extension_dtype -class SparseDtype(ExtensionDtype): - """ - Dtype for data stored in :class:`SparseArray`. - - This dtype implements the pandas ExtensionDtype interface. - - .. 
versionadded:: 0.24.0 - - Parameters - ---------- - dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 - The dtype of the underlying array storing the non-fill value values. - fill_value : scalar, optional - The scalar value not stored in the SparseArray. By default, this - depends on `dtype`. - - =========== ========== - dtype na_value - =========== ========== - float ``np.nan`` - int ``0`` - bool ``False`` - datetime64 ``pd.NaT`` - timedelta64 ``pd.NaT`` - =========== ========== - - The default value may be overridden by specifying a `fill_value`. - - Attributes - ---------- - None - - Methods - ------- - None - """ - - # We include `_is_na_fill_value` in the metadata to avoid hash collisions - # between SparseDtype(float, 0.0) and SparseDtype(float, nan). - # Without is_na_fill_value in the comparison, those would be equal since - # hash(nan) is (sometimes?) 0. - _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") - - def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: - - if isinstance(dtype, type(self)): - if fill_value is None: - fill_value = dtype.fill_value - dtype = dtype.subtype - - dtype = pandas_dtype(dtype) - if is_string_dtype(dtype): - dtype = np.dtype("object") - - if fill_value is None: - fill_value = na_value_for_dtype(dtype) - - if not is_scalar(fill_value): - raise ValueError( - "fill_value must be a scalar. Got {} instead".format(fill_value) - ) - self._dtype = dtype - self._fill_value = fill_value - - def __hash__(self): - # Python3 doesn't inherit __hash__ when a base class overrides - # __eq__, so we explicitly do it here. - return super().__hash__() - - def __eq__(self, other): - # We have to override __eq__ to handle NA values in _metadata. - # The base class does simple == checks, which fail for NA. 
- if isinstance(other, str): - try: - other = self.construct_from_string(other) - except TypeError: - return False - - if isinstance(other, type(self)): - subtype = self.subtype == other.subtype - if self._is_na_fill_value: - # this case is complicated by two things: - # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) - # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) - # i.e. we want to treat any floating-point NaN as equal, but - # not a floating-point NaN and a datetime NaT. - fill_value = ( - other._is_na_fill_value - and isinstance(self.fill_value, type(other.fill_value)) - or isinstance(other.fill_value, type(self.fill_value)) - ) - else: - fill_value = self.fill_value == other.fill_value - - return subtype and fill_value - return False - - @property - def fill_value(self): - """ - The fill value of the array. - - Converting the SparseArray to a dense ndarray will fill the - array with this value. - - .. warning:: - - It's possible to end up with a SparseArray that has ``fill_value`` - values in ``sp_values``. This can occur, for example, when setting - ``SparseArray.fill_value`` directly. - """ - return self._fill_value - - @property - def _is_na_fill_value(self): - return isna(self.fill_value) - - @property - def _is_numeric(self): - return not is_object_dtype(self.subtype) - - @property - def _is_boolean(self): - return is_bool_dtype(self.subtype) - - @property - def kind(self): - """ - The sparse kind. Either 'integer', or 'block'. - """ - return self.subtype.kind - - @property - def type(self): - return self.subtype.type - - @property - def subtype(self): - return self._dtype - - @property - def name(self): - return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value) - - def __repr__(self): - return self.name - - @classmethod - def construct_array_type(cls): - return SparseArray - - @classmethod - def construct_from_string(cls, string): - """ - Construct a SparseDtype from a string form. 
- - Parameters - ---------- - string : str - Can take the following forms. - - string dtype - ================ ============================ - 'int' SparseDtype[np.int64, 0] - 'Sparse' SparseDtype[np.float64, nan] - 'Sparse[int]' SparseDtype[np.int64, 0] - 'Sparse[int, 0]' SparseDtype[np.int64, 0] - ================ ============================ - - It is not possible to specify non-default fill values - with a string. An argument like ``'Sparse[int, 1]'`` - will raise a ``TypeError`` because the default fill value - for integers is 0. - - Returns - ------- - SparseDtype - """ - msg = "Could not construct SparseDtype from '{}'".format(string) - if string.startswith("Sparse"): - try: - sub_type, has_fill_value = cls._parse_subtype(string) - except ValueError: - raise TypeError(msg) - else: - result = SparseDtype(sub_type) - msg = ( - "Could not construct SparseDtype from '{}'.\n\nIt " - "looks like the fill_value in the string is not " - "the default for the dtype. Non-default fill_values " - "are not supported. Use the 'SparseDtype()' " - "constructor instead." - ) - if has_fill_value and str(result) != string: - raise TypeError(msg.format(string)) - return result - else: - raise TypeError(msg) - - @staticmethod - def _parse_subtype(dtype): - """ - Parse a string to get the subtype - - Parameters - ---------- - dtype : str - A string like - - * Sparse[subtype] - * Sparse[subtype, fill_value] - - Returns - ------- - subtype : str - - Raises - ------ - ValueError - When the subtype cannot be extracted. 
- """ - xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$") - m = xpr.match(dtype) - has_fill_value = False - if m: - subtype = m.groupdict()["subtype"] - has_fill_value = m.groupdict()["fill_value"] or has_fill_value - elif dtype == "Sparse": - subtype = "float64" - else: - raise ValueError("Cannot parse {}".format(dtype)) - return subtype, has_fill_value - - @classmethod - def is_dtype(cls, dtype): - dtype = getattr(dtype, "dtype", dtype) - if isinstance(dtype, str) and dtype.startswith("Sparse"): - sub_type, _ = cls._parse_subtype(dtype) - dtype = np.dtype(sub_type) - elif isinstance(dtype, cls): - return True - return isinstance(dtype, np.dtype) or dtype == "Sparse" - - def update_dtype(self, dtype): - """ - Convert the SparseDtype to a new dtype. - - This takes care of converting the ``fill_value``. - - Parameters - ---------- - dtype : Union[str, numpy.dtype, SparseDtype] - The new dtype to use. - - * For a SparseDtype, it is simply returned - * For a NumPy dtype (or str), the current fill value - is converted to the new dtype, and a SparseDtype - with `dtype` and the new fill value is returned. - - Returns - ------- - SparseDtype - A new SparseDtype with the corret `dtype` and fill value - for that `dtype`. - - Raises - ------ - ValueError - When the current fill value cannot be converted to the - new `dtype` (e.g. trying to convert ``np.nan`` to an - integer dtype). - - - Examples - -------- - >>> SparseDtype(int, 0).update_dtype(float) - Sparse[float64, 0.0] - - >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) - Sparse[float64, nan] - """ - cls = type(self) - dtype = pandas_dtype(dtype) - - if not isinstance(dtype, cls): - fill_value = astype_nansafe(np.array(self.fill_value), dtype).item() - dtype = cls(dtype, fill_value=fill_value) - - return dtype - - @property - def _subtype_with_str(self): - """ - Whether the SparseDtype's subtype should be considered ``str``. 
- - Typically, pandas will store string data in an object-dtype array. - When converting values to a dtype, e.g. in ``.astype``, we need to - be more specific, we need the actual underlying type. - - Returns - ------- - - >>> SparseDtype(int, 1)._subtype_with_str - dtype('int64') - - >>> SparseDtype(object, 1)._subtype_with_str - dtype('O') - - >>> dtype = SparseDtype(str, '') - >>> dtype.subtype - dtype('O') - - >>> dtype._subtype_with_str - str - """ - if isinstance(self.fill_value, str): - return type(self.fill_value) - return self.subtype - +from .dtype import SparseDtype # ---------------------------------------------------------------------------- # Array @@ -1925,331 +1597,3 @@ def _make_index(length, indices, kind): else: # pragma: no cover raise ValueError("must be block or integer type") return index - - -# ---------------------------------------------------------------------------- -# Accessor - - -class BaseAccessor: - _validation_msg = "Can only use the '.sparse' accessor with Sparse data." - - def __init__(self, data=None): - self._parent = data - self._validate(data) - - def _validate(self, data): - raise NotImplementedError - - -@delegate_names( - SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property" -) -class SparseAccessor(BaseAccessor, PandasDelegate): - """ - Accessor for SparseSparse from other sparse matrix data types. - """ - - def _validate(self, data): - if not isinstance(data.dtype, SparseDtype): - raise AttributeError(self._validation_msg) - - def _delegate_property_get(self, name, *args, **kwargs): - return getattr(self._parent.array, name) - - def _delegate_method(self, name, *args, **kwargs): - if name == "from_coo": - return self.from_coo(*args, **kwargs) - elif name == "to_coo": - return self.to_coo(*args, **kwargs) - else: - raise ValueError - - @classmethod - def from_coo(cls, A, dense_index=False): - """ - Create a Series with sparse values from a scipy.sparse.coo_matrix. 
- - Parameters - ---------- - A : scipy.sparse.coo_matrix - dense_index : bool, default False - If False (default), the SparseSeries index consists of only the - coords of the non-null entries of the original coo_matrix. - If True, the SparseSeries index consists of the full sorted - (row, col) coordinates of the coo_matrix. - - Returns - ------- - s : Series - A Series with sparse values. - - Examples - -------- - >>> from scipy import sparse - >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), - shape=(3, 4)) - >>> A - <3x4 sparse matrix of type '<class 'numpy.float64'>' - with 3 stored elements in COOrdinate format> - >>> A.todense() - matrix([[ 0., 0., 1., 2.], - [ 3., 0., 0., 0.], - [ 0., 0., 0., 0.]]) - >>> ss = pd.Series.sparse.from_coo(A) - >>> ss - 0 2 1 - 3 2 - 1 0 3 - dtype: float64 - BlockIndex - Block locations: array([0], dtype=int32) - Block lengths: array([3], dtype=int32) - """ - from pandas.core.sparse.scipy_sparse import _coo_to_sparse_series - from pandas import Series - - result = _coo_to_sparse_series(A, dense_index=dense_index) - result = Series(result.array, index=result.index, copy=False) - - return result - - def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False): - """ - Create a scipy.sparse.coo_matrix from a Series with MultiIndex. - - Use row_levels and column_levels to determine the row and column - coordinates respectively. row_levels and column_levels are the names - (labels) or numbers of the levels. {row_levels, column_levels} must be - a partition of the MultiIndex level names (or numbers). - - Parameters - ---------- - row_levels : tuple/list - column_levels : tuple/list - sort_labels : bool, default False - Sort the row and column labels before forming the sparse matrix. 
- - Returns - ------- - y : scipy.sparse.coo_matrix - rows : list (row labels) - columns : list (column labels) - - Examples - -------- - >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan]) - >>> s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0), - (1, 2, 'a', 1), - (1, 1, 'b', 0), - (1, 1, 'b', 1), - (2, 1, 'b', 0), - (2, 1, 'b', 1)], - names=['A', 'B', 'C', 'D']) - >>> ss = s.astype("Sparse") - >>> A, rows, columns = ss.sparse.to_coo(row_levels=['A', 'B'], - ... column_levels=['C', 'D'], - ... sort_labels=True) - >>> A - <3x4 sparse matrix of type '<class 'numpy.float64'>' - with 3 stored elements in COOrdinate format> - >>> A.todense() - matrix([[ 0., 0., 1., 3.], - [ 3., 0., 0., 0.], - [ 0., 0., 0., 0.]]) - >>> rows - [(1, 1), (1, 2), (2, 1)] - >>> columns - [('a', 0), ('a', 1), ('b', 0), ('b', 1)] - """ - from pandas.core.sparse.scipy_sparse import _sparse_series_to_coo - - A, rows, columns = _sparse_series_to_coo( - self._parent, row_levels, column_levels, sort_labels=sort_labels - ) - return A, rows, columns - - def to_dense(self): - """ - Convert a Series from sparse values to dense. - - .. versionadded:: 0.25.0 - - Returns - ------- - Series: - A Series with the same values, stored as a dense array. - - Examples - -------- - >>> series = pd.Series(pd.SparseArray([0, 1, 0])) - >>> series - 0 0 - 1 1 - 2 0 - dtype: Sparse[int64, 0] - - >>> series.sparse.to_dense() - 0 0 - 1 1 - 2 0 - dtype: int64 - """ - from pandas import Series - - return Series( - self._parent.array.to_dense(), - index=self._parent.index, - name=self._parent.name, - ) - - -class SparseFrameAccessor(BaseAccessor, PandasDelegate): - """ - DataFrame accessor for sparse data. - - .. 
versionadded:: 0.25.0 - """ - - def _validate(self, data): - dtypes = data.dtypes - if not all(isinstance(t, SparseDtype) for t in dtypes): - raise AttributeError(self._validation_msg) - - @classmethod - def from_spmatrix(cls, data, index=None, columns=None): - """ - Create a new DataFrame from a scipy sparse matrix. - - .. versionadded:: 0.25.0 - - Parameters - ---------- - data : scipy.sparse.spmatrix - Must be convertible to csc format. - index, columns : Index, optional - Row and column labels to use for the resulting DataFrame. - Defaults to a RangeIndex. - - Returns - ------- - DataFrame - Each column of the DataFrame is stored as a - :class:`SparseArray`. - - Examples - -------- - >>> import scipy.sparse - >>> mat = scipy.sparse.eye(3) - >>> pd.DataFrame.sparse.from_spmatrix(mat) - 0 1 2 - 0 1.0 0.0 0.0 - 1 0.0 1.0 0.0 - 2 0.0 0.0 1.0 - """ - from pandas import DataFrame - - data = data.tocsc() - index, columns = cls._prep_index(data, index, columns) - sparrays = [SparseArray.from_spmatrix(data[:, i]) for i in range(data.shape[1])] - data = dict(enumerate(sparrays)) - result = DataFrame(data, index=index) - result.columns = columns - return result - - def to_dense(self): - """ - Convert a DataFrame with sparse values to dense. - - .. versionadded:: 0.25.0 - - Returns - ------- - DataFrame - A DataFrame with the same values stored as dense arrays. - - Examples - -------- - >>> df = pd.DataFrame({"A": pd.SparseArray([0, 1, 0])}) - >>> df.sparse.to_dense() - A - 0 0 - 1 1 - 2 0 - """ - from pandas import DataFrame - - data = {k: v.array.to_dense() for k, v in self._parent.items()} - return DataFrame(data, index=self._parent.index, columns=self._parent.columns) - - def to_coo(self): - """ - Return the contents of the frame as a sparse SciPy COO matrix. - - .. versionadded:: 0.25.0 - - Returns - ------- - coo_matrix : scipy.sparse.spmatrix - If the caller is heterogeneous and contains booleans or objects, - the result will be of dtype=object. See Notes. 
- - Notes - ----- - The dtype will be the lowest-common-denominator type (implicit - upcasting); that is to say if the dtypes (even of numeric types) - are mixed, the one that accommodates all will be chosen. - - e.g. If the dtypes are float16 and float32, dtype will be upcast to - float32. By numpy.find_common_type convention, mixing int64 and - and uint64 will result in a float64 dtype. - """ - import_optional_dependency("scipy") - from scipy.sparse import coo_matrix - - dtype = find_common_type(self._parent.dtypes) - if isinstance(dtype, SparseDtype): - dtype = dtype.subtype - - cols, rows, datas = [], [], [] - for col, name in enumerate(self._parent): - s = self._parent[name] - row = s.array.sp_index.to_int_index().indices - cols.append(np.repeat(col, len(row))) - rows.append(row) - datas.append(s.array.sp_values.astype(dtype, copy=False)) - - cols = np.concatenate(cols) - rows = np.concatenate(rows) - datas = np.concatenate(datas) - return coo_matrix((datas, (rows, cols)), shape=self._parent.shape) - - @property - def density(self) -> float: - """ - Ratio of non-sparse points to total (dense) data points - represented in the DataFrame. - """ - return np.mean([column.array.density for _, column in self._parent.items()]) - - @staticmethod - def _prep_index(data, index, columns): - import pandas.core.indexes.base as ibase - - N, K = data.shape - if index is None: - index = ibase.default_index(N) - if columns is None: - columns = ibase.default_index(K) - - if len(columns) != K: - raise ValueError( - "Column length mismatch: {columns} vs. {K}".format( - columns=len(columns), K=K - ) - ) - if len(index) != N: - raise ValueError( - "Index length mismatch: {index} vs. 
{N}".format(index=len(index), N=N) - ) - return index, columns diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py new file mode 100644 index 0000000000000..6fd73ae14fff1 --- /dev/null +++ b/pandas/core/arrays/sparse/dtype.py @@ -0,0 +1,343 @@ +"""Sparse Dtype""" + +import re +from typing import Any + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import astype_nansafe +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import register_extension_dtype +from pandas.core.dtypes.missing import isna, na_value_for_dtype + +from pandas._typing import Dtype + + +@register_extension_dtype +class SparseDtype(ExtensionDtype): + """ + Dtype for data stored in :class:`SparseArray`. + + This dtype implements the pandas ExtensionDtype interface. + + .. versionadded:: 0.24.0 + + Parameters + ---------- + dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 + The dtype of the underlying array storing the non-fill value values. + fill_value : scalar, optional + The scalar value not stored in the SparseArray. By default, this + depends on `dtype`. + + =========== ========== + dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool ``False`` + datetime64 ``pd.NaT`` + timedelta64 ``pd.NaT`` + =========== ========== + + The default value may be overridden by specifying a `fill_value`. + + Attributes + ---------- + None + + Methods + ------- + None + """ + + # We include `_is_na_fill_value` in the metadata to avoid hash collisions + # between SparseDtype(float, 0.0) and SparseDtype(float, nan). + # Without is_na_fill_value in the comparison, those would be equal since + # hash(nan) is (sometimes?) 0. 
+ _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") + + def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: + + if isinstance(dtype, type(self)): + if fill_value is None: + fill_value = dtype.fill_value + dtype = dtype.subtype + + dtype = pandas_dtype(dtype) + if is_string_dtype(dtype): + dtype = np.dtype("object") + + if fill_value is None: + fill_value = na_value_for_dtype(dtype) + + if not is_scalar(fill_value): + raise ValueError( + "fill_value must be a scalar. Got {} instead".format(fill_value) + ) + self._dtype = dtype + self._fill_value = fill_value + + def __hash__(self): + # Python3 doesn't inherit __hash__ when a base class overrides + # __eq__, so we explicitly do it here. + return super().__hash__() + + def __eq__(self, other): + # We have to override __eq__ to handle NA values in _metadata. + # The base class does simple == checks, which fail for NA. + if isinstance(other, str): + try: + other = self.construct_from_string(other) + except TypeError: + return False + + if isinstance(other, type(self)): + subtype = self.subtype == other.subtype + if self._is_na_fill_value: + # this case is complicated by two things: + # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) + # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) + # i.e. we want to treat any floating-point NaN as equal, but + # not a floating-point NaN and a datetime NaT. + fill_value = ( + other._is_na_fill_value + and isinstance(self.fill_value, type(other.fill_value)) + or isinstance(other.fill_value, type(self.fill_value)) + ) + else: + fill_value = self.fill_value == other.fill_value + + return subtype and fill_value + return False + + @property + def fill_value(self): + """ + The fill value of the array. + + Converting the SparseArray to a dense ndarray will fill the + array with this value. + + .. warning:: + + It's possible to end up with a SparseArray that has ``fill_value`` + values in ``sp_values``. 
This can occur, for example, when setting + ``SparseArray.fill_value`` directly. + """ + return self._fill_value + + @property + def _is_na_fill_value(self): + return isna(self.fill_value) + + @property + def _is_numeric(self): + return not is_object_dtype(self.subtype) + + @property + def _is_boolean(self): + return is_bool_dtype(self.subtype) + + @property + def kind(self): + """ + The sparse kind. Either 'integer', or 'block'. + """ + return self.subtype.kind + + @property + def type(self): + return self.subtype.type + + @property + def subtype(self): + return self._dtype + + @property + def name(self): + return "Sparse[{}, {}]".format(self.subtype.name, self.fill_value) + + def __repr__(self): + return self.name + + @classmethod + def construct_array_type(cls): + from .array import SparseArray + + return SparseArray + + @classmethod + def construct_from_string(cls, string): + """ + Construct a SparseDtype from a string form. + + Parameters + ---------- + string : str + Can take the following forms. + + string dtype + ================ ============================ + 'int' SparseDtype[np.int64, 0] + 'Sparse' SparseDtype[np.float64, nan] + 'Sparse[int]' SparseDtype[np.int64, 0] + 'Sparse[int, 0]' SparseDtype[np.int64, 0] + ================ ============================ + + It is not possible to specify non-default fill values + with a string. An argument like ``'Sparse[int, 1]'`` + will raise a ``TypeError`` because the default fill value + for integers is 0. + + Returns + ------- + SparseDtype + """ + msg = "Could not construct SparseDtype from '{}'".format(string) + if string.startswith("Sparse"): + try: + sub_type, has_fill_value = cls._parse_subtype(string) + except ValueError: + raise TypeError(msg) + else: + result = SparseDtype(sub_type) + msg = ( + "Could not construct SparseDtype from '{}'.\n\nIt " + "looks like the fill_value in the string is not " + "the default for the dtype. Non-default fill_values " + "are not supported. 
Use the 'SparseDtype()' " + "constructor instead." + ) + if has_fill_value and str(result) != string: + raise TypeError(msg.format(string)) + return result + else: + raise TypeError(msg) + + @staticmethod + def _parse_subtype(dtype): + """ + Parse a string to get the subtype + + Parameters + ---------- + dtype : str + A string like + + * Sparse[subtype] + * Sparse[subtype, fill_value] + + Returns + ------- + subtype : str + + Raises + ------ + ValueError + When the subtype cannot be extracted. + """ + xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$") + m = xpr.match(dtype) + has_fill_value = False + if m: + subtype = m.groupdict()["subtype"] + has_fill_value = m.groupdict()["fill_value"] or has_fill_value + elif dtype == "Sparse": + subtype = "float64" + else: + raise ValueError("Cannot parse {}".format(dtype)) + return subtype, has_fill_value + + @classmethod + def is_dtype(cls, dtype): + dtype = getattr(dtype, "dtype", dtype) + if isinstance(dtype, str) and dtype.startswith("Sparse"): + sub_type, _ = cls._parse_subtype(dtype) + dtype = np.dtype(sub_type) + elif isinstance(dtype, cls): + return True + return isinstance(dtype, np.dtype) or dtype == "Sparse" + + def update_dtype(self, dtype): + """ + Convert the SparseDtype to a new dtype. + + This takes care of converting the ``fill_value``. + + Parameters + ---------- + dtype : Union[str, numpy.dtype, SparseDtype] + The new dtype to use. + + * For a SparseDtype, it is simply returned + * For a NumPy dtype (or str), the current fill value + is converted to the new dtype, and a SparseDtype + with `dtype` and the new fill value is returned. + + Returns + ------- + SparseDtype + A new SparseDtype with the corret `dtype` and fill value + for that `dtype`. + + Raises + ------ + ValueError + When the current fill value cannot be converted to the + new `dtype` (e.g. trying to convert ``np.nan`` to an + integer dtype). 
+ + + Examples + -------- + >>> SparseDtype(int, 0).update_dtype(float) + Sparse[float64, 0.0] + + >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) + Sparse[float64, nan] + """ + cls = type(self) + dtype = pandas_dtype(dtype) + + if not isinstance(dtype, cls): + fill_value = astype_nansafe(np.array(self.fill_value), dtype).item() + dtype = cls(dtype, fill_value=fill_value) + + return dtype + + @property + def _subtype_with_str(self): + """ + Whether the SparseDtype's subtype should be considered ``str``. + + Typically, pandas will store string data in an object-dtype array. + When converting values to a dtype, e.g. in ``.astype``, we need to + be more specific, we need the actual underlying type. + + Returns + ------- + + >>> SparseDtype(int, 1)._subtype_with_str + dtype('int64') + + >>> SparseDtype(object, 1)._subtype_with_str + dtype('O') + + >>> dtype = SparseDtype(str, '') + >>> dtype.subtype + dtype('O') + + >>> dtype._subtype_with_str + str + """ + if isinstance(self.fill_value, str): + return type(self.fill_value) + return self.subtype diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py similarity index 100% rename from pandas/core/sparse/scipy_sparse.py rename to pandas/core/arrays/sparse/scipy_sparse.py diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 071a8db707b69..f1d2803ce5505 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -5,7 +5,7 @@ import pandas as pd from pandas.core import ops -from pandas.core.sparse.api import SparseDtype +from pandas.core.arrays.sparse import SparseDtype import pandas.util.testing as tm diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 5d5ee565c7891..c02d8ae4e7429 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -10,7 +10,7 @@ import 
pandas as pd from pandas import isna -from pandas.core.sparse.api import SparseArray, SparseDtype +from pandas.core.arrays.sparse import SparseArray, SparseDtype import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/arrays/sparse/test_combine_concat.py similarity index 100% rename from pandas/tests/sparse/test_combine_concat.py rename to pandas/tests/arrays/sparse/test_combine_concat.py diff --git a/pandas/tests/arrays/sparse/test_dtype.py b/pandas/tests/arrays/sparse/test_dtype.py index db8f62962f0b0..aa8d2afca11e6 100644 --- a/pandas/tests/arrays/sparse/test_dtype.py +++ b/pandas/tests/arrays/sparse/test_dtype.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas.core.sparse.api import SparseDtype +from pandas.core.arrays.sparse import SparseDtype @pytest.mark.parametrize( diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 3288c9c584565..036b0213973d6 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -30,7 +30,7 @@ import pandas as pd from pandas import Categorical, CategoricalIndex, IntervalIndex, Series, date_range -from pandas.core.sparse.api import SparseDtype +from pandas.core.arrays.sparse import SparseDtype import pandas.util.testing as tm diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 9d08981d39894..5e80c317a587b 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -8,7 +8,7 @@ import pandas as pd from pandas import Categorical, DataFrame, Index, Series, get_dummies -from pandas.core.sparse.api import SparseArray, SparseDtype +from pandas.core.arrays.sparse import SparseArray, SparseDtype import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal diff --git a/pandas/tests/sparse/__init__.py b/pandas/tests/sparse/__init__.py deleted file mode 100644 
index e69de29bb2d1d..0000000000000
Updating our internal imports to use the path were they actually live, not the old `core.sparse.api` one. While doing this, I was thinking now that for the tests, we could actually also use the top-level `pd.` imports. @TomAugspurger in the `core/sparse/` module, the only actual code living there is the scipy_sparse code. I moved that to `arrays` module (but could also be moved within the sparse.py file, or create a sparse subdirectory, ..). Do we want to raise a deprecation warning on pandas.core.sparse.api?
https://api.github.com/repos/pandas-dev/pandas/pulls/28516
2019-09-19T07:20:38Z
2019-09-27T14:58:18Z
2019-09-27T14:58:18Z
2019-09-27T14:59:19Z
REF: Parametrize test
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index f87d6dba72e68..5eb9a067b11e4 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -528,32 +528,33 @@ def test_as_array_datetime_tz(self): assert mgr.get("g").dtype == "datetime64[ns, CET]" assert mgr.as_array().dtype == "object" - def test_astype(self): + @pytest.mark.parametrize("t", ["float16", "float32", "float64", "int32", "int64"]) + def test_astype(self, t): # coerce all mgr = create_mgr("c: f4; d: f2; e: f8") - for t in ["float16", "float32", "float64", "int32", "int64"]: - t = np.dtype(t) - tmgr = mgr.astype(t) - assert tmgr.get("c").dtype.type == t - assert tmgr.get("d").dtype.type == t - assert tmgr.get("e").dtype.type == t + + t = np.dtype(t) + tmgr = mgr.astype(t) + assert tmgr.get("c").dtype.type == t + assert tmgr.get("d").dtype.type == t + assert tmgr.get("e").dtype.type == t # mixed mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8") - for t in ["float16", "float32", "float64", "int32", "int64"]: - t = np.dtype(t) - tmgr = mgr.astype(t, errors="ignore") - assert tmgr.get("c").dtype.type == t - assert tmgr.get("e").dtype.type == t - assert tmgr.get("f").dtype.type == t - assert tmgr.get("g").dtype.type == t - - assert tmgr.get("a").dtype.type == np.object_ - assert tmgr.get("b").dtype.type == np.object_ - if t != np.int64: - assert tmgr.get("d").dtype.type == np.datetime64 - else: - assert tmgr.get("d").dtype.type == t + + t = np.dtype(t) + tmgr = mgr.astype(t, errors="ignore") + assert tmgr.get("c").dtype.type == t + assert tmgr.get("e").dtype.type == t + assert tmgr.get("f").dtype.type == t + assert tmgr.get("g").dtype.type == t + + assert tmgr.get("a").dtype.type == np.object_ + assert tmgr.get("b").dtype.type == np.object_ + if t != np.int64: + assert tmgr.get("d").dtype.type == np.datetime64 + else: + assert tmgr.get("d").dtype.type == t def test_convert(self): def 
_compare(old_mgr, new_mgr):
Let me know if I'm mistaken here, but I think we would prefer parametrization over for loops inside tests (e.g., so `pytest` flags which case is failing), and this should be equivalent.
https://api.github.com/repos/pandas-dev/pandas/pulls/28515
2019-09-19T00:40:34Z
2019-09-19T14:32:40Z
2019-09-19T14:32:40Z
2019-09-19T16:53:20Z
TST: suppress 1485 warnings issued by xml parser
diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py index e69de29bb2d1d..550172329fc57 100644 --- a/pandas/tests/io/excel/__init__.py +++ b/pandas/tests/io/excel/__init__.py @@ -0,0 +1,6 @@ +import pytest + +pytestmark = pytest.mark.filterwarnings( + # Looks like tree.getiterator is deprecated in favor of tree.iter + "ignore:This method will be removed in future versions:PendingDeprecationWarning" +)
So we don't have to scroll through tons of ``` pandas/tests/io/excel/test_xlrd.py::test_excel_table_sheet_by_index[.xlsm] /usr/local/lib/python3.7/site-packages/xlrd/xlsx.py:312: PendingDeprecationWarning: This method will be removed in future versions. Use 'tree.iter()' or 'list(tree.iter())' instead. for elem in self.tree.iter() if Element_has_iter else self.tree.getiterator(): ```
https://api.github.com/repos/pandas-dev/pandas/pulls/28514
2019-09-18T23:47:04Z
2019-09-19T15:49:21Z
2019-09-19T15:49:21Z
2019-09-19T16:05:23Z
CLN: dont catch Exception when calling maybe_convert_numeric
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index ac9b57dc8d342..e31918c21c2ac 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -796,7 +796,7 @@ def maybe_convert_objects(values: np.ndarray, convert_numeric: bool = True): new_values = lib.maybe_convert_numeric( values, set(), coerce_numeric=True ) - except Exception: + except (ValueError, TypeError): pass else: # if we are all nans then leave me alone @@ -875,7 +875,7 @@ def soft_convert_objects( if numeric and is_object_dtype(values.dtype): try: converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True) - except Exception: + except (ValueError, TypeError): pass else: # If all NaNs, then do not-alter @@ -953,9 +953,10 @@ def try_datetime(v): # we might have a sequence of the same-datetimes with tz's # if so coerce to a DatetimeIndex; if they are not the same, # then these stay as object dtype, xref GH19671 + from pandas._libs.tslibs import conversion + from pandas import DatetimeIndex + try: - from pandas._libs.tslibs import conversion - from pandas import DatetimeIndex values, tz = conversion.datetime_to_datetime64(v) return DatetimeIndex(values).tz_localize("UTC").tz_convert(tz=tz) diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index a0e2c8d9cab65..fa33d11bda7eb 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -137,21 +137,20 @@ def to_numeric(arg, errors="raise", downcast=None): else: values = arg - try: - if is_numeric_dtype(values): - pass - elif is_datetime_or_timedelta_dtype(values): - values = values.astype(np.int64) - else: - values = ensure_object(values) - coerce_numeric = errors not in ("ignore", "raise") + if is_numeric_dtype(values): + pass + elif is_datetime_or_timedelta_dtype(values): + values = values.astype(np.int64) + else: + values = ensure_object(values) + coerce_numeric = errors not in ("ignore", "raise") + try: values = lib.maybe_convert_numeric( values, set(), 
coerce_numeric=coerce_numeric ) - - except Exception: - if errors == "raise": - raise + except (ValueError, TypeError): + if errors == "raise": + raise # attempt downcast only if the data has been successfully converted # to a numerical dtype and if a downcast method has been specified diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 72f1adf0aad3d..3678e32943b2e 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1782,14 +1782,17 @@ def _infer_types(self, values, na_values, try_num_bool=True): np.putmask(values, mask, np.nan) return values, na_count - if try_num_bool: + if try_num_bool and is_object_dtype(values.dtype): + # exclude e.g DatetimeIndex here try: result = lib.maybe_convert_numeric(values, na_values, False) - na_count = isna(result).sum() - except Exception: + except (ValueError, TypeError): + # e.g. encountering datetime string gets ValueError + # TypeError can be raised in floatify result = values - if values.dtype == np.object_: - na_count = parsers.sanitize_objects(result, na_values, False) + na_count = parsers.sanitize_objects(result, na_values, False) + else: + na_count = isna(result).sum() else: result = values if values.dtype == np.object_: diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 0a8707bdac3a0..cfa6304909bb7 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -379,9 +379,12 @@ def test_isinf_scalar(self): assert not libmissing.isneginf_scalar(1) assert not libmissing.isneginf_scalar("a") - def test_maybe_convert_numeric_infinities(self): + @pytest.mark.parametrize("maybe_int", [True, False]) + @pytest.mark.parametrize( + "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"] + ) + def test_maybe_convert_numeric_infinities(self, infinity, maybe_int): # see gh-13274 - infinities = ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"] na_values = {"", "NULL", "nan"} pos = np.array(["inf"], 
dtype=np.float64) @@ -389,35 +392,31 @@ def test_maybe_convert_numeric_infinities(self): msg = "Unable to parse string" - for infinity in infinities: - for maybe_int in (True, False): - out = lib.maybe_convert_numeric( - np.array([infinity], dtype=object), na_values, maybe_int - ) - tm.assert_numpy_array_equal(out, pos) - - out = lib.maybe_convert_numeric( - np.array(["-" + infinity], dtype=object), na_values, maybe_int - ) - tm.assert_numpy_array_equal(out, neg) - - out = lib.maybe_convert_numeric( - np.array([infinity], dtype=object), na_values, maybe_int - ) - tm.assert_numpy_array_equal(out, pos) - - out = lib.maybe_convert_numeric( - np.array(["+" + infinity], dtype=object), na_values, maybe_int - ) - tm.assert_numpy_array_equal(out, pos) - - # too many characters - with pytest.raises(ValueError, match=msg): - lib.maybe_convert_numeric( - np.array(["foo_" + infinity], dtype=object), - na_values, - maybe_int, - ) + out = lib.maybe_convert_numeric( + np.array([infinity], dtype=object), na_values, maybe_int + ) + tm.assert_numpy_array_equal(out, pos) + + out = lib.maybe_convert_numeric( + np.array(["-" + infinity], dtype=object), na_values, maybe_int + ) + tm.assert_numpy_array_equal(out, neg) + + out = lib.maybe_convert_numeric( + np.array([infinity], dtype=object), na_values, maybe_int + ) + tm.assert_numpy_array_equal(out, pos) + + out = lib.maybe_convert_numeric( + np.array(["+" + infinity], dtype=object), na_values, maybe_int + ) + tm.assert_numpy_array_equal(out, pos) + + # too many characters + with pytest.raises(ValueError, match=msg): + lib.maybe_convert_numeric( + np.array(["foo_" + infinity], dtype=object), na_values, maybe_int + ) def test_maybe_convert_numeric_post_floatify_nan(self, coerce): # see gh-13314
and parametrized one of the tests for maybe_convert_numeric
https://api.github.com/repos/pandas-dev/pandas/pulls/28513
2019-09-18T23:07:38Z
2019-09-20T12:35:26Z
2019-09-20T12:35:26Z
2019-09-20T14:16:27Z
Re-implemented parametrization of test_frame_from_json_to_json
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 9c489c7cc17ec..415b1d81eb3e4 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -7,7 +7,7 @@ import numpy as np import pytest -from pandas.compat import PY35, is_platform_32bit +from pandas.compat import PY35, is_platform_32bit, is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -154,322 +154,212 @@ def test_frame_non_unique_columns_raises(self, orient): with pytest.raises(ValueError, match=msg): df.to_json(orient=orient) - def test_frame_from_json_to_json(self): - def _check_orient( - df, - orient, - dtype=None, - numpy=False, - convert_axes=True, - check_dtype=True, - raise_ok=None, - sort=None, - check_index_type=True, - check_column_type=True, - check_numpy_dtype=False, - ): - if sort is not None: - df = df.sort_values(sort) - else: - df = df.sort_index() - - # if we are not unique, then check that we are raising ValueError - # for the appropriate orients - if not df.index.is_unique and orient in ["index", "columns"]: - msg = "DataFrame index must be unique for orient='{}'".format(orient) - with pytest.raises(ValueError, match=msg): - df.to_json(orient=orient) - return - if not df.columns.is_unique and orient in ["index", "columns", "records"]: - # TODO: not executed. fix this. 
- with pytest.raises(ValueError, match="ksjkajksfjksjfkjs"): - df.to_json(orient=orient) - return - - dfjson = df.to_json(orient=orient) - - try: - unser = read_json( - dfjson, - orient=orient, - dtype=dtype, - numpy=numpy, - convert_axes=convert_axes, - ) - except Exception as detail: - if raise_ok is not None: - if isinstance(detail, raise_ok): - return - raise - - if sort is not None and sort in unser.columns: - unser = unser.sort_values(sort) - else: - unser = unser.sort_index() - - if not dtype: - check_dtype = False - - if not convert_axes and df.index.dtype.type == np.datetime64: - unser.index = DatetimeIndex(unser.index.values.astype("i8") * 1e6) - if orient == "records": - # index is not captured in this orientation - tm.assert_almost_equal( - df.values, unser.values, check_dtype=check_numpy_dtype - ) - tm.assert_index_equal( - df.columns, unser.columns, exact=check_column_type - ) - elif orient == "values": - # index and cols are not captured in this orientation - if numpy is True and df.shape == (0, 0): - assert unser.shape[0] == 0 - else: - tm.assert_almost_equal( - df.values, unser.values, check_dtype=check_numpy_dtype - ) - elif orient == "split": - # index and col labels might not be strings - unser.index = [str(i) for i in unser.index] - unser.columns = [str(i) for i in unser.columns] - - if sort is None: - unser = unser.sort_index() - tm.assert_almost_equal( - df.values, unser.values, check_dtype=check_numpy_dtype - ) - else: - if convert_axes: - tm.assert_frame_equal( - df, - unser, - check_dtype=check_dtype, - check_index_type=check_index_type, - check_column_type=check_column_type, - ) - else: - tm.assert_frame_equal( - df, unser, check_less_precise=False, check_dtype=check_dtype - ) - - def _check_all_orients( - df, - dtype=None, - convert_axes=True, - raise_ok=None, - sort=None, - check_index_type=True, - check_column_type=True, - ): + def test_frame_default_orient(self): + assert self.frame.to_json() == self.frame.to_json(orient="columns") - 
# numpy=False - if convert_axes: - _check_orient( - df, - "columns", - dtype=dtype, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "records", - dtype=dtype, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "split", - dtype=dtype, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "index", - dtype=dtype, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "values", - dtype=dtype, - sort=sort, - check_index_type=False, - check_column_type=False, - ) + @pytest.mark.parametrize("dtype", [False, float]) + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype): + data = self.frame.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype + ) - _check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort) - _check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort) - _check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort) - _check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort) - _check_orient(df, "values", dtype=dtype, convert_axes=False, sort=sort) - - # numpy=True and raise_ok might be not None, so ignore the error - if convert_axes: - _check_orient( - df, - "columns", - dtype=dtype, - numpy=True, - raise_ok=raise_ok, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "records", - dtype=dtype, - numpy=True, - raise_ok=raise_ok, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "split", - dtype=dtype, - numpy=True, - raise_ok=raise_ok, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "index", - dtype=dtype, - numpy=True, 
- raise_ok=raise_ok, - sort=sort, - check_index_type=False, - check_column_type=False, - ) - _check_orient( - df, - "values", - dtype=dtype, - numpy=True, - raise_ok=raise_ok, - sort=sort, - check_index_type=False, - check_column_type=False, - ) + expected = self.frame.copy() - _check_orient( - df, - "columns", - dtype=dtype, - numpy=True, - convert_axes=False, - raise_ok=raise_ok, - sort=sort, - ) - _check_orient( - df, - "records", - dtype=dtype, - numpy=True, - convert_axes=False, - raise_ok=raise_ok, - sort=sort, - ) - _check_orient( - df, - "split", - dtype=dtype, - numpy=True, - convert_axes=False, - raise_ok=raise_ok, - sort=sort, - ) - _check_orient( - df, - "index", - dtype=dtype, - numpy=True, - convert_axes=False, - raise_ok=raise_ok, - sort=sort, - ) - _check_orient( - df, - "values", - dtype=dtype, - numpy=True, - convert_axes=False, - raise_ok=raise_ok, - sort=sort, - ) + if not numpy and (orient == "index" or (PY35 and orient == "columns")): + # TODO: debug why sort is required + expected = expected.sort_index() - # basic - _check_all_orients(self.frame) - assert self.frame.to_json() == self.frame.to_json(orient="columns") + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [False, np.int64]) + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype): + data = self.intframe.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype + ) + expected = self.intframe.copy() + if not numpy and (orient == "index" or (PY35 and orient == "columns")): + expected = expected.sort_index() - _check_all_orients(self.intframe, dtype=self.intframe.values.dtype) - 
_check_all_orients(self.intframe, dtype=False) + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) - # big one - # index and columns are strings as all unserialised JSON object keys - # are assumed to be strings - biggie = DataFrame( + if ( + numpy + and (is_platform_32bit() or is_platform_windows()) + and not dtype + and orient != "split" + ): + # TODO: see what is causing roundtrip dtype loss + expected = expected.astype(np.int32) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"]) + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype): + df = DataFrame( np.zeros((200, 4)), columns=[str(i) for i in range(4)], index=[str(i) for i in range(200)], + dtype=dtype, ) - _check_all_orients(biggie, dtype=False, convert_axes=False) - # dtypes - _check_all_orients( - DataFrame(biggie, dtype=np.float64), dtype=np.float64, convert_axes=False + # TODO: do we even need to support U3 dtypes? + if numpy and dtype == "U3" and orient != "split": + pytest.xfail("Can't decode directly to array") + + data = df.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype ) - _check_all_orients( - DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False + + expected = df.copy() + if not numpy and (orient == "index" or (PY35 and orient == "columns")): + expected = expected.sort_index() + + if not dtype: + expected = expected.astype(np.int64) + + # index columns, and records orients cannot fully preserve the string + # dtype for axes as the index and column labels are used as keys in + # JSON objects. 
JSON keys are by definition strings, so there's no way + # to disambiguate whether those keys actually were strings or numeric + # beforehand and numeric wins out. + # TODO: Split should be able to support this + if convert_axes and (orient in ("split", "index", "columns")): + expected.columns = expected.columns.astype(np.int64) + expected.index = expected.index.astype(np.int64) + elif orient == "records" and convert_axes: + expected.columns = expected.columns.astype(np.int64) + + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_categorical(self, orient, convert_axes, numpy): + # TODO: create a better frame to test with and improve coverage + if orient in ("index", "columns"): + pytest.xfail( + "Can't have duplicate index values for orient '{}')".format(orient) + ) + + data = self.categorical.to_json(orient=orient) + if numpy and orient in ("records", "values"): + pytest.xfail("Orient {} is broken with numpy=True".format(orient)) + + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy ) - _check_all_orients( - DataFrame(biggie, dtype="U3"), - dtype="U3", - convert_axes=False, - raise_ok=ValueError, + + expected = self.categorical.copy() + expected.index = expected.index.astype(str) # Categorical not preserved + expected.index.name = None # index names aren't preserved in JSON + + if not numpy and (orient == "index" or (PY35 and orient == "columns")): + expected = expected.sort_index() + + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("convert_axes", [True, 
False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_empty(self, orient, convert_axes, numpy): + data = self.empty_frame.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy ) + expected = self.empty_frame.copy() + + # TODO: both conditions below are probably bugs + if convert_axes: + expected.index = expected.index.astype(float) + expected.columns = expected.columns.astype(float) + if numpy and orient == "values": + expected = expected.reindex([0], axis=1).reset_index(drop=True) - # categorical - _check_all_orients(self.categorical, sort="sort", raise_ok=ValueError) + tm.assert_frame_equal(result, expected) - # empty - _check_all_orients( - self.empty_frame, check_index_type=False, check_column_type=False + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_timestamp(self, orient, convert_axes, numpy): + # TODO: improve coverage with date_format parameter + data = self.tsframe.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy ) + expected = self.tsframe.copy() + + if not convert_axes: # one off for ts handling + # DTI gets converted to epoch values + idx = expected.index.astype(np.int64) // 1000000 + if orient != "split": # TODO: handle consistently across orients + idx = idx.astype(str) + + expected.index = idx - # time series data - _check_all_orients(self.tsframe) + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("convert_axes", [True, False]) + @pytest.mark.parametrize("numpy", [True, False]) + def test_roundtrip_mixed(self, orient, convert_axes, numpy): + if numpy and orient != "split": + pytest.xfail("Can't decode directly to array") - # mixed data index 
= pd.Index(["a", "b", "c", "d", "e"]) - data = { + values = { "A": [0.0, 1.0, 2.0, 3.0, 4.0], "B": [0.0, 1.0, 0.0, 1.0, 0.0], "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], "D": [True, False, True, False, True], } - df = DataFrame(data=data, index=index) - _check_orient(df, "split", check_dtype=False) - _check_orient(df, "records", check_dtype=False) - _check_orient(df, "values", check_dtype=False) - _check_orient(df, "columns", check_dtype=False) - # index oriented is problematic as it is read back in in a transposed - # state, so the columns are interpreted as having mixed data and - # given object dtypes. - # force everything to have object dtype beforehand - _check_orient(df.transpose().transpose(), "index", dtype=False) + + df = DataFrame(data=values, index=index) + + data = df.to_json(orient=orient) + result = pd.read_json( + data, orient=orient, convert_axes=convert_axes, numpy=numpy + ) + + expected = df.copy() + expected = expected.assign(**expected.select_dtypes("number").astype(np.int64)) + + if not numpy and (orient == "index" or (PY35 and orient == "columns")): + expected = expected.sort_index() + + if orient == "records" or orient == "values": + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "data,msg,orient",
This piece was broken off of #27838 as it made the diff much larger, so hopefully easier to digest on its own. As is, parametrization here has brought up a lot of rough edges which are responsible for some of the complexity. These are noted with TODOs and summarized as follows (save Py35 issues, which aren't worth addressing at this point): - Frame order is not maintained when `numpy=False` (default) and `orient="index"` - On windows or 32 bit platforms it appears that `np.int64` roundtrips as `np.int32` (maybe not an issue?) - `orient="split"` does not preserve strings in the index if those strings are numeric, though it should be able to - `convert_axes` may have surprising behavior when dealing with empty DataFrames - DTI seem to roundtrip as strings when written with epoch format for all but `orient="split" Not all of these are the same priority, but figure worth leaving as follow ups
https://api.github.com/repos/pandas-dev/pandas/pulls/28510
2019-09-18T21:02:02Z
2019-09-20T17:53:30Z
2019-09-20T17:53:30Z
2019-09-20T18:06:29Z
BUG: fix array_equivalent with mismatched tzawareness
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 594de703258a4..1c2f80b832201 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -55,8 +55,7 @@ cimport pandas._libs.util as util from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN from pandas._libs.tslib import array_to_datetime -from pandas._libs.tslibs.nattype cimport NPY_NAT -from pandas._libs.tslibs.nattype import NaT +from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.conversion cimport convert_to_tsobject from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64 from pandas._libs.tslibs.timezones cimport get_timezone, tz_compare @@ -525,9 +524,18 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool: # we are either not equal or both nan # I think None == None will be true here - if not (PyObject_RichCompareBool(x, y, Py_EQ) or - (x is None or is_nan(x)) and (y is None or is_nan(y))): - return False + try: + if not (PyObject_RichCompareBool(x, y, Py_EQ) or + (x is None or is_nan(x)) and (y is None or is_nan(y))): + return False + except TypeError as err: + # Avoid raising TypeError on tzawareness mismatch + # TODO: This try/except can be removed if/when Timestamp + # comparisons are change dto match datetime, see GH#28507 + if "tz-naive and tz-aware" in str(err): + return False + raise + return True diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 6dd032b9248ed..cd87fbef02e4f 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -445,8 +445,14 @@ def array_equivalent(left, right, strict_nan=False): if not isinstance(right_value, float) or not np.isnan(right_value): return False else: - if np.any(left_value != right_value): - return False + try: + if np.any(left_value != right_value): + return False + except TypeError as err: + if "Cannot compare tz-naive" in str(err): + # tzawareness compat failure, see GH#28507 + return False + raise 
return True # NaNs can occur in float and complex arrays. diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6ef9d78ff9e97..c7e9dd5f0ea6d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4324,12 +4324,9 @@ def equals(self, other): # if other is not object, use other's logic for coercion return other.equals(self) - try: - return array_equivalent( - com.values_from_object(self), com.values_from_object(other) - ) - except Exception: - return False + return array_equivalent( + com.values_from_object(self), com.values_from_object(other) + ) def identical(self, other): """ diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 1a292d5bfcbb6..25b447e1df7d4 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -25,6 +25,9 @@ from pandas import DatetimeIndex, Float64Index, NaT, Series, TimedeltaIndex, date_range from pandas.util import testing as tm +now = pd.Timestamp.now() +utcnow = pd.Timestamp.now("UTC") + @pytest.mark.parametrize("notna_f", [notna, notnull]) def test_notna_notnull(notna_f): @@ -332,6 +335,29 @@ def test_array_equivalent(): assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) +@pytest.mark.parametrize( + "lvalue, rvalue", + [ + # There are 3 variants for each of lvalue and rvalue. We include all + # three for the tz-naive `now` and exclude the datetim64 variant + # for utcnow because it drops tzinfo. 
+ (now, utcnow), + (now.to_datetime64(), utcnow), + (now.to_pydatetime(), utcnow), + (now, utcnow), + (now.to_datetime64(), utcnow.to_pydatetime()), + (now.to_pydatetime(), utcnow.to_pydatetime()), + ], +) +def test_array_equivalent_tzawareness(lvalue, rvalue): + # we shouldn't raise if comparing tzaware and tznaive datetimes + left = np.array([lvalue], dtype=object) + right = np.array([rvalue], dtype=object) + + assert not array_equivalent(left, right, strict_nan=True) + assert not array_equivalent(left, right, strict_nan=False) + + def test_array_equivalent_compat(): # see gh-13388 m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])
Some of this can be simplified if we change the behavior discussed in #28507.
https://api.github.com/repos/pandas-dev/pandas/pulls/28508
2019-09-18T19:18:34Z
2019-09-20T12:38:23Z
2019-09-20T12:38:23Z
2019-09-20T14:15:56Z
BUG/CLN: DatetimeTZDtype.construct_from_string Exception
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index aa7e6801ba431..fcdb89dd8a334 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -685,7 +685,7 @@ def __init__(self, unit="ns", tz=None): tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) - elif tz is None: + if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit @@ -737,14 +737,17 @@ def construct_from_string(cls, string): """ if isinstance(string, str): msg = "Could not construct DatetimeTZDtype from '{}'" - try: - match = cls._match.match(string) - if match: - d = match.groupdict() + match = cls._match.match(string) + if match: + d = match.groupdict() + try: return cls(unit=d["unit"], tz=d["tz"]) - except Exception: - # TODO(py3): Change this pass to `raise TypeError(msg) from e` - pass + except (KeyError, TypeError, ValueError) as err: + # KeyError if maybe_get_tz tries and fails to get a + # pytz timezone (actually pytz.UnknownTimeZoneError). 
+ # TypeError if we pass a nonsense tz; + # ValueError if we pass a unit other than "ns" + raise TypeError(msg.format(string)) from err raise TypeError(msg.format(string)) raise TypeError("Could not construct DatetimeTZDtype") diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 3288c9c584565..6013e58bc6929 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -248,9 +248,19 @@ def test_construct_from_string_raises(self): with pytest.raises(TypeError, match="notatz"): DatetimeTZDtype.construct_from_string("datetime64[ns, notatz]") - with pytest.raises(TypeError, match="^Could not construct DatetimeTZDtype$"): + msg = "^Could not construct DatetimeTZDtype" + with pytest.raises(TypeError, match=msg): + # list instead of string DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"]) + with pytest.raises(TypeError, match=msg): + # non-nano unit + DatetimeTZDtype.construct_from_string("datetime64[ps, UTC]") + + with pytest.raises(TypeError, match=msg): + # dateutil str that returns None from gettz + DatetimeTZDtype.construct_from_string("datetime64[ns, dateutil/invalid]") + def test_is_dtype(self): assert not DatetimeTZDtype.is_dtype(None) assert DatetimeTZDtype.is_dtype(self.dtype)
The bug part of this is that "dateutil/something_invalid" fails to raise on master because dateutil.tz.gettz("something_invalid") returns None instead of raising.
https://api.github.com/repos/pandas-dev/pandas/pulls/28505
2019-09-18T17:33:49Z
2019-09-20T12:34:28Z
2019-09-20T12:34:28Z
2019-09-20T14:25:15Z
Cleaned up JSON test with ambiguous DTA usage
diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 4851f4bd27a7b..9c489c7cc17ec 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -813,20 +813,11 @@ def test_series_roundtrip_simple(self, orient, numpy): @pytest.mark.parametrize("dtype", [False, None]) @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_object(self, orient, numpy, dtype): - # TODO: see why tm.makeObjectSeries provides back DTA - dtSeries = Series( - [str(d) for d in self.objSeries], - index=self.objSeries.index, - name=self.objSeries.name, - ) - data = dtSeries.to_json(orient=orient) + data = self.objSeries.to_json(orient=orient) result = pd.read_json( data, typ="series", orient=orient, numpy=numpy, dtype=dtype ) - if dtype is False: - expected = dtSeries.copy() - else: - expected = self.objSeries.copy() + expected = self.objSeries.copy() if not numpy and PY35 and orient in ("index", "columns"): expected = expected.sort_index() @@ -897,6 +888,19 @@ def test_series_with_dtype(self): expected = Series([4] * 3) assert_series_equal(result, expected) + @pytest.mark.parametrize( + "dtype,expected", + [ + (True, Series(["2000-01-01"], dtype="datetime64[ns]")), + (False, Series([946684800000])), + ], + ) + def test_series_with_dtype_datetime(self, dtype, expected): + s = Series(["2000-01-01"], dtype="datetime64[ns]") + data = s.to_json() + result = pd.read_json(data, typ="series", dtype=dtype) + assert_series_equal(result, expected) + def test_frame_from_json_precise_float(self): df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) result = read_json(df.to_json(), precise_float=True)
Follow-up to #27838, enabled by #28444. I created a new test specific to the handling of DTA with `dtype=X` to provide a more logical separation of concerns.
https://api.github.com/repos/pandas-dev/pandas/pulls/28502
2019-09-18T15:38:03Z
2019-09-18T16:27:03Z
2019-09-18T16:27:03Z
2019-09-18T16:27:14Z
PERF: use isinstance(obj, Foo) instead of ABCFoo
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1203cd9fbd1b3..540524df44632 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -47,6 +47,7 @@ Axis, Dtype, FilePathOrBuffer, + FrameOrSeriesUnion, IndexKeyFunc, Label, Level, @@ -106,12 +107,6 @@ needs_i8_conversion, pandas_dtype, ) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCIndexClass, - ABCMultiIndex, - ABCSeries, -) from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms, common as com, nanops, ops @@ -1838,7 +1833,7 @@ def to_records( dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: - if isinstance(self.index, ABCMultiIndex): + if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. copy copy copy ix_vals = list(map(np.array, zip(*self.index.values))) else: @@ -1851,7 +1846,7 @@ def to_records( count = 0 index_names = list(self.index.names) - if isinstance(self.index, ABCMultiIndex): + if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = f"level_{count}" @@ -2782,7 +2777,7 @@ def __getitem__(self, key): # The behavior is inconsistent. 
It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) - if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex): + if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): data = data[key] return data @@ -3600,7 +3595,7 @@ def reindexer(value): elif isinstance(value, DataFrame): # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame - if isinstance(self.columns, ABCMultiIndex) and key in self.columns: + if isinstance(self.columns, MultiIndex) and key in self.columns: loc = self.columns.get_loc(key) if isinstance(loc, (slice, Series, np.ndarray, Index)): cols = maybe_droplevels(self.columns[loc], key) @@ -3649,7 +3644,7 @@ def reindexer(value): # broadcast across multiple columns if necessary if broadcast and key in self.columns and value.ndim == 1: - if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex): + if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)) @@ -4337,9 +4332,7 @@ def set_index( missing: List[Label] = [] for col in keys: - if isinstance( - col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator) - ): + if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, "ndim", 1) != 1: @@ -4368,7 +4361,7 @@ def set_index( names = [] if append: names = list(self.index.names) - if isinstance(self.index, ABCMultiIndex): + if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: @@ -4376,11 +4369,11 @@ def set_index( to_remove: List[Label] = [] for col in keys: - if isinstance(col, ABCMultiIndex): + if isinstance(col, MultiIndex): for n 
in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) - elif isinstance(col, (ABCIndexClass, ABCSeries)): + elif isinstance(col, (Index, Series)): # if Index then not MultiIndex (treated above) arrays.append(col) names.append(col.name) @@ -4625,7 +4618,7 @@ def _maybe_casted_values(index, labels=None): if not drop: to_insert: Iterable[Tuple[Any, Optional[Any]]] - if isinstance(self.index, ABCMultiIndex): + if isinstance(self.index, MultiIndex): names = [ (n if n is not None else f"level_{i}") for i, n in enumerate(self.index.names) @@ -4636,7 +4629,7 @@ def _maybe_casted_values(index, labels=None): names = [default] if self.index.name is None else [self.index.name] to_insert = ((self.index, None),) - multi_col = isinstance(self.columns, ABCMultiIndex) + multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if not (level is None or i in level): continue @@ -5240,7 +5233,7 @@ def sort_index( level, ascending=ascending, sort_remaining=sort_remaining ) - elif isinstance(labels, ABCMultiIndex): + elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer( @@ -5607,14 +5600,14 @@ def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": axis = self._get_axis_number(axis) - if not isinstance(result._get_axis(axis), ABCMultiIndex): # pragma: no cover + if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only swap levels on a hierarchical axis.") if axis == 0: - assert isinstance(result.index, ABCMultiIndex) + assert isinstance(result.index, MultiIndex) result.index = result.index.swaplevel(i, j) else: - assert isinstance(result.columns, ABCMultiIndex) + assert isinstance(result.columns, MultiIndex) result.columns = result.columns.swaplevel(i, j) return result @@ -5635,16 +5628,16 @@ def reorder_levels(self, order, axis=0) -> "DataFrame": DataFrame """ axis = self._get_axis_number(axis) - if not 
isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover + if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only reorder levels on a hierarchical axis.") result = self.copy() if axis == 0: - assert isinstance(result.index, ABCMultiIndex) + assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) else: - assert isinstance(result.columns, ABCMultiIndex) + assert isinstance(result.columns, MultiIndex) result.columns = result.columns.reorder_levels(order) return result @@ -5913,7 +5906,8 @@ def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 - if isinstance(arr, (ABCIndexClass, ABCSeries)): + # TODO: extract_array? + if isinstance(arr, (Index, Series)): arr = arr._values if needs_i8_conversion(arr.dtype): @@ -5925,7 +5919,8 @@ def extract_values(arr): def combiner(x, y): mask = isna(x) - if isinstance(mask, (ABCIndexClass, ABCSeries)): + # TODO: extract_array? + if isinstance(mask, (Index, Series)): mask = mask._values x_values = extract_values(x) @@ -7020,8 +7015,8 @@ def _gotitem( self, key: Union[str, List[str]], ndim: int, - subset: Optional[Union[Series, ABCDataFrame]] = None, - ) -> Union[Series, ABCDataFrame]: + subset: Optional[FrameOrSeriesUnion] = None, + ) -> FrameOrSeriesUnion: """ Sub-classes to define. Return a sliced object. @@ -8225,7 +8220,7 @@ def _count_level(self, level, axis=0, numeric_only=False): count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) - if not isinstance(count_axis, ABCMultiIndex): + if not isinstance(count_axis, MultiIndex): raise TypeError( f"Can only count levels on hierarchical {self._get_axis_name(axis)}." 
) diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index c3e170b0e39c4..1a315ff0ead52 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -7,13 +7,12 @@ from pandas.core.dtypes.common import is_extension_array_dtype, is_list_like from pandas.core.dtypes.concat import concat_compat -from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.dtypes.missing import notna from pandas.core.arrays import Categorical import pandas.core.common as com from pandas.core.frame import DataFrame, _shared_docs -from pandas.core.indexes.base import Index +from pandas.core.indexes.api import Index, MultiIndex from pandas.core.reshape.concat import concat from pandas.core.tools.numeric import to_numeric @@ -33,7 +32,7 @@ def melt( # TODO: what about the existing index? # If multiindex, gather names of columns on all level for checking presence # of `id_vars` and `value_vars` - if isinstance(frame.columns, ABCMultiIndex): + if isinstance(frame.columns, MultiIndex): cols = [x for c in frame.columns for x in c] else: cols = list(frame.columns) @@ -41,7 +40,7 @@ def melt( if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] - elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list): + elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list): raise ValueError( "id_vars must be a list of tuples when columns are a MultiIndex" ) @@ -60,9 +59,7 @@ def melt( if value_vars is not None: if not is_list_like(value_vars): value_vars = [value_vars] - elif isinstance(frame.columns, ABCMultiIndex) and not isinstance( - value_vars, list - ): + elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list): raise ValueError( "value_vars must be a list of tuples when columns are a MultiIndex" ) @@ -84,7 +81,7 @@ def melt( frame.columns = frame.columns.get_level_values(col_level) if var_name is None: - if isinstance(frame.columns, ABCMultiIndex): + if 
isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: diff --git a/pandas/core/series.py b/pandas/core/series.py index 64220862c3e51..b236825ac4dc8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -45,13 +45,7 @@ is_object_dtype, is_scalar, ) -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCDatetimeIndex, - ABCMultiIndex, - ABCPeriodIndex, - ABCSeries, -) +from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import ( isna, @@ -272,7 +266,7 @@ def __init__( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) - elif isinstance(data, ABCSeries): + elif isinstance(data, Series): if index is None: index = data.index else: @@ -3549,7 +3543,7 @@ def swaplevel(self, i=-2, j=-1, copy=True) -> "Series": Series Series with levels swapped in MultiIndex. """ - assert isinstance(self.index, ABCMultiIndex) + assert isinstance(self.index, MultiIndex) new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__( self, method="swaplevel" @@ -3574,7 +3568,7 @@ def reorder_levels(self, order) -> "Series": raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy() - assert isinstance(result.index, ABCMultiIndex) + assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result @@ -4661,8 +4655,8 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": if copy: new_values = new_values.copy() - assert isinstance(self.index, (ABCDatetimeIndex, ABCPeriodIndex)) - new_index = self.index.to_timestamp(freq=freq, how=how) + assert isinstance(self.index, PeriodIndex) + new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, 
method="to_timestamp" ) @@ -4688,8 +4682,8 @@ def to_period(self, freq=None, copy=True) -> "Series": if copy: new_values = new_values.copy() - assert isinstance(self.index, ABCDatetimeIndex) - new_index = self.index.to_period(freq=freq) + assert isinstance(self.index, DatetimeIndex) + new_index = self.index.to_period(freq=freq) # type: ignore return self._constructor(new_values, index=new_index).__finalize__( self, method="to_period" ) diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index aac1df5dcd396..4ec20977a5e7f 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -14,9 +14,9 @@ from pandas.core.dtypes import missing from pandas.core.dtypes.common import is_float, is_scalar -from pandas.core.dtypes.generic import ABCIndex, ABCMultiIndex, ABCPeriodIndex +from pandas.core.dtypes.generic import ABCIndex -from pandas import Index +from pandas import Index, MultiIndex, PeriodIndex import pandas.core.common as com from pandas.io.common import stringify_path @@ -465,7 +465,7 @@ def _format_header_mi(self): coloffset = 0 lnum = 0 - if self.index and isinstance(self.df.index, ABCMultiIndex): + if self.index and isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index[0]) - 1 if self.merge_cells: @@ -507,7 +507,7 @@ def _format_header_regular(self): if self.index: coloffset = 1 - if isinstance(self.df.index, ABCMultiIndex): + if isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index[0]) colnames = self.columns @@ -526,7 +526,7 @@ def _format_header_regular(self): ) def _format_header(self): - if isinstance(self.columns, ABCMultiIndex): + if isinstance(self.columns, MultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() @@ -545,7 +545,7 @@ def _format_header(self): return itertools.chain(gen, gen2) def _format_body(self): - if isinstance(self.df.index, ABCMultiIndex): + if isinstance(self.df.index, MultiIndex): return self._format_hierarchical_rows() else: return 
self._format_regular_rows() @@ -569,7 +569,7 @@ def _format_regular_rows(self): else: index_label = self.df.index.names[0] - if isinstance(self.columns, ABCMultiIndex): + if isinstance(self.columns, MultiIndex): self.rowcounter += 1 if index_label and self.header is not False: @@ -577,7 +577,7 @@ def _format_regular_rows(self): # write index_values index_values = self.df.index - if isinstance(self.df.index, ABCPeriodIndex): + if isinstance(self.df.index, PeriodIndex): index_values = self.df.index.to_timestamp() for idx, idxval in enumerate(index_values): @@ -609,7 +609,7 @@ def _format_hierarchical_rows(self): # with index names (blank if None) for # unambiguous round-trip, unless not merging, # in which case the names all go on one row Issue #11328 - if isinstance(self.columns, ABCMultiIndex) and self.merge_cells: + if isinstance(self.columns, MultiIndex) and self.merge_cells: self.rowcounter += 1 # if index labels are not empty go ahead and dump diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c7eb3eeedcadf..68ad559967ece 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -56,12 +56,6 @@ is_scalar, is_timedelta64_dtype, ) -from pandas.core.dtypes.generic import ( - ABCDatetimeIndex, - ABCMultiIndex, - ABCPeriodIndex, - ABCTimedeltaIndex, -) from pandas.core.dtypes.missing import isna, notna from pandas.core.arrays.datetimes import DatetimeArray @@ -69,7 +63,7 @@ from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array -from pandas.core.indexes.api import Index, ensure_index +from pandas.core.indexes.api import Index, MultiIndex, PeriodIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex @@ -294,7 +288,7 @@ def _get_footer(self) -> str: if getattr(self.series.index, "freq", None) is not None: assert isinstance( - self.series.index, (ABCDatetimeIndex, 
ABCPeriodIndex, ABCTimedeltaIndex) + self.series.index, (DatetimeIndex, PeriodIndex, TimedeltaIndex) ) footer += f"Freq: {self.series.index.freqstr}" @@ -329,7 +323,7 @@ def _get_footer(self) -> str: def _get_formatted_index(self) -> Tuple[List[str], bool]: index = self.tr_series.index - is_multi = isinstance(index, ABCMultiIndex) + is_multi = isinstance(index, MultiIndex) if is_multi: have_header = any(name for name in index.names) @@ -976,7 +970,7 @@ def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]: columns = frame.columns - if isinstance(columns, ABCMultiIndex): + if isinstance(columns, MultiIndex): fmt_columns = columns.format(sparsify=False, adjoin=False) fmt_columns = list(zip(*fmt_columns)) dtypes = self.frame.dtypes._values @@ -1036,7 +1030,7 @@ def _get_formatted_index(self, frame: "DataFrame") -> List[str]: columns = frame.columns fmt = self._get_formatter("__index__") - if isinstance(index, ABCMultiIndex): + if isinstance(index, MultiIndex): fmt_index = index.format( sparsify=self.sparsify, adjoin=False, @@ -1071,7 +1065,7 @@ def _get_formatted_index(self, frame: "DataFrame") -> List[str]: def _get_column_name_list(self) -> List[str]: names: List[str] = [] columns = self.frame.columns - if isinstance(columns, ABCMultiIndex): + if isinstance(columns, MultiIndex): names.extend("" if name is None else name for name in columns.names) else: names.append("" if columns.name is None else columns.name) @@ -1767,7 +1761,7 @@ def _cond(values): def _has_names(index: Index) -> bool: - if isinstance(index, ABCMultiIndex): + if isinstance(index, MultiIndex): return com.any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 1be0f977f9b20..e31d977512f1e 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -9,9 +9,7 @@ from pandas._libs import lib -from pandas.core.dtypes.generic import ABCMultiIndex - -from pandas import option_context +from 
pandas import MultiIndex, option_context from pandas.io.common import is_url from pandas.io.formats.format import ( @@ -233,7 +231,7 @@ def _write_table(self, indent: int = 0) -> None: def _write_col_header(self, indent: int) -> None: truncate_h = self.fmt.truncate_h - if isinstance(self.columns, ABCMultiIndex): + if isinstance(self.columns, MultiIndex): template = 'colspan="{span:d}" halign="left"' if self.fmt.sparsify: @@ -376,7 +374,7 @@ def _write_body(self, indent: int) -> None: fmt_values = self._get_formatted_values() # write values - if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): + if self.fmt.index and isinstance(self.frame.index, MultiIndex): self._write_hierarchical_rows(fmt_values, indent + self.indent_delta) else: self._write_regular_rows(fmt_values, indent + self.indent_delta) @@ -585,7 +583,7 @@ def write_style(self) -> None: ("tbody tr th:only-of-type", "vertical-align", "middle"), ("tbody tr th", "vertical-align", "top"), ] - if isinstance(self.columns, ABCMultiIndex): + if isinstance(self.columns, MultiIndex): element_props.append(("thead tr th", "text-align", "left")) if self.show_row_idx_names: element_props.append( diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index e36696dc23a87..4b79bef41d025 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -5,7 +5,6 @@ import numpy as np from pandas.core.dtypes.common import is_dict_like -from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import remove_na_arraylike import pandas as pd @@ -248,7 +247,7 @@ def boxplot( if return_type not in BoxPlot._valid_return_types: raise ValueError("return_type must be {'axes', 'dict', 'both'}") - if isinstance(data, ABCSeries): + if isinstance(data, pd.Series): data = data.to_frame("x") column = "x" diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 
11c59a0903440..c6d159d3d016b 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -22,9 +22,8 @@ is_integer_dtype, is_nested_list_like, ) -from pandas.core.dtypes.generic import ABCSeries -from pandas import Index, get_option +from pandas import Index, Series, get_option import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range @@ -252,7 +251,7 @@ def _dt_to_float_ordinal(dt): preserving hours, minutes, seconds and microseconds. Return value is a :func:`float`. """ - if isinstance(dt, (np.ndarray, Index, ABCSeries)) and is_datetime64_ns_dtype(dt): + if isinstance(dt, (np.ndarray, Index, Series)) and is_datetime64_ns_dtype(dt): base = dates.epoch2num(dt.asi8 / 1.0e9) else: base = dates.date2num(dt) @@ -288,8 +287,8 @@ def try_parse(values): return values elif isinstance(values, str): return try_parse(values) - elif isinstance(values, (list, tuple, np.ndarray, Index, ABCSeries)): - if isinstance(values, ABCSeries): + elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): + if isinstance(values, Series): # https://github.com/matplotlib/matplotlib/issues/11391 # Series was skipped. 
Convert to DatetimeIndex to get asi8 values = Index(values) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 54cf307a6ee66..589f8933efa96 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -4,10 +4,8 @@ import numpy as np import pytest -from pandas.core.dtypes.generic import ABCMultiIndex - import pandas as pd -from pandas import DataFrame, Index, Series, isna +from pandas import DataFrame, Index, MultiIndex, Series, isna import pandas._testing as tm from pandas.core.base import SpecificationError @@ -519,7 +517,7 @@ def test_map(self, datetime_series): tm.assert_series_equal(a.map(c), exp) def test_map_empty(self, indices): - if isinstance(indices, ABCMultiIndex): + if isinstance(indices, MultiIndex): pytest.skip("Initializing a Series from a MultiIndex is not supported") s = Series(indices)
Per discussion in #27353, isinstance checks with the ABCFoo classes are about 4x as expensive as the plain Foo checks. Individually they are still very small, but they add up.
https://api.github.com/repos/pandas-dev/pandas/pulls/34040
2020-05-07T00:38:35Z
2020-05-09T19:21:20Z
2020-05-09T19:21:20Z
2020-05-09T19:31:07Z
CLN: spelling fixes in docstrings
diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index f6380808d5ac2..6130e05b2a4dc 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -89,7 +89,7 @@ def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[i ] uniquified_aggspec = _make_unique_kwarg_list(aggspec_order) - # get the new indice of columns by comparison + # get the new index of columns by comparison col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) return aggspec, columns, col_idx_order @@ -182,7 +182,7 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any: is_dict = is_dict_like(agg_spec) if not (is_dict or is_list_like(agg_spec)): return agg_spec - mangled_aggspec = type(agg_spec)() # dict or OrderdDict + mangled_aggspec = type(agg_spec)() # dict or OrderedDict if is_dict: for key, aggfuncs in agg_spec.items(): diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index cc0fdae094cf9..48e2e73160dae 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -126,7 +126,7 @@ def _ensure_data( except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype - # and it is incompat this will fall through to here + # and it is incompatible this will fall through to here return ensure_object(values), np.dtype("object") # datetimelike @@ -474,7 +474,7 @@ def _factorize_array( values : ndarray na_sentinel : int, default -1 size_hint : int, optional - Passsed through to the hashtable's 'get_labels' method + Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would @@ -1240,7 +1240,7 @@ def get_indexer(current_indexer, other_indexer): break # Now find all values which are equal to - # the (nsmallest: largest)/(nlarrgest: smallest) + # the (nsmallest: largest)/(nlargest: smallest) # from our series. 
border_value = values == values[values.index[-1]] diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index bd903d9b1fae3..92b84907e1c11 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -696,7 +696,7 @@ def _values_for_factorize(self) -> Tuple[np.ndarray, Any]: na_value : object The value in `values` to consider missing. This will be treated as NA in the factorization routines, so it will be coded as - `na_sentinal` and not included in `uniques`. By default, + `na_sentinel` and not included in `uniques`. By default, ``np.nan`` is used. Notes diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9cdb5dfc47776..800e9bbaab0c4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2007,7 +2007,7 @@ def __setitem__(self, key, value): # tuple of indexers (dataframe) elif isinstance(key, tuple): # only allow 1 dimensional slicing, but can - # in a 2-d case be passd (slice(None),....) + # in a 2-d case be passed (slice(None),....) if len(key) == 2: if not com.is_null_slice(key[0]): raise AssertionError("invalid slicing for a 1-ndim categorical") diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 75888459049c1..5d87680ba5347 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -210,7 +210,7 @@ def _check_compatible_with( ---------- other setitem : bool, default False - For __setitem__ we may have stricter compatibility resrictions than + For __setitem__ we may have stricter compatibility restrictions than for comparisons. 
Raises diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index d04fdee8961e4..8be714820d18b 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -284,7 +284,7 @@ def __array__(self, dtype=None) -> np.ndarray: elif dtype == bool: return ~self._isnan - # This will raise TypeErorr for non-object dtypes + # This will raise TypeError for non-object dtypes return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 0877e98e55311..9b6d4ad7323d0 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -784,7 +784,7 @@ def __getitem__(self, key): # TODO: I think we can avoid densifying when masking a # boolean SparseArray with another. Need to look at the # key's fill_value for True / False, and then do an intersection - # on the indicies of the sp_values. + # on the indices of the sp_values. if isinstance(key, SparseArray): if is_bool_dtype(key): key = key.to_dense() diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a460d07e1f6f2..a62f94b1a3665 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -258,7 +258,7 @@ def _generate_range(cls, start, end, periods, freq, closed=None): if start is None and end is None: if closed is not None: raise ValueError( - "Closed has to be None if not both of startand end are defined" + "Closed has to be None if not both of start and end are defined" ) left_closed, right_closed = dtl.validate_endpoints(closed) @@ -877,7 +877,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): """ Parameters ---------- - array : list-like + data : list-like copy : bool, default False unit : str, default "ns" The timedelta unit to treat integers as multiples of. 
@@ -930,7 +930,7 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): copy = copy and not copy_made elif is_float_dtype(data.dtype): - # cast the unit, multiply base/frace separately + # cast the unit, multiply base/frac separately # to avoid precision issues from float -> int mask = np.isnan(data) m, p = precision_from_unit(unit) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index b0410e31c6de7..e189da2056a42 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -605,7 +605,7 @@ def use_inf_as_na_cb(key): : str The plotting backend to use. The default value is "matplotlib", the backend provided with pandas. Other backends can be specified by - prodiving the name of the module that implements the backend. + providing the name of the module that implements the backend. """ diff --git a/pandas/core/construction.py b/pandas/core/construction.py index e6e26f0eec597..b110a316a76d9 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -543,7 +543,7 @@ def _try_cast( return subarr try: - # GH#15832: Check if we are requesting a numeric dype and + # GH#15832: Check if we are requesting a numeric dtype and # that we can convert the data to the requested dtype. if is_integer_dtype(dtype): # this will raise if we have e.g. 
floats diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b91cfde45f079..9865a7d28542d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -198,8 +198,7 @@ def maybe_downcast_numeric(result, dtype, do_round: bool = False): return result if isinstance(result, list): - # reached via groupoby.agg _ohlc; really this should be handled - # earlier + # reached via groupby.agg._ohlc; really this should be handled earlier result = np.array(result) def trans(x): @@ -1693,7 +1692,7 @@ def convert_scalar_for_putitemlike(scalar, dtype: np.dtype): Parameters ---------- scalar : scalar - dtype : np.dtpye + dtype : np.dtype Returns ------- diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 82b2795582ff1..e7e8d016e52b2 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -357,7 +357,7 @@ def _concatenate_2d(to_concat, axis: int): def concat_datetime(to_concat, axis=0, typs=None): """ provide concatenation of an datetimelike array of arrays each of which is a - single M8[ns], datetimet64[ns, tz] or m8[ns] dtype + single M8[ns], datetime64[ns, tz] or m8[ns] dtype Parameters ---------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2e56398db0c18..576129010d518 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -644,7 +644,7 @@ def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: In case of non-interactive session, no boundaries apply. - `ignore_width` is here so ipnb+HTML output can behave the way + `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index af5930fd22869..438babda9ff7a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1555,7 +1555,7 @@ def filter(self, func, dropna=True, *args, **kwargs): Parameters ---------- - f : function + func : function Function to apply to each subframe. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; If False, groups that evaluate False are filled with NaNs. diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 71d7a07aadf7f..43e6b02e9dc53 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -405,8 +405,8 @@ def _get_cython_func_and_vals( Parameters ---------- - kind : sttr - how : srt + kind : str + how : str values : np.ndarray is_numeric : bool @@ -643,7 +643,7 @@ def agg_series( return self._aggregate_series_pure_python(obj, func) elif obj.index._has_complex_internals: - # Pre-empt TypeError in _aggregate_series_fast + # Preempt TypeError in _aggregate_series_fast return self._aggregate_series_pure_python(obj, func) try: @@ -895,7 +895,7 @@ def agg_series( assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result if is_extension_array_dtype(obj.dtype): - # pre-empt SeriesBinGrouper from raising TypeError + # preempt SeriesBinGrouper from raising TypeError return self._aggregate_series_pure_python(obj, func) dummy = obj[:0] diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index 3d0e3699264a8..6dbcfef46fa98 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -441,7 +441,7 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: """ from pandas.core.construction import array as pd_array - # whathever is not an array-like is returned as-is (possible valid array + # whatever is not an array-like is returned as-is (possible valid array # indexers that are not array-like: integer, slice, Ellipsis, 
None) # In this context, tuples are not considered as array-like, as they have # a specific meaning in indexing (multi-dimensional indexing) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 72369a13b150f..f1e1ebcaca1c4 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2784,7 +2784,7 @@ def get_loc_level(self, key, level=0, drop_level: bool = True): def maybe_mi_droplevels(indexer, levels, drop_level: bool): if not drop_level: return self[indexer] - # kludgearound + # kludge around orig_index = new_index = self[indexer] levels = [self._get_level_number(i) for i in levels] for i in sorted(levels, reverse=True): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 54892d5656990..b0b85f69396ba 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -473,7 +473,7 @@ def get_loc(self, key, method=None, tolerance=None): Parameters ---------- key : Period, NaT, str, or datetime - String or datetime key must be parseable as Period. + String or datetime key must be parsable as Period. Returns ------- diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index b463b8d738d30..c34b8965ca36a 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -741,7 +741,7 @@ def _make_evaluate_binop(op, step=False): """ Parameters ---------- - op : callable that accepts 2 parms + op : callable that accepts 2 params perform the binary op step : callable, optional, default to False op to apply to the step parm if not None diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e51ec33ba8519..b857a59195695 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -90,7 +90,7 @@ class IndexingError(Exception): class IndexingMixin: """ - Mixin for adding .loc/.iloc/.at/.iat to Datafames and Series. + Mixin for adding .loc/.iloc/.at/.iat to Dataframes and Series. 
""" @property @@ -1498,7 +1498,7 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): return key def _get_setitem_indexer(self, key): - # GH#32257 Fall through to let numnpy do validation + # GH#32257 Fall through to let numpy do validation return key # ------------------------------------------------------------------- @@ -2257,9 +2257,9 @@ def need_slice(obj) -> bool: def _non_reducing_slice(slice_): """ - Ensurse that a slice doesn't reduce to a Series or Scalar. + Ensure that a slice doesn't reduce to a Series or Scalar. - Any user-paseed `subset` should have this called on it + Any user-passed `subset` should have this called on it to make sure we're always working with DataFrames. """ # default to column slice, like DataFrame diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index e4dcffae45f67..67512114ec5b1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -825,7 +825,7 @@ def setitem(self, indexer, value): return self.astype(dtype).setitem(indexer, value) - # value must be storeable at this moment + # value must be storable at this moment if is_extension_array_dtype(getattr(value, "dtype", None)): # We need to be careful not to allow through strings that # can be parsed to EADtypes diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 6368a2498b04c..b11302ea01e12 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -600,7 +600,7 @@ def replace_list( """ do a list replace """ inplace = validate_bool_kwarg(inplace, "inplace") - # figure out our mask a-priori to avoid repeated replacements + # figure out our mask apriori to avoid repeated replacements values = self.as_array() def comp(s, regex=False): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 8dd46edb5579d..48a55e533c957 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -143,7 +143,7 @@ def _bn_ok_dtype(dtype: 
Dtype, name: str) -> bool: # GH 9422 # further we also want to preserve NaN when all elements - # are NaN, unlinke bottleneck/numpy which consider this + # are NaN, unlike bottleneck/numpy which consider this # to be 0 if name in ["nansum", "nanprod"]: return False diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index a1d853e38e757..59ac2a2071f0a 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -131,7 +131,7 @@ def na_arithmetic_op(left, right, op, str_rep: Optional[str], is_cmp: bool = Fal """ Return the result of evaluating op on the passed in values. - If native types are not compatible, try coersion to object dtype. + If native types are not compatible, try coercion to object dtype. Parameters ---------- @@ -184,7 +184,7 @@ def arithmetic_op(left: ArrayLike, right: Any, op, str_rep: str): Returns ------- - ndarrray or ExtensionArray + ndarray or ExtensionArray Or a 2-tuple of these in the case of divmod or rdivmod. """ @@ -315,7 +315,7 @@ def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: Returns ------- - ndarrray or ExtensionArray + ndarray or ExtensionArray """ fill_int = lambda x: x diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e3f4a80ecce7c..bc612f891d362 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -652,7 +652,7 @@ def __init__( ) = self._get_merge_keys() # validate the merge keys dtypes. 
We may need to coerce - # to avoid incompat dtypes + # to avoid incompatible dtypes self._maybe_coerce_merge_keys() # If argument passed to validate, @@ -1067,7 +1067,7 @@ def _get_merge_keys(self): return left_keys, right_keys, join_names def _maybe_coerce_merge_keys(self): - # we have valid mergees but we may have to further + # we have valid merges but we may have to further # coerce these if they are originally incompatible types # # for example if these are categorical, but are not dtype_equal @@ -1392,7 +1392,7 @@ def _restore_dropped_levels_multijoin( """ - def _convert_to_mulitindex(index) -> MultiIndex: + def _convert_to_multiindex(index) -> MultiIndex: if isinstance(index, MultiIndex): return index else: @@ -1402,7 +1402,7 @@ def _convert_to_mulitindex(index) -> MultiIndex: # the returned index if of type Index # Assure that join_index is of type MultiIndex # so that dropped levels can be appended - join_index = _convert_to_mulitindex(join_index) + join_index = _convert_to_multiindex(join_index) join_levels = join_index.levels join_codes = join_index.codes diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 17473ac26dfd6..c8d5eecf0e496 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -210,7 +210,7 @@ def _add_margins( grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name) if table.ndim == 2: - # i.e. DataFramae + # i.e. DataFrame for level in table.columns.names[1:]: if margins_name in table.columns.get_level_values(level): raise ValueError(msg) diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 345239eeb2372..6eae54633befb 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -440,7 +440,7 @@ def _bins_to_cuts( categories=labels if len(set(labels)) == len(labels) else None, ordered=ordered, ) - # TODO: handle mismach between categorical label order and pandas.cut order. 
+ # TODO: handle mismatch between categorical label order and pandas.cut order. np.putmask(ids, na_mask, 0) result = algos.take_nd(labels, ids - 1) diff --git a/pandas/core/series.py b/pandas/core/series.py index eb409b432f89c..64220862c3e51 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -912,7 +912,7 @@ def __getitem__(self, key): def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): - # _convert_slice_indexer to determin if this slice is positional + # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) @@ -3377,7 +3377,7 @@ def nlargest(self, n=5, keep="first") -> "Series": ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, - ... "Anguilla": 11300, "Monserat": 5200} + ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 @@ -3389,7 +3389,7 @@ def nlargest(self, n=5, keep="first") -> "Series": Nauru 11300 Tuvalu 11300 Anguilla 11300 - Monserat 5200 + Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. @@ -3475,7 +3475,7 @@ def nsmallest(self, n=5, keep="first") -> "Series": ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, - ... "Anguilla": 11300, "Monserat": 5200} + ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 @@ -3487,13 +3487,13 @@ def nsmallest(self, n=5, keep="first") -> "Series": Nauru 11300 Tuvalu 11300 Anguilla 11300 - Monserat 5200 + Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. 
>>> s.nsmallest() - Monserat 5200 + Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 @@ -3504,7 +3504,7 @@ def nsmallest(self, n=5, keep="first") -> "Series": 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) - Monserat 5200 + Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 @@ -3514,7 +3514,7 @@ def nsmallest(self, n=5, keep="first") -> "Series": with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') - Monserat 5200 + Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 @@ -3523,7 +3523,7 @@ def nsmallest(self, n=5, keep="first") -> "Series": that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') - Monserat 5200 + Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 72d778524a364..1bb095d9bf72c 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -134,7 +134,7 @@ def _map_stringarray( func: Callable[[str], Any], arr: "StringArray", na_value: Any, dtype: Dtype ) -> ArrayLike: """ - Map a callable over valid elements of a StringArrray. + Map a callable over valid elements of a StringArray. 
Parameters ---------- @@ -2008,11 +2008,11 @@ def _noarg_wrapper( docstring=None, forbidden_types=["bytes"], returns_string=True, - **kargs, + **kwargs, ): @forbid_nonstring_types(forbidden_types, name=name) def wrapper(self): - result = _na_map(f, self._parent, **kargs) + result = _na_map(f, self._parent, **kwargs) return self._wrap_result(result, returns_string=returns_string) wrapper.__name__ = f.__name__ if name is None else name @@ -2321,7 +2321,7 @@ def _get_series_list(self, others): elif all(not is_list_like(x) for x in others): return [Series(others, index=idx)] raise TypeError( - "others must be Series, Index, DataFrame, np.ndarrary " + "others must be Series, Index, DataFrame, np.ndarray " "or list-like (either containing only strings or " "containing only objects of type Series/Index/" "np.ndarray[1-dim])" diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index f4bd14ad5c679..40bff5a75709b 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -494,7 +494,7 @@ def paste_wsl(): # Automatic detection of clipboard mechanisms -# and importing is done in deteremine_clipboard(): +# and importing is done in determine_clipboard(): def determine_clipboard(): """ Determine the OS/platform and set the copy() and paste() functions diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index d68a1fdde8da9..d2d5fdc7ab8a2 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -180,7 +180,7 @@ def _sizeof_fmt(num, size_qualifier): if verbose: _verbose_repr() - elif verbose is False: # specifically set to False, not nesc None + elif verbose is False: # specifically set to False, not necessarily None _non_verbose_repr() else: if exceeds_info_cols: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index fecdf3b758f0f..f7ba4750bc2ad 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -124,7 +124,7 @@ class Styler: * Column label cells 
include * ``col_heading`` * ``col<n>`` where `n` is the numeric position of the column - * ``evel<k>`` where `k` is the level in a MultiIndex + * ``level<k>`` where `k` is the level in a MultiIndex * Blank cells include ``blank`` * Data cells include ``data`` @@ -542,7 +542,7 @@ def render(self, **kwargs) -> str: d = self._translate() # filter out empty styles, every cell will have a class # but the list of props may just be [['', '']]. - # so we have the neested anys below + # so we have the nested anys below trimmed = [x for x in d["cellstyle"] if any(any(y) for y in x["props"])] d["cellstyle"] = trimmed d.update(kwargs) diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index e833fdc20d542..44765dbe74b46 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -241,7 +241,7 @@ def _pull_field( def _pull_records(js: Dict[str, Any], spec: Union[List, str]) -> List: """ - Interal function to pull field for records, and similar to + Internal function to pull field for records, and similar to _pull_field, but require to return list. And will raise error if has non iterable value. """ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 0a9daea105b64..cde7a98eb42ae 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -105,7 +105,7 @@ def write( table = self.api.Table.from_pandas(df, **from_pandas_kwargs) # write_to_dataset does not support a file-like object when - # a dircetory path is used, so just pass the path string. + # a directory path is used, so just pass the path string. if partition_cols is not None: self.api.parquet.write_to_dataset( table, @@ -190,7 +190,7 @@ def read(self, path, columns=None, **kwargs): # When path is s3:// an S3File is returned. # We need to retain the original path(str) while also - # pass the S3File().open function to fsatparquet impl. + # pass the S3File().open function to fastparquet impl. 
s3, filesystem = get_file_and_filesystem(path) try: parquet_file = self.api.ParquetFile(path, open_with=filesystem.open) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 82380d456cd6d..9b3a29026448e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2626,7 +2626,7 @@ class GenericFixed(Fixed): _reverse_index_map = {v: k for k, v in _index_type_map.items()} attributes: List[str] = [] - # indexer helpders + # indexer helpers def _class_to_alias(self, cls) -> str: return self._index_type_map.get(cls, "") @@ -2819,7 +2819,7 @@ def read_index_node( ) -> Index: data = node[start:stop] # If the index was an empty array write_array_empty() will - # have written a sentinel. Here we relace it with the original. + # have written a sentinel. Here we replace it with the original. if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0: data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type,) kind = _ensure_decoded(node._v_attrs.kind) @@ -3592,7 +3592,7 @@ def validate_data_columns(self, data_columns, min_itemsize, non_index_axes): ) # evaluate the passed data_columns, True == use all columns - # take only valide axis labels + # take only valid axis labels if data_columns is True: data_columns = list(axis_labels) elif data_columns is None: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index f445f05c2ee05..3ce1680c109f9 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -627,7 +627,7 @@ def __init__(self, catarray: Series, encoding: str = "latin-1"): def generate_value_label(self, byteorder: str) -> bytes: """ - Generate the binary representation of the value labals. + Generate the binary representation of the value labels. 
Parameters ---------- diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index e466a215091ea..bfc50ec47109e 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1658,7 +1658,7 @@ def _find_backend(backend: str): try: return _backends[backend] except KeyError: - # Fall back to unregisted, module name approach. + # Fall back to unregistered, module name approach. try: module = importlib.import_module(backend) except ImportError: diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 46941e437a4ce..19a75eb151782 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -282,10 +282,10 @@ def _maybe_right_yaxis(self, ax, axes_num): return self._get_ax_layer(ax) if hasattr(ax, "right_ax"): - # if it has right_ax proparty, ``ax`` must be left axes + # if it has right_ax property, ``ax`` must be left axes return ax.right_ax elif hasattr(ax, "left_ax"): - # if it has left_ax proparty, ``ax`` must be right axes + # if it has left_ax property, ``ax`` must be right axes return ax else: # otherwise, create twin axes @@ -387,7 +387,7 @@ def _compute_plot_data(self): if self.include_bool is True: include_type.append(np.bool_) - # GH22799, exclude datatime-like type for boxplot + # GH22799, exclude datetime-like type for boxplot exclude_type = None if self._kind == "box": # TODO: change after solving issue 27881 @@ -1103,7 +1103,7 @@ def _make_plot(self): @classmethod def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): - # column_num is used to get the target column from protf in line and + # column_num is used to get the target column from plotf in line and # area plots if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(y)) diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index 7319e8de3ec6e..0cafcfed38a54 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ 
b/pandas/plotting/_matplotlib/misc.py @@ -385,7 +385,7 @@ def parallel_coordinates( def lag_plot(series, lag=1, ax=None, **kwds): - # workaround because `c='b'` is hardcoded in matplotlibs scatter method + # workaround because `c='b'` is hardcoded in matplotlib's scatter method import matplotlib.pyplot as plt kwds.setdefault("c", plt.rcParams["patch.facecolor"]) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 08d945f679810..ef8376bfef8a9 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -277,7 +277,7 @@ def _remove_labels_from_axis(axis): t.set_visible(False) # set_visible will not be effective if - # minor axis has NullLocator and NullFormattor (default) + # minor axis has NullLocator and NullFormatter (default) if isinstance(axis.get_minor_locator(), ticker.NullLocator): axis.set_minor_locator(ticker.AutoLocator()) if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index e1de9d1bcf832..aabc10609b78b 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -269,7 +269,7 @@ def apply(self, other): def apply_index(self, i): """ Vectorized apply of DateOffset to DatetimeIndex, - raises NotImplentedError for offsets without a + raises NotImplementedError for offsets without a vectorized implementation. Parameters @@ -667,7 +667,7 @@ def _get_business_hours_by_sec(self, start, end): """ Return business hours in a day by seconds. 
""" - # create dummy datetime to calculate businesshours in a day + # create dummy datetime to calculate business hours in a day dtstart = datetime(2014, 4, 1, start.hour, start.minute) day = 1 if start < end else 2 until = datetime(2014, 4, day, end.hour, end.minute) @@ -2216,7 +2216,7 @@ def _rollback_to_year(self, other): # roll adjustment qtr_lens = self.get_weeks(norm) - # check thet qtr_lens is consistent with self._offset addition + # check that qtr_lens is consistent with self._offset addition end = liboffsets.shift_day(start, days=7 * sum(qtr_lens)) assert self._offset.is_on_offset(end), (start, end, qtr_lens) diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index b7bdbde5bac5e..26b5df862a07e 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -345,7 +345,7 @@ def doc(*args: Union[str, Callable], **kwargs: str) -> Callable[[F], F]: *args : str or callable The string / docstring / docstring template to be appended in order after default docstring under function. - **kwags : str + **kwargs : str The string which would be used to format docstring template. """ diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py index 71965b8e7dd9d..f413490764124 100644 --- a/pandas/util/_doctools.py +++ b/pandas/util/_doctools.py @@ -23,7 +23,7 @@ def __init__( def _shape(self, df: pd.DataFrame) -> Tuple[int, int]: """ - Calculate table chape considering index levels. + Calculate table shape considering index levels. """ row, col = df.shape return row + df.columns.nlevels, col + df.index.nlevels diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 53a25eb321b73..fbb44408f01be 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -353,7 +353,7 @@ def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray: """ Validate percentiles (used by describe and quantile). 
- This function checks if the given float oriterable of floats is a valid percentile + This function checks if the given float or iterable of floats is a valid percentile otherwise raises a ValueError. Parameters
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/34039
2020-05-06T22:55:07Z
2020-05-08T17:35:40Z
2020-05-08T17:35:40Z
2020-05-08T17:36:22Z
CI: Pin Cython on Numpy Dev
diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 29ebfe2639e32..652438440d3f2 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -14,7 +14,7 @@ dependencies: - pytz - pip - pip: - - cython>=0.29.16 + - cython==0.29.16 # GH#34014 - "git+git://github.com/dateutil/dateutil.git" - "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - "--pre"
- [x] ref https://github.com/pandas-dev/pandas/issues/34014 Until we fix the above issue with cython 3.0a4 cc @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/34036
2020-05-06T21:19:23Z
2020-05-06T22:04:29Z
2020-05-06T22:04:29Z
2020-05-06T22:04:33Z
CLN: address TODOs and FIXMEs
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 9cdb5dfc47776..efb53e5a89314 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -7,7 +7,7 @@ from pandas._config import get_option -from pandas._libs import algos as libalgos, hashtable as htable +from pandas._libs import NaT, algos as libalgos, hashtable as htable from pandas._typing import ArrayLike, Dtype, Ordered, Scalar from pandas.util._decorators import cache_readonly, deprecate_kwarg, doc from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs @@ -344,7 +344,7 @@ def __init__( ) from err except ValueError as err: - # FIXME + # TODO(EA2D) raise NotImplementedError( "> 1 ndim Categorical are not supported at this time" ) from err @@ -1425,7 +1425,7 @@ def _internal_get_values(self): """ # if we are a datetime and period index, return Index to keep metadata if needs_i8_conversion(self.categories.dtype): - return self.categories.take(self._codes, fill_value=np.nan) + return self.categories.take(self._codes, fill_value=NaT) elif is_integer_dtype(self.categories) and -1 in self._codes: return self.categories.astype("object").take(self._codes, fill_value=np.nan) return np.array(self) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index b0410e31c6de7..4d5b9c6920e48 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -356,8 +356,7 @@ def _deprecate_negative_int_max_colwidth(key): ) cf.register_option( - # FIXME: change `validator=is_nonnegative_int` - # in version 1.2 + # TODO(2.0): change `validator=is_nonnegative_int` see GH#31569 "max_colwidth", 50, max_colwidth_doc, diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index abfbe8d783325..5b20b8e1b3be5 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -484,7 +484,6 @@ def is_period_dtype(arr_or_dtype) -> bool: # GH#33400 fastpath for dtype object return 
arr_or_dtype.type is Period - # TODO: Consider making Period an instance of PeriodDtype if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) @@ -523,7 +522,6 @@ def is_interval_dtype(arr_or_dtype) -> bool: # GH#33400 fastpath for dtype object return arr_or_dtype.type is Interval - # TODO: Consider making Interval an instance of IntervalDtype if arr_or_dtype is None: return False return IntervalDtype.is_dtype(arr_or_dtype) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2e56398db0c18..4e86b3710a1bd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8338,9 +8338,7 @@ def blk_func(values): assert len(res) == max(list(res.keys())) + 1, res.keys() out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype) out.index = df.columns - if axis == 0 and df.dtypes.apply(needs_i8_conversion).any(): - # FIXME: needs_i8_conversion check is kludge, not sure - # why it is necessary in this case and this case alone + if axis == 0 and is_object_dtype(out.dtype): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 6368a2498b04c..5ab792563f136 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1284,9 +1284,6 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_value=lib.no_default): # When filling blknos, make sure blknos is updated before appending to # blocks list, that way new blkno is exactly len(blocks). - # - # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, - # pytables serialization will break otherwise. 
blocks = [] for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True): if blkno == -1: diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index 04dedac3e8b9b..0b0bbd3a6dc48 100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -148,13 +148,13 @@ def test_error(self, data, all_arithmetic_operators): # other specific errors tested in the boolean array specific tests pass - def test_arith_frame_with_scalar(self, data, all_arithmetic_operators): + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): # frame & scalar op_name = all_arithmetic_operators - if op_name in self.implements: - super().test_arith_frame_with_scalar(data, all_arithmetic_operators) - else: - pytest.xfail("_reduce needs implementation") + if op_name not in self.implements: + mark = pytest.mark.xfail(reason="_reduce needs implementation") + request.node.add_marker(mark) + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) class TestComparisonOps(base.BaseComparisonOpsTests): diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index bb3595f4aef68..e48065b47f17c 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -175,20 +175,30 @@ def test_take_series(self, data): # ValueError: PandasArray must be 1-dimensional. 
super().test_take_series(data) - def test_loc_iloc_frame_single_dtype(self, data): + def test_loc_iloc_frame_single_dtype(self, data, request): npdtype = data.dtype.numpy_dtype if npdtype == object or npdtype == np.float64: # GH#33125 - pytest.xfail(reason="GH#33125 astype doesn't recognize data.dtype") + mark = pytest.mark.xfail( + reason="GH#33125 astype doesn't recognize data.dtype" + ) + request.node.add_marker(mark) super().test_loc_iloc_frame_single_dtype(data) class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): @skip_nested - def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): + def test_groupby_extension_apply( + self, data_for_grouping, groupby_apply_op, request + ): # ValueError: Names should be list-like for a MultiIndex - if data_for_grouping.dtype.numpy_dtype == np.float64: - pytest.xfail(reason="GH#33125 astype doesn't recognize data.dtype") + a = "a" + is_identity = groupby_apply_op(a) is a + if data_for_grouping.dtype.numpy_dtype == np.float64 and is_identity: + mark = pytest.mark.xfail( + reason="GH#33125 astype doesn't recognize data.dtype" + ) + request.node.add_marker(mark) super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 781625f1f2ec9..4560c7385655d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1260,7 +1260,7 @@ def test_get_nonexistent_category(): def test_series_groupby_on_2_categoricals_unobserved( - reduction_func: str, observed: bool + reduction_func: str, observed: bool, request ): # GH 17605 @@ -1268,7 +1268,8 @@ def test_series_groupby_on_2_categoricals_unobserved( pytest.skip("ngroup is not truly a reduction") if reduction_func == "corrwith": # GH 32293 - pytest.xfail("TODO: implemented SeriesGroupBy.corrwith") + mark = pytest.mark.xfail(reason="TODO: implemented SeriesGroupBy.corrwith") + request.node.add_marker(mark) df 
= pd.DataFrame( { diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index b671564eb77fe..e718a6b759963 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -113,7 +113,7 @@ def test_pandas_gbq(df): pandas_gbq = import_module("pandas_gbq") # noqa -@pytest.mark.xfail(reason="0.7.0 pending") +@pytest.mark.xfail(reason="0.8.1 tries to import urlencode from pd.io.common") @tm.network def test_pandas_datareader():
https://api.github.com/repos/pandas-dev/pandas/pulls/34033
2020-05-06T18:24:44Z
2020-05-06T21:20:56Z
2020-05-06T21:20:56Z
2020-05-06T21:31:09Z
PERF: use is_tick_offset to check for Tick
diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx index cde2c2573072f..86979fa7946d9 100644 --- a/pandas/_libs/tslibs/c_timestamp.pyx +++ b/pandas/_libs/tslibs/c_timestamp.pyx @@ -40,6 +40,7 @@ from pandas._libs.tslibs.timezones cimport ( get_timezone, is_utc, tz_compare) from pandas._libs.tslibs.timezones import UTC from pandas._libs.tslibs.tzconversion cimport tz_convert_single +from pandas._libs.tslibs.offsets cimport is_tick_object class NullFrequencyError(ValueError): @@ -247,13 +248,9 @@ cdef class _Timestamp(datetime): elif is_integer_object(other): raise integer_op_not_supported(self) - elif PyDelta_Check(other) or hasattr(other, 'delta'): - # delta --> offsets.Tick + elif PyDelta_Check(other): # logic copied from delta_to_nanoseconds to prevent circular import - if hasattr(other, 'nanos'): - # Tick - nanos = other.nanos - elif hasattr(other, 'delta'): + if hasattr(other, 'delta'): # pd.Timedelta nanos = other.value elif PyDelta_Check(other): @@ -264,6 +261,16 @@ cdef class _Timestamp(datetime): result = type(self)(self.value + nanos, tz=self.tzinfo, freq=self.freq) return result + elif is_tick_object(other): + try: + nanos = other.nanos + except OverflowError: + raise OverflowError( + f"the add operation between {other} and {self} will overflow" + ) + result = type(self)(self.value + nanos, tz=self.tzinfo, freq=self.freq) + return result + elif is_array(other): if other.dtype.kind in ['i', 'u']: raise integer_op_not_supported(self) @@ -280,8 +287,7 @@ cdef class _Timestamp(datetime): def __sub__(self, other): if (is_timedelta64_object(other) or is_integer_object(other) or - PyDelta_Check(other) or hasattr(other, 'delta')): - # `delta` attribute is for offsets.Tick or offsets.Week obj + PyDelta_Check(other) or is_tick_object(other)): neg_other = -other return self + neg_other diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index ec397a470f2ec..a45e10e8337a4 100644 --- 
a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -147,8 +147,7 @@ cdef class _NaT(datetime): return c_NaT elif util.is_datetime64_object(other) or util.is_timedelta64_object(other): return c_NaT - elif hasattr(other, "delta"): - # Timedelta, offsets.Tick, offsets.Week + elif util.is_offset_object(other): return c_NaT elif util.is_integer_object(other) or util.is_period_object(other): @@ -184,8 +183,7 @@ cdef class _NaT(datetime): return c_NaT elif util.is_datetime64_object(other) or util.is_timedelta64_object(other): return c_NaT - elif hasattr(other, "delta"): - # offsets.Tick, offsets.Week + elif util.is_offset_object(other): return c_NaT elif util.is_integer_object(other) or util.is_period_object(other): diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 878dbcfd99842..f5d32237280eb 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -57,8 +57,7 @@ from pandas._libs.tslibs.resolution import Resolution from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( _nat_scalar_rules, NPY_NAT, is_null_datetimelike, c_NaT as NaT) -from pandas._libs.tslibs.offsets cimport to_offset -from pandas._libs.tslibs.offsets import _Tick +from pandas._libs.tslibs.offsets cimport to_offset, is_tick_object from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal @@ -1588,9 +1587,9 @@ cdef class _Period: int64_t nanos, offset_nanos if (PyDelta_Check(other) or util.is_timedelta64_object(other) or - isinstance(other, _Tick)): + is_tick_object(other)): offset = to_offset(self.freq.rule_code) - if isinstance(offset, _Tick): + if is_tick_object(offset): nanos = delta_to_nanoseconds(other) offset_nanos = delta_to_nanoseconds(offset) if nanos % offset_nanos == 0: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 032d3186217e3..58600678c0938 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ 
b/pandas/_libs/tslibs/timedeltas.pyx @@ -31,8 +31,7 @@ from pandas._libs.tslibs.np_datetime cimport ( from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, NPY_NAT, c_NaT as NaT) -from pandas._libs.tslibs.offsets cimport to_offset -from pandas._libs.tslibs.offsets import _Tick as Tick +from pandas._libs.tslibs.offsets cimport to_offset, is_tick_object # ---------------------------------------------------------------------- # Constants @@ -140,10 +139,10 @@ def ints_to_pytimedelta(const int64_t[:] arr, box=False): # ---------------------------------------------------------------------- cpdef int64_t delta_to_nanoseconds(delta) except? -1: - if hasattr(delta, 'nanos'): + if is_tick_object(delta): return delta.nanos - if hasattr(delta, 'delta'): - delta = delta.delta + if isinstance(delta, _Timedelta): + delta = delta.value if is_timedelta64_object(delta): return delta.astype("timedelta64[ns]").item() if is_integer_object(delta): @@ -204,8 +203,8 @@ cdef convert_to_timedelta64(object ts, object unit): else: ts = parse_timedelta_string(ts) ts = np.timedelta64(ts) - elif hasattr(ts, 'delta'): - ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') + elif is_tick_object(ts): + ts = np.timedelta64(ts.nanos, 'ns') if PyDelta_Check(ts): ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') @@ -562,12 +561,10 @@ cdef bint _validate_ops_compat(other): # return True if we are compat with operating if checknull_with_nat(other): return True - elif PyDelta_Check(other) or is_timedelta64_object(other): + elif is_any_td_scalar(other): return True elif isinstance(other, str): return True - elif hasattr(other, 'delta'): - return True return False @@ -779,8 +776,7 @@ cdef class _Timedelta(timedelta): if isinstance(other, _Timedelta): ots = other - elif (is_timedelta64_object(other) or PyDelta_Check(other) - or isinstance(other, Tick)): + elif is_any_td_scalar(other): ots = Timedelta(other) # TODO: watch out for overflows @@ 
-1249,8 +1245,8 @@ class Timedelta(_Timedelta): if unit is not None: value = value.astype(f'timedelta64[{unit}]') value = value.astype('timedelta64[ns]') - elif hasattr(value, 'delta'): - value = np.timedelta64(delta_to_nanoseconds(value.delta), 'ns') + elif is_tick_object(value): + value = np.timedelta64(value.nanos, 'ns') elif is_integer_object(value) or is_float_object(value): # unit=None is de-facto 'ns' unit = parse_timedelta_unit(unit) @@ -1460,7 +1456,7 @@ class Timedelta(_Timedelta): cdef bint is_any_td_scalar(object obj): return ( - PyDelta_Check(obj) or is_timedelta64_object(obj) or isinstance(obj, Tick) + PyDelta_Check(obj) or is_timedelta64_object(obj) or is_tick_object(obj) )
https://api.github.com/repos/pandas-dev/pandas/pulls/34032
2020-05-06T17:27:42Z
2020-05-06T21:27:56Z
2020-05-06T21:27:56Z
2020-05-06T21:36:47Z