title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Backport PR #29010 on branch 0.25.x (Setuptools CI fixup)
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 02e48f6cb9af1..1834b9674d898 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -55,6 +55,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false +conda install pip # create conda to create a historical artifact for pip & setuptools conda update -n base conda echo "conda info -a"
Backport PR #29010: Setuptools CI fixup
https://api.github.com/repos/pandas-dev/pandas/pulls/29015
2019-10-15T21:38:04Z
2019-10-16T12:37:34Z
2019-10-16T12:37:34Z
2019-10-16T12:37:35Z
CLN: Fix typing in pandas\tests\arrays\test_datetimelike.py (#28926)
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 7c482664bca48..117a19acbfc3a 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1,3 +1,5 @@ +from typing import Type, Union + import numpy as np import pytest @@ -5,6 +7,9 @@ import pandas as pd from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex import pandas.util.testing as tm @@ -52,7 +57,7 @@ def timedelta_index(request): class SharedTests: - index_cls = None + index_cls = None # type: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]] def test_compare_len1_raises(self): # make sure we raise when comparing with different lengths, specific diff --git a/setup.cfg b/setup.cfg index c9ba13443e97c..ed95380544b05 100644 --- a/setup.cfg +++ b/setup.cfg @@ -136,9 +136,6 @@ ignore_errors=True [mypy-pandas.tests.arithmetic.test_datetime64] ignore_errors=True -[mypy-pandas.tests.arrays.test_datetimelike] -ignore_errors=True - [mypy-pandas.tests.dtypes.test_common] ignore_errors=True
- [x] partially adresses #28926 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/29014
2019-10-15T21:32:37Z
2019-10-23T00:52:51Z
2019-10-23T00:52:51Z
2019-10-23T06:01:37Z
TST: restore xfail for test_apply
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 2831c07cb21d3..5391cb5ce821f 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas.compat import PY37, is_platform_windows +from pandas.compat import PY37 import pandas as pd from pandas import ( @@ -209,10 +209,9 @@ def test_level_get_group(observed): assert_frame_equal(result, expected) -# GH#21636 previously flaky on py37 -@pytest.mark.xfail( - is_platform_windows() and PY37, reason="Flaky, GH-27902", strict=False -) +# GH#21636 flaky on py37; may be related to older numpy, see discussion +# https://github.com/MacPython/pandas-wheels/pull/64 +@pytest.mark.xfail(PY37, reason="Flaky, GH-27902", strict=False) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 @@ -229,6 +228,9 @@ def test_apply(ordered): idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"]) expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"]) + # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]]) + # is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"]) + # when we expect Series(0., index=["values"]) result = grouped.apply(lambda x: np.mean(x)) assert_frame_equal(result, expected)
xref https://github.com/MacPython/pandas-wheels/pull/64
https://api.github.com/repos/pandas-dev/pandas/pulls/29013
2019-10-15T20:57:05Z
2019-10-16T12:25:42Z
2019-10-16T12:25:41Z
2019-10-16T15:23:34Z
Remove to original callable
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 1d0f4b583bd0c..95f3392011d3f 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -17,7 +17,6 @@ import ast import collections import doctest -import functools import glob import importlib import inspect @@ -245,7 +244,7 @@ def __init__(self, name): self.name = name obj = self._load_obj(name) self.obj = obj - self.code_obj = self._to_original_callable(obj) + self.code_obj = inspect.unwrap(obj) self.raw_doc = obj.__doc__ or "" self.clean_doc = pydoc.getdoc(obj) self.doc = NumpyDocString(self.clean_doc) @@ -292,30 +291,6 @@ def _load_obj(name): obj = getattr(obj, part) return obj - @staticmethod - def _to_original_callable(obj): - """ - Find the Python object that contains the source code of the object. - - This is useful to find the place in the source code (file and line - number) where a docstring is defined. It does not currently work for - all cases, but it should help find some (properties...). - """ - while True: - if inspect.isfunction(obj) or inspect.isclass(obj): - f = inspect.getfile(obj) - if f.startswith("<") and f.endswith(">"): - return None - return obj - if inspect.ismethod(obj): - obj = obj.__func__ - elif isinstance(obj, functools.partial): - obj = obj.func - elif isinstance(obj, property): - obj = obj.fget - else: - return None - @property def type(self): return type(self.obj).__name__
- [x] tests passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` @datapythonista This is the pull request for replacing to_original_callable
https://api.github.com/repos/pandas-dev/pandas/pulls/29012
2019-10-15T20:49:20Z
2019-11-07T23:22:50Z
null
2019-11-07T23:22:50Z
Setuptools CI fixup
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index be8c3645691fe..794130355fd74 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -55,6 +55,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false +conda install pip # create conda to create a historical artifact for pip & setuptools conda update -n base conda echo "conda info -a"
Closes https://github.com/pandas-dev/pandas/issues/29008
https://api.github.com/repos/pandas-dev/pandas/pulls/29010
2019-10-15T20:08:49Z
2019-10-15T21:37:41Z
2019-10-15T21:37:40Z
2019-10-15T21:37:45Z
CI: troubleshoot RemoveError: 'setuptools' is a dependency of conda
diff --git a/ci/deps/azure-35-compat.yaml b/ci/deps/azure-35-compat.yaml index dd54001984ec7..db616fc6e9421 100644 --- a/ci/deps/azure-35-compat.yaml +++ b/ci/deps/azure-35-compat.yaml @@ -22,7 +22,7 @@ dependencies: - pytest-xdist - pytest-mock - pytest-azurepipelines - - pip + - pip=19.2.3 - pip: # for python 3.5, pytest>=4.0.2, cython>=0.29.13 is not available in conda - cython>=0.29.13 diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 1e2e6c33e8c15..6540d91569c40 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -17,7 +17,7 @@ dependencies: - pytest-mock - pytest-azurepipelines - hypothesis>=3.58.0 - - pip + - pip=19.2.3 - pip: # Anaconda doesn't build a new enough Cython - cython>=0.29.13 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index 76868f598f11b..0255a1934d653 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -25,6 +25,6 @@ dependencies: - pytest-mock - pytest-azurepipelines - hypothesis>=3.58.0 - - pip + - pip=19.2.3 - pip: - html5lib==1.0b2 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 21205375204dc..498b6b509fafa 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -31,6 +31,7 @@ dependencies: - pytest-mock - pytest-azurepipelines - moto - - pip + - pip=19.2.3 + - setuptools=41.0.1 - pip: - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 24464adb74f5b..1b38ecb4aa59f 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -30,6 +30,6 @@ dependencies: - pytest-xdist>=1.29.0 - pytest-mock - pytest-azurepipelines - - pip + - pip=19.2.3 - pip: - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 0fb06fd43724c..891f1561ca861 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -11,7 +11,7 @@ dependencies: - 
pytest-xdist - pytest-mock - hypothesis>=3.58.0 - - pip + - pip=19.2.3 - pip: - "git+git://github.com/dateutil/dateutil.git" - "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml index 4e0f09904b695..623c6a984e296 100644 --- a/ci/deps/azure-macos-35.yaml +++ b/ci/deps/azure-macos-35.yaml @@ -21,7 +21,8 @@ dependencies: - xlrd - xlsxwriter - xlwt - - pip + - pip=19.2.3 + - setuptools=41.0.1 - pip: # Anaconda / conda-forge don't build for 3.5 - cython>=0.29.13 diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index e4e917d13990c..8a2ae2fe7e58f 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -44,7 +44,7 @@ dependencies: - pytest-cov - pytest-mock - hypothesis>=3.58.0 - - pip + - pip=19.2.3 - pip: - brotlipy - coverage diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 44795766d7c31..de634fb3f91ff 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -37,6 +37,6 @@ dependencies: - pytest>=5.0.1 - pytest-xdist>=1.29.0 - pytest-mock - - pip + - pip=19.2.3 - pip: - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 440ca6c480b87..d2378453c3e34 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -18,7 +18,7 @@ dependencies: - pytest-mock - hypothesis>=3.58.0 - s3fs<0.3 - - pip + - pip=19.2.3 - pyreadstat - pip: - moto
- [ ] closes #29008 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/29009
2019-10-15T19:36:00Z
2019-10-15T19:54:33Z
null
2019-10-15T19:54:33Z
CLN: Fix mypy error in 'pandas/tests/computation/test_eval.py'
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index b6ffd8a83e409..4d40cd3a2d4ca 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -2,6 +2,7 @@ from functools import reduce from itertools import product import operator +from typing import Dict, Type import warnings import numpy as np @@ -19,7 +20,11 @@ from pandas.core.computation.check import _NUMEXPR_VERSION from pandas.core.computation.engines import NumExprClobberingError, _engines import pandas.core.computation.expr as expr -from pandas.core.computation.expr import PandasExprVisitor, PythonExprVisitor +from pandas.core.computation.expr import ( + BaseExprVisitor, + PandasExprVisitor, + PythonExprVisitor, +) from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR from pandas.core.computation.ops import ( _arith_ops_syms, @@ -1884,7 +1889,7 @@ def test_invalid_parser(): "python": PythonExprVisitor, "pytables": pytables.ExprVisitor, "pandas": PandasExprVisitor, -} +} # type: Dict[str, Type[BaseExprVisitor]] @pytest.mark.parametrize("engine", _engines) diff --git a/setup.cfg b/setup.cfg index 462e79dae1039..ca15386b2c429 100644 --- a/setup.cfg +++ b/setup.cfg @@ -145,9 +145,6 @@ ignore_errors=True [mypy-pandas.tests.arrays.test_period] ignore_errors=True -[mypy-pandas.tests.computation.test_eval] -ignore_errors=True - [mypy-pandas.tests.dtypes.test_common] ignore_errors=True
Added explicit typing annotation for _parsers: Dict[str, Type[BaseExprVisitor]] Also added some import for Dict, Type and BaseExprVisitor - [x] relates to #28926 - [x] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/29007
2019-10-15T17:15:07Z
2019-10-16T18:43:33Z
2019-10-16T18:43:32Z
2019-10-16T18:43:39Z
CLN: Fix mypy errors in pandas/tests/io/test_sql.py Reverted and added type changes
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 7491cef17ebfc..183a47c6039ec 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -583,7 +583,7 @@ class _TestSQLApi(PandasSQLTest): """ flavor = "sqlite" - mode = None + mode = None # type: str def setup_connect(self): self.conn = self.connect() @@ -1234,7 +1234,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): """ - flavor = None + flavor = None # type: str @pytest.fixture(autouse=True, scope="class") def setup_class(cls): diff --git a/setup.cfg b/setup.cfg index 55d25abde585c..462e79dae1039 100644 --- a/setup.cfg +++ b/setup.cfg @@ -205,9 +205,6 @@ ignore_errors=True [mypy-pandas.tests.io.json.test_ujson] ignore_errors=True -[mypy-pandas.tests.io.test_sql] -ignore_errors=True - [mypy-pandas.tests.plotting.test_backend] ignore_errors=True
- [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` ref closed PR #28979
https://api.github.com/repos/pandas-dev/pandas/pulls/29006
2019-10-15T16:13:25Z
2019-10-16T03:47:07Z
2019-10-16T03:47:07Z
2019-10-16T03:47:16Z
Backport PR #29000 on branch 0.25.x
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 59aedd2c5e8e6..35229591ecfae 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -1,6 +1,6 @@ .. _whatsnew_0252: -What's new in 0.25.2 (October XX, 2019) +What's new in 0.25.2 (October 15, 2019) --------------------------------------- These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog @@ -15,93 +15,25 @@ including other versions of pandas. Bug fixes ~~~~~~~~~ -Categorical -^^^^^^^^^^^ - -- - -Datetimelike -^^^^^^^^^^^^ - -- -- -- - -Timezones -^^^^^^^^^ - -- - -Numeric -^^^^^^^ - -- -- -- -- - -Conversion -^^^^^^^^^^ - -- - -Interval -^^^^^^^^ - -- - Indexing ^^^^^^^^ -- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`). +- Fix regression in :meth:`DataFrame.reindex` not following the ``limit`` argument (:issue:`28631`). - Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`) -- -- -- - -Missing -^^^^^^^ - -- I/O ^^^ -- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`). +- Fix regression in notebook display where ``<th>`` tags were missing for :attr:`DataFrame.index` values (:issue:`28204`). - Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`) - Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`). -- - -Plotting -^^^^^^^^ - -- -- -- Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`). 
-- -- -- - -Reshaping -^^^^^^^^^ - -- -- -- -- -- - -Sparse -^^^^^^ - -- +- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`) -Other ^^^^^ - Compatibility with Python 3.8 in :meth:`DataFrame.query` (:issue:`27261`)
https://api.github.com/repos/pandas-dev/pandas/pulls/29005
2019-10-15T13:54:10Z
2019-10-15T15:21:34Z
2019-10-15T15:21:34Z
2019-10-15T15:22:50Z
fix is_scalar documentation (#28998)
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1c2f80b832201..5761f1ffb0c0a 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -137,8 +137,8 @@ def is_scalar(val: object) -> bool: Examples -------- - >>> dt = pd.datetime.datetime(2018, 10, 3) - >>> pd.is_scalar(dt) + >>> dt = datetime.datetime(2018, 10, 3) + >>> pd.api.types.is_scalar(dt) True >>> pd.api.types.is_scalar([2, 3])
- [x] closes #28998 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry is_scalar documentation was wrong, see #28998
https://api.github.com/repos/pandas-dev/pandas/pulls/29004
2019-10-15T12:20:19Z
2019-10-15T15:21:59Z
2019-10-15T15:21:58Z
2019-10-15T15:25:22Z
Backport PR #28730 on branch 0.25.x (CI: 3.8 build)
diff --git a/.travis.yml b/.travis.yml index 8335a6ee92bef..6465abb317c01 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,6 +31,12 @@ matrix: - python: 3.5 include: + - dist: bionic + # 18.04 + python: 3.8-dev + env: + - JOB="3.8-dev" PATTERN="(not slow and not network)" + - dist: trusty env: - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network)" @@ -72,6 +78,7 @@ before_install: # This overrides travis and tells it to look nowhere. - export BOTO_CONFIG=/dev/null + install: - echo "install start" - ci/prep_cython_cache.sh @@ -79,17 +86,19 @@ install: - ci/submit_cython_cache.sh - echo "install done" + before_script: # display server (for clipboard functionality) needs to be started here, # does not work if done in install:setup_env.sh (GH-26103) - export DISPLAY=":99.0" - echo "sh -e /etc/init.d/xvfb start" - - sh -e /etc/init.d/xvfb start + - if [ "$JOB" != "3.8-dev" ]; then sh -e /etc/init.d/xvfb start; fi - sleep 3 script: - echo "script start" - - source activate pandas-dev + - echo "$JOB" + - if [ "$JOB" != "3.8-dev" ]; then source activate pandas-dev; fi - ci/run_tests.sh after_script: diff --git a/ci/build38.sh b/ci/build38.sh new file mode 100644 index 0000000000000..5c798c17301e0 --- /dev/null +++ b/ci/build38.sh @@ -0,0 +1,25 @@ +#!/bin/bash -e +# Special build for python3.8 until numpy puts its own wheels up + +sudo apt-get install build-essential gcc xvfb +pip install --no-deps -U pip wheel setuptools +pip install python-dateutil pytz cython pytest pytest-xdist hypothesis + +# Possible alternative for getting numpy: +# pip install --pre -f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com/ numpy +git clone https://github.com/numpy/numpy +cd numpy +python setup.py build_ext --inplace +python setup.py install +cd .. +rm -rf numpy + +python setup.py build_ext -inplace +python -m pip install --no-build-isolation -e . 
+ +python -c "import sys; print(sys.version_info)" +python -c "import pandas as pd" +python -c "import hypothesis" + +# TODO: Is there anything else in setup_env that we really want to do? +# ci/setup_env.sh diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 88742e0483c7e..02e48f6cb9af1 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -1,5 +1,9 @@ #!/bin/bash -e +if [ "$JOB" == "3.8-dev" ]; then + /bin/bash ci/build38.sh + exit 0 +fi # edit the locale file if needed if [ -n "$LOCALE_OVERRIDE" ]; then
Backport PR #28730: CI: 3.8 build
https://api.github.com/repos/pandas-dev/pandas/pulls/29002
2019-10-15T11:39:03Z
2019-10-15T13:51:37Z
2019-10-15T13:51:37Z
2019-10-15T13:51:38Z
DOC: 0.25.2 whatsnew cleanup
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 73a5ac5f840be..a99751f9bab9f 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -1,6 +1,6 @@ .. _whatsnew_0252: -What's new in 0.25.2 (October XX, 2019) +What's new in 0.25.2 (October 15, 2019) --------------------------------------- These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog @@ -15,91 +15,24 @@ including other versions of pandas. Bug fixes ~~~~~~~~~ -Categorical -^^^^^^^^^^^ - -- - -Datetimelike -^^^^^^^^^^^^ - -- -- -- - -Timezones -^^^^^^^^^ - -- - -Numeric -^^^^^^^ - -- -- -- -- - -Conversion -^^^^^^^^^^ - -- - -Interval -^^^^^^^^ - -- - Indexing ^^^^^^^^ -- Fix regression in :meth:`DataFrame.reindex` not following ``limit`` argument (:issue:`28631`). +- Fix regression in :meth:`DataFrame.reindex` not following the ``limit`` argument (:issue:`28631`). - Fix regression in :meth:`RangeIndex.get_indexer` for decreasing :class:`RangeIndex` where target values may be improperly identified as missing/present (:issue:`28678`) -- -- - -Missing -^^^^^^^ - -- I/O ^^^ -- Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`). +- Fix regression in notebook display where ``<th>`` tags were missing for :attr:`DataFrame.index` values (:issue:`28204`). - Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`) - Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`). -- - -Plotting -^^^^^^^^ - -- -- -- Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`). 
- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`) -- -- -- - -Reshaping -^^^^^^^^^ - -- -- -- -- -- - -Sparse -^^^^^^ - -- Other ^^^^^
https://api.github.com/repos/pandas-dev/pandas/pulls/29000
2019-10-15T11:36:22Z
2019-10-15T13:51:07Z
2019-10-15T13:51:07Z
2019-10-15T13:51:58Z
Backport PR #28982 on branch 0.25.x (Document 3.8 compatibility)
diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 321cc203961d5..1e2e6c33e8c15 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -3,6 +3,7 @@ channels: - defaults - conda-forge dependencies: + - attrs=19.1.0 - gcc_linux-32 - gcc_linux-32 - gxx_linux-32 @@ -11,7 +12,7 @@ dependencies: - python=3.6.* - pytz=2017.2 # universal - - pytest>=4.0.2,<5.0.0 + - pytest - pytest-xdist - pytest-mock - pytest-azurepipelines diff --git a/doc/source/install.rst b/doc/source/install.rst index fc99b458fa0af..7d1150c2f65fa 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -18,7 +18,7 @@ Instructions for installing from source, Python version support ---------------------- -Officially Python 3.5.3 and above, 3.6, and 3.7. +Officially Python 3.5.3 and above, 3.6, 3.7, and 3.8. Installing pandas ----------------- diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 0fea95710d638..8e43d06001a5f 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -6,6 +6,10 @@ What's new in 0.25.2 (October XX, 2019) These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog including other versions of pandas. +.. note:: + + Pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`). + .. _whatsnew_0252.bug_fixes: Bug fixes diff --git a/setup.py b/setup.py index 50f58ceaf7c2e..8cc60ba6352c2 100755 --- a/setup.py +++ b/setup.py @@ -230,6 +230,7 @@ def build_extensions(self): "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Programming Language :: Cython", "Topic :: Scientific/Engineering", ]
Backport PR #28982: Document 3.8 compatibility
https://api.github.com/repos/pandas-dev/pandas/pulls/28999
2019-10-15T11:32:17Z
2019-10-15T13:50:33Z
2019-10-15T13:50:33Z
2019-10-15T13:50:33Z
CLN: Type error fix in tests\tseries\offsets\test_yqm_offsets.py
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index ddf2c6e65b474..5cc10bf00203d 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -1,4 +1,5 @@ from datetime import date, datetime, time as dt_time, timedelta +from typing import Type import numpy as np import pytest @@ -92,7 +93,7 @@ def test_to_M8(): class Base: - _offset = None + _offset = None # type: Type[DateOffset] d = Timestamp(datetime(2008, 1, 2)) timezones = [ diff --git a/setup.cfg b/setup.cfg index 462e79dae1039..4adbf4ed29dc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -219,6 +219,3 @@ ignore_errors=True [mypy-pandas.tests.tseries.offsets.test_offsets] ignore_errors=True - -[mypy-pandas.tests.tseries.offsets.test_yqm_offsets] -ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28996
2019-10-15T09:47:22Z
2019-10-22T00:36:48Z
2019-10-22T00:36:48Z
2019-10-22T00:36:55Z
CLN: tests/extension/json/test_json.py typefix
diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py index 2f808d20acd31..0a8628b8ef705 100644 --- a/pandas/tests/extension/base/base.py +++ b/pandas/tests/extension/base/base.py @@ -2,8 +2,10 @@ class BaseExtensionTests: + @staticmethod + def assert_series_equal(left, right, **kwargs): + tm.assert_series_equal(left, right, **kwargs) - assert_equal = staticmethod(tm.assert_equal) - assert_series_equal = staticmethod(tm.assert_series_equal) - assert_frame_equal = staticmethod(tm.assert_frame_equal) - assert_extension_array_equal = staticmethod(tm.assert_extension_array_equal) + @staticmethod + def assert_frame_equal(left, right, **kwargs): + tm.assert_frame_equal(left, right, **kwargs) diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index 7262a85b1fe00..def478ccb6a00 100644 --- a/pandas/tests/extension/base/constructors.py +++ b/pandas/tests/extension/base/constructors.py @@ -3,6 +3,7 @@ import pandas as pd from pandas.core.internals import ExtensionBlock +import pandas.util.testing as tm from .base import BaseExtensionTests @@ -10,11 +11,11 @@ class BaseConstructorsTests(BaseExtensionTests): def test_from_sequence_from_cls(self, data): result = type(data)._from_sequence(data, dtype=data.dtype) - self.assert_extension_array_equal(result, data) + tm.assert_extension_array_equal(result, data) data = data[:0] result = type(data)._from_sequence(data, dtype=data.dtype) - self.assert_extension_array_equal(result, data) + tm.assert_extension_array_equal(result, data) def test_array_from_scalars(self, data): scalars = [data[0], data[1], data[2]] @@ -67,10 +68,10 @@ def test_from_dtype(self, data): def test_pandas_array(self, data): # pd.array(extension_array) should be idempotent... result = pd.array(data) - self.assert_extension_array_equal(result, data) + tm.assert_extension_array_equal(result, data) def test_pandas_array_dtype(self, data): # ... 
but specifying dtype will override idempotency result = pd.array(data, dtype=np.dtype(object)) expected = pd.arrays.PandasArray(np.asarray(data, dtype=object)) - self.assert_equal(result, expected) + tm.assert_equal(result, expected) diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index d56cc50f4739c..bfde796ce7229 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -2,6 +2,7 @@ import pytest import pandas as pd +import pandas.util.testing as tm from .base import BaseExtensionTests @@ -200,7 +201,7 @@ def test_take_negative(self, data): n = len(data) result = data.take([0, -n, n - 1, -1]) expected = data.take([0, 0, n - 1, n - 1]) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_take_non_na_fill_value(self, data_missing): fill_value = data_missing[1] # valid @@ -209,7 +210,7 @@ def test_take_non_na_fill_value(self, data_missing): array = data_missing._from_sequence([na, fill_value, na]) result = array.take([-1, 1], fill_value=fill_value, allow_fill=True) expected = array.take([1, 1]) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_take_pandas_style_negative_raises(self, data, na_value): with pytest.raises(ValueError): diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 973088cb72e7a..b09a86bfd32dc 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -120,7 +120,7 @@ def test_factorize(self, data_for_grouping, na_sentinel): expected_uniques = data_for_grouping.take([0, 4, 7]) tm.assert_numpy_array_equal(codes, expected_codes) - self.assert_extension_array_equal(uniques, expected_uniques) + tm.assert_extension_array_equal(uniques, expected_uniques) @pytest.mark.parametrize("na_sentinel", [-1, -2]) def test_factorize_equivalence(self, data_for_grouping, 
na_sentinel): @@ -128,7 +128,7 @@ def test_factorize_equivalence(self, data_for_grouping, na_sentinel): codes_2, uniques_2 = data_for_grouping.factorize(na_sentinel=na_sentinel) tm.assert_numpy_array_equal(codes_1, codes_2) - self.assert_extension_array_equal(uniques_1, uniques_2) + tm.assert_extension_array_equal(uniques_1, uniques_2) def test_factorize_empty(self, data): codes, uniques = pd.factorize(data[:0]) @@ -136,7 +136,7 @@ def test_factorize_empty(self, data): expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype) tm.assert_numpy_array_equal(codes, expected_codes) - self.assert_extension_array_equal(uniques, expected_uniques) + tm.assert_extension_array_equal(uniques, expected_uniques) def test_fillna_copy_frame(self, data_missing): arr = data_missing.take([1, 1]) @@ -240,7 +240,7 @@ def test_shift_non_empty_array(self, data, periods, indices): subset = data[:2] result = subset.shift(periods) expected = subset.take(indices, allow_fill=True) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4]) def test_shift_empty_array(self, data, periods): @@ -248,18 +248,18 @@ def test_shift_empty_array(self, data, periods): empty = data[:0] result = empty.shift(periods) expected = empty - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_shift_fill_value(self, data): arr = data[:4] fill_value = data[0] result = arr.shift(1, fill_value=fill_value) expected = data.take([0, 0, 1, 2]) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) result = arr.shift(-2, fill_value=fill_value) expected = data.take([2, 3, 0, 0]) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_hash_pandas_object_works(self, data, as_frame): # https://github.com/pandas-dev/pandas/issues/23066 @@ -268,7 
+268,7 @@ def test_hash_pandas_object_works(self, data, as_frame): data = data.to_frame() a = pd.util.hash_pandas_object(data) b = pd.util.hash_pandas_object(data) - self.assert_equal(a, b) + tm.assert_equal(a, b) def test_searchsorted(self, data_for_sorting, as_series): b, c, a = data_for_sorting @@ -313,7 +313,7 @@ def test_where_series(self, data, na_value, as_frame): if as_frame: expected = expected.to_frame(name="a") - self.assert_equal(result, expected) + tm.assert_equal(result, expected) # array other cond = np.array([True, False, True, True]) @@ -325,7 +325,7 @@ def test_where_series(self, data, na_value, as_frame): expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype)) if as_frame: expected = expected.to_frame(name="a") - self.assert_equal(result, expected) + tm.assert_equal(result, expected) @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) def test_repeat(self, data, repeats, as_series, use_numpy): @@ -341,7 +341,7 @@ def test_repeat(self, data, repeats, as_series, use_numpy): if as_series: expected = pd.Series(expected, index=arr.index.repeat(repeats)) - self.assert_equal(result, expected) + tm.assert_equal(result, expected) @pytest.mark.parametrize( "repeats, kwargs, error, msg", diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 21bbb365ab0f3..596e390151660 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -25,7 +25,7 @@ def test_isna(self, data_missing): def test_dropna_array(self, data_missing): result = data_missing.dropna() expected = data_missing[[1]] - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_dropna_series(self, data_missing): ser = pd.Series(data_missing) @@ -56,7 +56,7 @@ def test_fillna_scalar(self, data_missing): valid = data_missing[1] result = data_missing.fillna(valid) expected = data_missing.fillna(valid) - 
self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) def test_fillna_limit_pad(self, data_missing): arr = data_missing.take([1, 0, 0, 0, 1]) diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 90e607343297d..4b2396fa02954 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -5,6 +5,7 @@ import pandas as pd from pandas.core.internals import ExtensionBlock +import pandas.util.testing as tm from .base import BaseExtensionTests @@ -239,7 +240,7 @@ def test_stack(self, data, columns): assert all(result.dtypes == df.iloc[:, 0].dtype) result = result.astype(object) - self.assert_equal(result, expected) + tm.assert_equal(result, expected) @pytest.mark.parametrize( "index", diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index bb6bb02b462e2..da251c5b18211 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -4,6 +4,7 @@ import pytest import pandas as pd +import pandas.util.testing as tm from .base import BaseExtensionTests @@ -46,7 +47,7 @@ def test_setitem_empty_indxer(self, data, box_in_series): data = pd.Series(data) original = data.copy() data[np.array([], dtype=int)] = [] - self.assert_equal(data, original) + tm.assert_equal(data, original) def test_setitem_sequence_broadcasts(self, data, box_in_series): if box_in_series: @@ -180,7 +181,7 @@ def test_setitem_slice_mismatch_length_raises(self, data): def test_setitem_slice_array(self, data): arr = data[:5].copy() arr[:5] = data[-5:] - self.assert_extension_array_equal(arr, data[-5:]) + tm.assert_extension_array_equal(arr, data[-5:]) def test_setitem_scalar_key_sequence_raise(self, data): arr = data[:5].copy() diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index b5c3abd8ce8f6..59897726de793 100644 --- 
a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -135,7 +135,7 @@ def test_take_na_value_other_decimal(self): arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0")) expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")]) - self.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) class TestMissing(BaseDecimal, base.BaseMissingTests): diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 7e027a65eec3a..e0611507fd024 100644 --- a/pandas/tests/extension/json/test_json.py +++ b/pandas/tests/extension/json/test_json.py @@ -92,7 +92,7 @@ def assert_series_equal(self, left, right, **kwargs): ) tm.assert_series_equal(left, right, **kwargs) - def assert_frame_equal(self, left, right, *args, **kwargs): + def assert_frame_equal(self, left, right, **kwargs): tm.assert_index_equal( left.columns, right.columns, @@ -106,11 +106,11 @@ def assert_frame_equal(self, left, right, *args, **kwargs): jsons = (left.dtypes == "json").index for col in jsons: - self.assert_series_equal(left[col], right[col], *args, **kwargs) + self.assert_series_equal(left[col], right[col], **kwargs) left = left.drop(columns=jsons) right = right.drop(columns=jsons) - tm.assert_frame_equal(left, right, *args, **kwargs) + tm.assert_frame_equal(left, right, **kwargs) class TestDtype(BaseJSON, base.BaseDtypeTests): diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index a60607d586ada..3e2e63bd68e3d 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -6,6 +6,7 @@ import pandas as pd from pandas.core.arrays import DatetimeArray from pandas.tests.extension import base +import pandas.util.testing as tm @pytest.fixture(params=["US/Central"]) @@ -199,7 +200,7 @@ def 
test_unstack(self, obj): expected.columns.names = [None, "a"] result = ser.unstack(0) - self.assert_equal(result, expected) + tm.assert_equal(result, expected) class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 6ebe71e173ec2..ed3d16bba0b39 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -172,7 +172,7 @@ def test_isna(self, data_missing): expected = SparseArray([True, False], dtype=expected_dtype) result = pd.isna(data_missing) - self.assert_equal(result, expected) + tm.assert_equal(result, expected) result = pd.Series(data_missing).isna() expected = pd.Series(expected)
The error is due to the fact that the builtins.staticmethod type defined in extension.base.base.BaseExtensionTests does not match the function type in extension.json.test_json.BaseJSON. I don't see a possibility for a proper type hint fix without changing the structure of the code. One way would be to replace the call of staticmethod by function definitions that call the corresponding methods. Here I propose an easier suppression of the error using type: Any. What do you think? - [ ] xref #28926 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28994
2019-10-15T09:24:51Z
2020-02-09T18:08:29Z
null
2020-02-09T18:08:30Z
ENH: Informative dtype message for for assert_series_equal
diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index 9571e8027ccf7..86e5d506e0779 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -141,7 +141,7 @@ def test_empty_dtypes(check_dtype): df1["col1"] = df1["col1"].astype("int64") if check_dtype: - msg = "Attributes are different" + msg = r"Attributes of DataFrame\..* are different" with pytest.raises(AssertionError, match=msg): assert_frame_equal(df1, df2, **kwargs) else: diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index a12d9386eb159..bad3f2e67f8bb 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -179,7 +179,7 @@ def test_series_equal_values_mismatch(check_less_precise): def test_series_equal_categorical_mismatch(check_categorical): - msg = """Attributes are different + msg = """Attributes of Series are different Attribute "dtype" are different \\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 4cf2776f5aa7c..73535e55d4fa5 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1156,7 +1156,9 @@ def assert_series_equal( ): pass else: - assert_attr_equal("dtype", left, right) + assert_attr_equal( + "dtype", left, right, obj="Attributes of {obj}".format(obj=obj) + ) if check_exact: assert_numpy_array_equal( @@ -1315,8 +1317,9 @@ def assert_frame_equal( >>> assert_frame_equal(df1, df2) Traceback (most recent call last): - AssertionError: Attributes are different ... + AssertionError: Attributes of DataFrame.iloc[:, 1] are different + Attribute "dtype" are different [left]: int64 [right]: float64
Pass obj to assert_attr_equal in assert_series_equal. - [ ] closes #28991 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry - [x] Fix example in `assert_frame_equal` docstring
https://api.github.com/repos/pandas-dev/pandas/pulls/28993
2019-10-15T09:18:26Z
2019-10-18T17:28:58Z
2019-10-18T17:28:58Z
2021-03-08T11:19:23Z
tests/indexing/test_coercion.py typefix
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 05b58b0eca9b8..4f38d7beb9c0b 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -1,4 +1,5 @@ import itertools +from typing import Dict, List import numpy as np import pytest @@ -928,7 +929,7 @@ class TestReplaceSeriesCoercion(CoercionBase): klasses = ["series"] method = "replace" - rep = {} + rep = {} # type: Dict[str, List] rep["object"] = ["a", "b"] rep["int64"] = [4, 5] rep["float64"] = [1.1, 2.2] diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..eb6e0269a2ea5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -196,9 +196,6 @@ ignore_errors=True [mypy-pandas.tests.indexes.timedeltas.test_timedelta] ignore_errors=True -[mypy-pandas.tests.indexing.test_coercion] -ignore_errors=True - [mypy-pandas.tests.indexing.test_loc] ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28990
2019-10-15T08:10:48Z
2019-10-16T23:52:14Z
2019-10-16T23:52:14Z
2019-10-16T23:52:20Z
pandas-dev#28926 Mypy fix/extension json array
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index b64ddbd6ac84d..94bef0617df3c 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -16,6 +16,7 @@ import random import string import sys +from typing import Mapping import numpy as np @@ -27,7 +28,7 @@ class JSONDtype(ExtensionDtype): type = abc.Mapping name = "json" - na_value = UserDict() + na_value = UserDict() # type: Mapping[type, type] @classmethod def construct_array_type(cls): diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..e3add80d5dcdf 100644 --- a/setup.cfg +++ b/setup.cfg @@ -157,9 +157,6 @@ ignore_errors=True [mypy-pandas.tests.extension.decimal.test_decimal] ignore_errors=True -[mypy-pandas.tests.extension.json.array] -ignore_errors=True - [mypy-pandas.tests.extension.json.test_json] ignore_errors=True
ref: #28926 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/28989
2019-10-15T06:50:16Z
2019-12-17T17:56:22Z
null
2019-12-17T17:56:22Z
mypy typing issues for decimal pandas/tests/extension/decimal/test_decimal.py
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 3ac9d37ccf4f3..6d06ae61c5310 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -110,27 +110,27 @@ def assert_frame_equal(self, left, right, *args, **kwargs): tm.assert_frame_equal(left, right, *args, **kwargs) -class TestDtype(BaseDecimal, base.BaseDtypeTests): +class TestDtype(BaseDecimal, base.BaseDtypeTests): # type: ignore def test_hashable(self, dtype): pass -class TestInterface(BaseDecimal, base.BaseInterfaceTests): +class TestInterface(BaseDecimal, base.BaseInterfaceTests): # type: ignore pass -class TestConstructors(BaseDecimal, base.BaseConstructorsTests): +class TestConstructors(BaseDecimal, base.BaseConstructorsTests): # type: ignore @pytest.mark.skip(reason="not implemented constructor from dtype") def test_from_dtype(self, data): # construct from our dtype & string dtype pass -class TestReshaping(BaseDecimal, base.BaseReshapingTests): +class TestReshaping(BaseDecimal, base.BaseReshapingTests): # type: ignore pass -class TestGetitem(BaseDecimal, base.BaseGetitemTests): +class TestGetitem(BaseDecimal, base.BaseGetitemTests): # type: ignore def test_take_na_value_other_decimal(self): arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0")) @@ -138,7 +138,7 @@ def test_take_na_value_other_decimal(self): self.assert_extension_array_equal(result, expected) -class TestMissing(BaseDecimal, base.BaseMissingTests): +class TestMissing(BaseDecimal, base.BaseMissingTests): # type: ignore pass @@ -163,7 +163,7 @@ class TestBooleanReduce(Reduce, base.BaseBooleanReduceTests): pass -class TestMethods(BaseDecimal, base.BaseMethodsTests): +class TestMethods(BaseDecimal, base.BaseMethodsTests): # type: ignore @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.xfail(reason="value_counts not 
implemented yet.") def test_value_counts(self, all_data, dropna): @@ -179,11 +179,11 @@ def test_value_counts(self, all_data, dropna): tm.assert_series_equal(result, expected) -class TestCasting(BaseDecimal, base.BaseCastingTests): +class TestCasting(BaseDecimal, base.BaseCastingTests): # type: ignore pass -class TestGroupby(BaseDecimal, base.BaseGroupbyTests): +class TestGroupby(BaseDecimal, base.BaseGroupbyTests): # type: ignore @pytest.mark.xfail( reason="needs to correctly define __eq__ to handle nans, xref #27081." ) @@ -191,11 +191,11 @@ def test_groupby_apply_identity(self, data_for_grouping): super().test_groupby_apply_identity(data_for_grouping) -class TestSetitem(BaseDecimal, base.BaseSetitemTests): +class TestSetitem(BaseDecimal, base.BaseSetitemTests): # type: ignore pass -class TestPrinting(BaseDecimal, base.BasePrintingTests): +class TestPrinting(BaseDecimal, base.BasePrintingTests): # type: ignore def test_series_repr(self, data): # Overriding this base test to explicitly test that # the custom _formatter is used @@ -264,7 +264,7 @@ def test_astype_dispatches(frame): assert result.dtype.context.prec == ctx.prec -class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests): +class TestArithmeticOps(BaseDecimal, base.BaseArithmeticOpsTests): # type: ignore def check_opname(self, s, op_name, other, exc=None): super().check_opname(s, op_name, other, exc=None) @@ -298,7 +298,7 @@ def test_error(self): pass -class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests): +class TestComparisonOps(BaseDecimal, base.BaseComparisonOpsTests): # type: ignore def check_opname(self, s, op_name, other, exc=None): super().check_opname(s, op_name, other, exc=None) diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..a332602729b6a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -155,7 +155,7 @@ ignore_errors=True ignore_errors=True [mypy-pandas.tests.extension.decimal.test_decimal] -ignore_errors=True +ignore_errors=False [mypy-pandas.tests.extension.json.array] 
ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28988
2019-10-15T06:01:31Z
2019-12-27T19:51:36Z
null
2019-12-27T19:51:36Z
Backport PR #28841: BUG: use EA.astype in ExtensionBlock.to_native_types (#28841)
diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 0fea95710d638..49e87b34482b8 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -65,7 +65,7 @@ I/O - Fix regression in notebook display where <th> tags not used for :attr:`DataFrame.index` (:issue:`28204`). - Regression in :meth:`~DataFrame.to_csv` where writing a :class:`Series` or :class:`DataFrame` indexed by an :class:`IntervalIndex` would incorrectly raise a ``TypeError`` (:issue:`28210`) -- +- Fix :meth:`~DataFrame.to_csv` with ``ExtensionArray`` with list-like values (:issue:`28840`). - Plotting diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 33517087c5d76..74189142af79f 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -749,7 +749,6 @@ def _try_coerce_and_cast_result(self, result, dtype=None): def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ - values = self.get_values() if slicer is not None: @@ -1848,6 +1847,23 @@ def get_values(self, dtype=None): def to_dense(self): return np.asarray(self.values) + def to_native_types(self, slicer=None, na_rep="nan", quoting=None, **kwargs): + """override to use ExtensionArray astype for the conversion""" + values = self.values + if slicer is not None: + values = values[slicer] + mask = isna(values) + + try: + values = values.astype(str) + values[mask] = na_rep + except Exception: + # eg SparseArray does not support setitem, needs to be converted to ndarray + return super().to_native_types(slicer, na_rep, quoting, **kwargs) + + # we are expected to return a 2-d ndarray + return values.reshape(1, len(values)) + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): """ Take values according to indexer and return them as a block. 
@@ -2374,6 +2390,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): is_extension = True _can_hold_element = DatetimeBlock._can_hold_element + to_native_types = DatetimeBlock.to_native_types @property def _holder(self): diff --git a/pandas/tests/extension/list/__init__.py b/pandas/tests/extension/list/__init__.py new file mode 100644 index 0000000000000..108f1937d07d3 --- /dev/null +++ b/pandas/tests/extension/list/__init__.py @@ -0,0 +1,3 @@ +from .array import ListArray, ListDtype, make_data + +__all__ = ["ListArray", "ListDtype", "make_data"] diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py new file mode 100644 index 0000000000000..0ca9fadb68829 --- /dev/null +++ b/pandas/tests/extension/list/array.py @@ -0,0 +1,133 @@ +""" +Test extension array for storing nested data in a pandas container. + +The ListArray stores an ndarray of lists. +""" +import numbers +import random +import string + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype + +import pandas as pd +from pandas.core.arrays import ExtensionArray + + +class ListDtype(ExtensionDtype): + type = list + name = "list" + na_value = np.nan + + @classmethod + def construct_array_type(cls): + """ + Return the array type associated with this dtype. 
+ + Returns + ------- + type + """ + return ListArray + + @classmethod + def construct_from_string(cls, string): + if string == cls.name: + return cls() + else: + raise TypeError("Cannot construct a '{}' from '{}'".format(cls, string)) + + +class ListArray(ExtensionArray): + dtype = ListDtype() + __array_priority__ = 1000 + + def __init__(self, values, dtype=None, copy=False): + if not isinstance(values, np.ndarray): + raise TypeError("Need to pass a numpy array as values") + for val in values: + if not isinstance(val, self.dtype.type) and not pd.isna(val): + raise TypeError("All values must be of type " + str(self.dtype.type)) + self.data = values + + @classmethod + def _from_sequence(cls, scalars, dtype=None, copy=False): + data = np.empty(len(scalars), dtype=object) + data[:] = scalars + return cls(data) + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.data[item] + else: + # slice, list-like, mask + return type(self)(self.data[item]) + + def __len__(self) -> int: + return len(self.data) + + def isna(self): + return np.array( + [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool + ) + + def take(self, indexer, allow_fill=False, fill_value=None): + # re-implement here, since NumPy has trouble setting + # sized objects like UserDicts into scalar slots of + # an ndarary. + indexer = np.asarray(indexer) + msg = ( + "Index is out of bounds or cannot do a " + "non-empty take from an empty array." 
+ ) + + if allow_fill: + if fill_value is None: + fill_value = self.dtype.na_value + # bounds check + if (indexer < -1).any(): + raise ValueError + try: + output = [ + self.data[loc] if loc != -1 else fill_value for loc in indexer + ] + except IndexError: + raise IndexError(msg) + else: + try: + output = [self.data[loc] for loc in indexer] + except IndexError: + raise IndexError(msg) + + return self._from_sequence(output) + + def copy(self): + return type(self)(self.data[:]) + + def astype(self, dtype, copy=True): + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self + elif pd.api.types.is_string_dtype(dtype) and not pd.api.types.is_object_dtype( + dtype + ): + # numpy has problems with astype(str) for nested elements + return np.array([str(x) for x in self.data], dtype=dtype) + return np.array(self.data, dtype=dtype, copy=copy) + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x.data for x in to_concat]) + return cls(data) + + +def make_data(): + # TODO: Use a regular dict. 
See _NDFrameIndexer._setitem_with_indexer + data = np.empty(100, dtype=object) + data[:] = [ + [random.choice(string.ascii_letters) for _ in range(random.randint(0, 10))] + for _ in range(100) + ] + return data diff --git a/pandas/tests/extension/list/test_list.py b/pandas/tests/extension/list/test_list.py new file mode 100644 index 0000000000000..c5c4417155562 --- /dev/null +++ b/pandas/tests/extension/list/test_list.py @@ -0,0 +1,30 @@ +import pytest + +import pandas as pd + +from .array import ListArray, ListDtype, make_data + + +@pytest.fixture +def dtype(): + return ListDtype() + + +@pytest.fixture +def data(): + """Length-100 ListArray for semantics test.""" + data = make_data() + + while len(data[0]) == len(data[1]): + data = make_data() + + return ListArray(data) + + +def test_to_csv(data): + # https://github.com/pandas-dev/pandas/issues/28840 + # array with list-likes fail when doing astype(str) on the numpy array + # which was done in to_native_types + df = pd.DataFrame({"a": data}) + res = df.to_csv() + assert str(data[0]) in res
https://api.github.com/repos/pandas-dev/pandas/pulls/28985
2019-10-15T02:38:13Z
2019-10-15T13:17:18Z
2019-10-15T13:17:18Z
2019-10-15T13:17:18Z
CLN: Add type hinting to base classes (#28926)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 7491cef17ebfc..e7f10f1b5ab07 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -21,6 +21,7 @@ from datetime import date, datetime, time from io import StringIO import sqlite3 +from typing import Optional import warnings import numpy as np @@ -583,7 +584,7 @@ class _TestSQLApi(PandasSQLTest): """ flavor = "sqlite" - mode = None + mode = None # type: Optional[str] def setup_connect(self): self.conn = self.connect() @@ -1234,7 +1235,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): """ - flavor = None + flavor = None # type: Optional[str] @pytest.fixture(autouse=True, scope="class") def setup_class(cls): @@ -1837,7 +1838,7 @@ class _TestSQLiteAlchemy: """ - flavor = "sqlite" + flavor = "sqlite" # type: Optional[str] @classmethod def connect(cls): @@ -1886,7 +1887,7 @@ class _TestMySQLAlchemy: """ - flavor = "mysql" + flavor = "mysql" # type: Optional[str] @classmethod def connect(cls): @@ -1955,7 +1956,7 @@ class _TestPostgreSQLAlchemy: """ - flavor = "postgresql" + flavor = "postgresql" # type: Optional[str] @classmethod def connect(cls): diff --git a/setup.cfg b/setup.cfg index 55d25abde585c..462e79dae1039 100644 --- a/setup.cfg +++ b/setup.cfg @@ -205,9 +205,6 @@ ignore_errors=True [mypy-pandas.tests.io.json.test_ujson] ignore_errors=True -[mypy-pandas.tests.io.test_sql] -ignore_errors=True - [mypy-pandas.tests.plotting.test_backend] ignore_errors=True
- [x] partially addresses #28926 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [N/A] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28983
2019-10-14T23:08:47Z
2019-10-15T13:45:41Z
null
2019-10-15T13:45:41Z
Document 3.8 compatibility
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index fc99b458fa0af..7d1150c2f65fa 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -18,7 +18,7 @@ Instructions for installing from source, Python version support ---------------------- -Officially Python 3.5.3 and above, 3.6, and 3.7. +Officially Python 3.5.3 and above, 3.6, 3.7, and 3.8. Installing pandas ----------------- diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index 9789c9fce3541..54a451e4427a2 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -6,6 +6,10 @@ What's new in 0.25.2 (October XX, 2019) These are the changes in pandas 0.25.2. See :ref:`release` for a full changelog including other versions of pandas. +.. note:: + + Pandas 0.25.2 adds compatibility for Python 3.8 (:issue:`28147`). + .. _whatsnew_0252.bug_fixes: Bug fixes diff --git a/setup.py b/setup.py index 04aedcb101e25..c35a0e75ecb80 100755 --- a/setup.py +++ b/setup.py @@ -228,6 +228,7 @@ def build_extensions(self): "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Programming Language :: Cython", "Topic :: Scientific/Engineering", ]
Closes #28147
https://api.github.com/repos/pandas-dev/pandas/pulls/28982
2019-10-14T21:46:18Z
2019-10-15T11:31:52Z
2019-10-15T11:31:52Z
2019-10-15T11:31:55Z
Mypy fix/io sql
- [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` As part of this PR, did the following: * Variable typing. * Code cleanup and reformatting the imports. * Internal class `_TestSQLAlchemyConn` behaves like an abstract class. * To solve **variable name clashing** between parent classes in some of the errors, I restructured the inheritance from a structure like > (_TestSQLAlchemy, _TestDBSQLAlchemy) --> TestDBSQLAlchemy to > _TestSQLAlchemy --> _TestDBSQLAlchemy --> TestDBSQLAlchemy Not sure why it was like that unless I'm missing something?
https://api.github.com/repos/pandas-dev/pandas/pulls/28979
2019-10-14T18:38:42Z
2019-10-15T15:51:20Z
null
2019-10-15T16:14:25Z
CLN: declare types in rank_1d_, rank_2d
diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in index 5dac94394c7ed..1ba1667b687be 100644 --- a/pandas/_libs/algos_rank_helper.pxi.in +++ b/pandas/_libs/algos_rank_helper.pxi.in @@ -24,7 +24,7 @@ dtypes = [('object', 'object', 'Infinity()', 'NegInfinity()'), @cython.wraparound(False) @cython.boundscheck(False) -def rank_1d_{{dtype}}(object in_arr, ties_method='average', +def rank_1d_{{dtype}}({{ctype}}[:] in_arr, ties_method='average', ascending=True, na_option='keep', pct=False): """ Fast NaN-friendly version of scipy.stats.rankdata @@ -189,7 +189,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', return ranks -def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average', +def rank_2d_{{dtype}}({{ctype}}[:, :] in_arr, axis=0, ties_method='average', ascending=True, na_option='keep', pct=False): """ Fast NaN-friendly version of scipy.stats.rankdata @@ -226,12 +226,10 @@ def rank_2d_{{dtype}}(object in_arr, axis=0, ties_method='average', keep_na = na_option == 'keep' - in_arr = np.asarray(in_arr) - if axis == 0: - values = in_arr.T.copy() + values = np.asarray(in_arr).T.copy() else: - values = in_arr.copy() + values = np.asarray(in_arr).copy() {{if dtype == 'object'}} if values.dtype != np.object_:
This is necessary before we can move these functions to use fused types.
https://api.github.com/repos/pandas-dev/pandas/pulls/28978
2019-10-14T18:37:15Z
2019-10-15T12:09:06Z
2019-10-15T12:09:06Z
2019-10-15T15:23:58Z
CLN: remove unnecessary get_value_at calls
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 979dad6db0838..22f7104debf10 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -41,11 +41,13 @@ cdef inline bint is_definitely_invalid_key(object val): cpdef get_value_at(ndarray arr, object loc, object tz=None): + obj = util.get_value_at(arr, loc) + if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_at(arr, loc), tz=tz) + return Timestamp(obj, tz=tz) elif arr.descr.type_num == NPY_TIMEDELTA: - return Timedelta(util.get_value_at(arr, loc)) - return util.get_value_at(arr, loc) + return Timedelta(obj) + return obj # Don't populate hash tables in monotonic indexes larger than this @@ -102,6 +104,9 @@ cdef class IndexEngine: arr[loc] = value cpdef get_loc(self, object val): + cdef: + Py_ssize_t loc + if is_definitely_invalid_key(val): raise TypeError("'{val}' is an invalid key".format(val=val)) @@ -114,7 +119,7 @@ cdef class IndexEngine: loc = _bin_search(values, val) # .searchsorted(val, side='left') if loc >= len(values): raise KeyError(val) - if util.get_value_at(values, loc) != val: + if values[loc] != val: raise KeyError(val) return loc @@ -352,22 +357,22 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1: Py_ssize_t mid = 0, lo = 0, hi = len(values) - 1 object pval - if hi == 0 or (hi > 0 and val > util.get_value_at(values, hi)): + if hi == 0 or (hi > 0 and val > values[hi]): return len(values) while lo < hi: mid = (lo + hi) // 2 - pval = util.get_value_at(values, mid) + pval = values[mid] if val < pval: hi = mid elif val > pval: lo = mid + 1 else: - while mid > 0 and val == util.get_value_at(values, mid - 1): + while mid > 0 and val == values[mid - 1]: mid -= 1 return mid - if val <= util.get_value_at(values, mid): + if val <= values[mid]: return mid else: return mid + 1 @@ -387,13 +392,16 @@ cdef class DatetimeEngine(Int64Engine): return 'M8[ns]' def __contains__(self, object val): + cdef: + int64_t loc + if self.over_size_threshold and 
self.is_monotonic_increasing: if not self.is_unique: return self._get_loc_duplicates(val) values = self._get_index_values() conv = maybe_datetimelike_to_i8(val) loc = values.searchsorted(conv, side='left') - return util.get_value_at(values, loc) == conv + return values[loc] == conv self._ensure_mapping_populated() return maybe_datetimelike_to_i8(val) in self.mapping @@ -405,6 +413,8 @@ cdef class DatetimeEngine(Int64Engine): return algos.is_monotonic(values, timelike=True) cpdef get_loc(self, object val): + cdef: + int64_t loc if is_definitely_invalid_key(val): raise TypeError @@ -422,7 +432,7 @@ cdef class DatetimeEngine(Int64Engine): self._date_check_type(val) raise KeyError(val) - if loc == len(values) or util.get_value_at(values, loc) != conv: + if loc == len(values) or values[loc] != conv: raise KeyError(val) return loc diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1c2f80b832201..a3a50644e58f3 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -782,8 +782,16 @@ def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups): return starts, ends -def indices_fast(object index, const int64_t[:] labels, list keys, +def indices_fast(ndarray index, const int64_t[:] labels, list keys, list sorted_labels): + """ + Parameters + ---------- + index : ndarray + labels : ndarray[int64] + keys : list + sorted_labels : list[ndarray[int64]] + """ cdef: Py_ssize_t i, j, k, lab, cur, start, n = len(labels) dict result = {} @@ -803,8 +811,7 @@ def indices_fast(object index, const int64_t[:] labels, list keys, if lab != -1: tup = PyTuple_New(k) for j in range(k): - val = util.get_value_at(keys[j], - sorted_labels[j][i - 1]) + val = keys[j][sorted_labels[j][i - 1]] PyTuple_SET_ITEM(tup, j, val) Py_INCREF(val) @@ -814,8 +821,7 @@ def indices_fast(object index, const int64_t[:] labels, list keys, tup = PyTuple_New(k) for j in range(k): - val = util.get_value_at(keys[j], - sorted_labels[j][n - 1]) + val = keys[j][sorted_labels[j][n - 1]] 
PyTuple_SET_ITEM(tup, j, val) Py_INCREF(val) result[tup] = index[start:] diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 34eb9412451c5..0eac0e94f0beb 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -121,7 +121,7 @@ cdef class Reducer: for i in range(self.nresults): if has_ndarray_labels: - name = util.get_value_at(labels, i) + name = labels[i] elif has_labels: # labels is an ExtensionArray name = labels[i] diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index e6edad656d430..94810369785d3 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -303,8 +303,8 @@ def get_flattened_iterator(comp_ids, ngroups, levels, labels): def get_indexer_dict(label_list, keys): - """ return a diction of {labels} -> {indexers} """ - shape = list(map(len, keys)) + """ return a dict of {labels} -> {indexers} """ + shape = [len(x) for x in keys] group_index = get_group_index(label_list, shape, sort=True, xnull=True) ngroups = (
I found in another branch that these calls didn't play nicely with `nogil`, so went through and removed the ones that are unnecessary. Turns out we really only need the one in libindex.get_value_at, since that is the only place where we _arent_ assured that `loc` is intlike.
https://api.github.com/repos/pandas-dev/pandas/pulls/28977
2019-10-14T18:13:36Z
2019-10-15T12:08:42Z
2019-10-15T12:08:42Z
2019-10-15T15:26:44Z
Fix mypy error message in `pandas/tests/indexes/test_numeric.py` …
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index b657d8d16df81..1ac6370860ba6 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1,4 +1,5 @@ import gc +from typing import Optional, Type import numpy as np import pytest @@ -30,7 +31,7 @@ class Base: """ base class for index sub-class tests """ - _holder = None + _holder = None # type: Optional[Type[Index]] _compat_props = ["shape", "ndim", "size", "nbytes"] def test_pickle_compat_construction(self): diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..7fbc41f032faf 100644 --- a/setup.cfg +++ b/setup.cfg @@ -187,9 +187,6 @@ ignore_errors=True [mypy-pandas.tests.indexes.test_category] ignore_errors=True -[mypy-pandas.tests.indexes.test_numeric] -ignore_errors=True - [mypy-pandas.tests.indexes.test_range] ignore_errors=True
Use Optional to signal that the value may be either an `Index` or `None`. - [X] Relates to #28926 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28976
2019-10-14T15:55:50Z
2019-10-16T08:32:27Z
null
2019-10-16T08:32:28Z
PERF: Benchmark merge with non-int64 and tolerance (#28922)
diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 6aa82a43a4d6a..5cf9f6336ba0c 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -273,10 +273,10 @@ def time_merge_ordered(self): class MergeAsof: - params = [["backward", "forward", "nearest"]] - param_names = ["direction"] + params = [["backward", "forward", "nearest"], [None, 5]] + param_names = ["direction", "tolerance"] - def setup(self, direction): + def setup(self, direction, tolerance): one_count = 200000 two_count = 1000000 @@ -303,6 +303,9 @@ def setup(self, direction): df1["time32"] = np.int32(df1.time) df2["time32"] = np.int32(df2.time) + df1["timeu64"] = np.uint64(df1.time) + df2["timeu64"] = np.uint64(df2.time) + self.df1a = df1[["time", "value1"]] self.df2a = df2[["time", "value2"]] self.df1b = df1[["time", "key", "value1"]] @@ -313,22 +316,52 @@ def setup(self, direction): self.df2d = df2[["time32", "value2"]] self.df1e = df1[["time", "key", "key2", "value1"]] self.df2e = df2[["time", "key", "key2", "value2"]] + self.df1f = df1[["timeu64", "value1"]] + self.df2f = df2[["timeu64", "value2"]] + + def time_on_int(self, direction, tolerance): + merge_asof( + self.df1a, self.df2a, on="time", direction=direction, tolerance=tolerance + ) - def time_on_int(self, direction): - merge_asof(self.df1a, self.df2a, on="time", direction=direction) + def time_on_int32(self, direction, tolerance): + merge_asof( + self.df1d, self.df2d, on="time32", direction=direction, tolerance=tolerance + ) - def time_on_int32(self, direction): - merge_asof(self.df1d, self.df2d, on="time32", direction=direction) + def time_on_uint64(self, direction, tolerance): + merge_asof( + self.df1f, self.df2f, on="timeu64", direction=direction, tolerance=tolerance + ) - def time_by_object(self, direction): - merge_asof(self.df1b, self.df2b, on="time", by="key", direction=direction) + def time_by_object(self, direction, tolerance): + merge_asof( + self.df1b, + 
self.df2b, + on="time", + by="key", + direction=direction, + tolerance=tolerance, + ) - def time_by_int(self, direction): - merge_asof(self.df1c, self.df2c, on="time", by="key2", direction=direction) + def time_by_int(self, direction, tolerance): + merge_asof( + self.df1c, + self.df2c, + on="time", + by="key2", + direction=direction, + tolerance=tolerance, + ) - def time_multiby(self, direction): + def time_multiby(self, direction, tolerance): merge_asof( - self.df1e, self.df2e, on="time", by=["key", "key2"], direction=direction + self.df1e, + self.df2e, + on="time", + by=["key", "key2"], + direction=direction, + tolerance=tolerance, )
- [X] closes #28922 - [ ] tests added / passed - [X] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28974
2019-10-14T14:46:30Z
2019-10-22T00:41:22Z
2019-10-22T00:41:22Z
2019-10-22T06:57:15Z
CLN: pandas-dev#28926 Fix mypy errors in pandas/tests/io/parser/conftest.py
diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py index 2c347a096006a..183ad500b15f3 100644 --- a/pandas/tests/io/parser/conftest.py +++ b/pandas/tests/io/parser/conftest.py @@ -1,4 +1,5 @@ import os +from typing import List, Optional import pytest @@ -6,9 +7,9 @@ class BaseParser: - engine = None + engine = None # type: Optional[str] low_memory = True - float_precision_choices = [] + float_precision_choices = [] # type: List[Optional[str]] def update_kwargs(self, kwargs): kwargs = kwargs.copy() @@ -59,11 +60,11 @@ def csv1(csv_dir_path): _py_parsers_only = [_pythonParser] _c_parsers_only = [_cParserHighMemory, _cParserLowMemory] -_all_parsers = _c_parsers_only + _py_parsers_only +_all_parsers = [*_c_parsers_only, *_py_parsers_only] _py_parser_ids = ["python"] _c_parser_ids = ["c_high", "c_low"] -_all_parser_ids = _c_parser_ids + _py_parser_ids +_all_parser_ids = [*_c_parser_ids, *_py_parser_ids] @pytest.fixture(params=_all_parsers, ids=_all_parser_ids) diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..e3cfd8858cc4a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -205,9 +205,6 @@ ignore_errors=True [mypy-pandas.tests.io.json.test_ujson] ignore_errors=True -[mypy-pandas.tests.io.parser.conftest] -ignore_errors=True - [mypy-pandas.tests.io.test_sql] ignore_errors=True
- [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/28973
2019-10-14T12:08:20Z
2019-10-15T12:30:28Z
2019-10-15T12:30:28Z
2019-10-15T12:34:01Z
CLN: pandas-dev#28926 Fix pandas/tests/tseries/offsets/test_offsets_properties
diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py index 880ff1f137520..a05de78e299f7 100644 --- a/pandas/tests/tseries/offsets/test_offsets_properties.py +++ b/pandas/tests/tseries/offsets/test_offsets_properties.py @@ -36,8 +36,8 @@ with warnings.catch_warnings(): warnings.simplefilter("ignore") - min_dt = (pd.Timestamp(1900, 1, 1).to_pydatetime(),) - max_dt = (pd.Timestamp(1900, 1, 1).to_pydatetime(),) + min_dt = pd.Timestamp(1900, 1, 1).to_pydatetime() + max_dt = pd.Timestamp(1900, 1, 1).to_pydatetime() gen_date_range = st.builds( pd.date_range, diff --git a/setup.cfg b/setup.cfg index 64494bf84363e..775999bc21b97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -226,8 +226,5 @@ ignore_errors=True [mypy-pandas.tests.tseries.offsets.test_offsets] ignore_errors=True -[mypy-pandas.tests.tseries.offsets.test_offsets_properties] -ignore_errors=True - [mypy-pandas.tests.tseries.offsets.test_yqm_offsets] ignore_errors=True
…/teset_offsets_properties.py - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/28972
2019-10-14T11:01:31Z
2019-10-15T12:20:16Z
2019-10-15T12:20:15Z
2019-10-15T12:33:28Z
Fix mypy error messages in pandas/test/tseries/offsets/test_offsets.py
diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index ddf2c6e65b474..e435a6846a77e 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -1,4 +1,5 @@ from datetime import date, datetime, time as dt_time, timedelta +from typing import List, Type import numpy as np import pytest @@ -92,7 +93,7 @@ def test_to_M8(): class Base: - _offset = None + _offset = None # type: Type[DateOffset] d = Timestamp(datetime(2008, 1, 2)) timezones = [ @@ -740,7 +741,7 @@ def test_onOffset(self): for offset, d, expected in tests: assert_onOffset(offset, d, expected) - apply_cases = [] + apply_cases = [] # type: List apply_cases.append( ( BDay(), @@ -1697,7 +1698,7 @@ def test_opening_time(self, case): assert offset._next_opening_time(dt) == exp_next assert offset._prev_opening_time(dt) == exp_prev - apply_cases = [] + apply_cases = [] # type: List apply_cases.append( ( BusinessHour(), @@ -2628,7 +2629,7 @@ def test_onOffset(self, case): offset, d, expected = case assert_onOffset(offset, d, expected) - apply_cases = [] + apply_cases = [] # type: List apply_cases.append( ( CDay(), @@ -2875,7 +2876,7 @@ def test_onOffset(self, case): offset, d, expected = case assert_onOffset(offset, d, expected) - apply_cases = [] + apply_cases = [] # type: List apply_cases.append( ( CBMonthEnd(), @@ -3024,7 +3025,7 @@ def test_onOffset(self, case): offset, dt, expected = case assert_onOffset(offset, dt, expected) - apply_cases = [] + apply_cases = [] # type: List apply_cases.append( ( CBMonthBegin(), diff --git a/setup.cfg b/setup.cfg index 462e79dae1039..7f08e704e4a9e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -217,8 +217,5 @@ ignore_errors=True [mypy-pandas.tests.test_base] ignore_errors=True -[mypy-pandas.tests.tseries.offsets.test_offsets] -ignore_errors=True - [mypy-pandas.tests.tseries.offsets.test_yqm_offsets] ignore_errors=True
- [ ] xref #28926 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/28971
2019-10-14T09:56:19Z
2019-10-27T02:33:38Z
null
2019-10-27T02:33:48Z
fix #28926 mypy error in pandas\tests\arrays\test_array.py
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index a21d9e67e49e5..78cc54db4b1b8 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -831,7 +831,9 @@ def _raise_on_incompatible(left, right): def period_array( - data: Sequence[Optional[Period]], freq: Optional[Tick] = None, copy: bool = False + data: Sequence[Optional[Period]], + freq: Optional[Union[str, Tick]] = None, + copy: bool = False, ) -> PeriodArray: """ Construct a new PeriodArray from a sequence of Period scalars. diff --git a/setup.cfg b/setup.cfg index f32deff9dafb8..c9ba13443e97c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -136,15 +136,9 @@ ignore_errors=True [mypy-pandas.tests.arithmetic.test_datetime64] ignore_errors=True -[mypy-pandas.tests.arrays.test_array] -ignore_errors=True - [mypy-pandas.tests.arrays.test_datetimelike] ignore_errors=True -[mypy-pandas.tests.arrays.test_period] -ignore_errors=True - [mypy-pandas.tests.dtypes.test_common] ignore_errors=True @@ -190,9 +184,6 @@ ignore_errors=True [mypy-pandas.tests.series.test_operators] ignore_errors=True -[mypy-pandas.tests.test_base] -ignore_errors=True - [mypy-pandas.tests.tseries.offsets.test_offsets] ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28970
2019-10-14T08:52:52Z
2019-10-21T11:57:29Z
2019-10-21T11:57:28Z
2019-10-21T11:57:40Z
CLN: Exception in _libs
diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index ac713a928973f..08bfaf21db9fb 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -158,7 +158,7 @@ cdef class IntervalTree(IntervalMixin): # TODO: write get_indexer_intervals cdef: - size_t old_len + Py_ssize_t old_len Py_ssize_t i Int64Vector result @@ -179,7 +179,7 @@ cdef class IntervalTree(IntervalMixin): the given array of scalar targets. Non-unique positions are repeated. """ cdef: - size_t old_len + Py_ssize_t old_len Py_ssize_t i Int64Vector result, missing diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1c2f80b832201..36dddbb446e83 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2066,7 +2066,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, floats[i] = float(val) complexes[i] = complex(val) seen.float_ = 1 - except Exception: + except (ValueError, TypeError): seen.object_ = 1 break else: @@ -2346,7 +2346,8 @@ def to_object_array_tuples(rows: object): row = rows[i] for j in range(len(row)): result[i, j] = row[j] - except Exception: + except TypeError: + # e.g. 
"Expected tuple, got list" # upcast any subclasses to tuple for i in range(n): row = (rows[i],) if checknull(rows[i]) else tuple(rows[i]) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 33665484311ba..bf0a0ae5a3fe9 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -581,7 +581,7 @@ def try_parse_dates(object[:] values, parser=None, else: result[i] = parse_date(values[i]) except Exception: - # failed + # Since parser is user-defined, we can't guess what it migh raise return values else: parse_date = parser diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index cbfbc14c35b35..bc1fdfae99de9 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -226,11 +226,8 @@ cdef object get_dst_info(object tz): if treat_tz_as_pytz(tz): trans = np.array(tz._utc_transition_times, dtype='M8[ns]') trans = trans.view('i8') - try: - if tz._utc_transition_times[0].year == 1: - trans[0] = NPY_NAT + 1 - except Exception: - pass + if tz._utc_transition_times[0].year == 1: + trans[0] = NPY_NAT + 1 deltas = unbox_utcoffsets(tz._transition_info) typ = 'pytz'
I think this is the last of the ones in _libs. Also fix some build warnings in interval.pyx
https://api.github.com/repos/pandas-dev/pandas/pulls/28967
2019-10-14T03:43:25Z
2019-10-15T12:18:37Z
2019-10-15T12:18:36Z
2019-10-15T15:22:32Z
TST: Add tests for num-bool coercion
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 584e22f8488f5..bafd93fd67a9e 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -11,6 +11,7 @@ import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex +from pandas.conftest import all_arithmetic_functions from pandas.core import ops import pandas.util.testing as tm @@ -72,6 +73,25 @@ def test_compare_invalid(self): tm.assert_series_equal(a / b, 1 / (b / a)) +class TestNumericArraylikeArithmeticWithBool: + @pytest.mark.parametrize("num", [1.0, 1]) + def test_array_like_bool_and_num_op_coerce( + self, num, all_arithmetic_functions, box_with_array + ): + # GH 18549 + op = all_arithmetic_functions + expected = [op(num, num)] + expected = tm.box_expected(expected, box_with_array) + bool_box = tm.box_expected([True], box_with_array) + try: + tm.assert_equal(expected, op(bool_box, num)) + tm.assert_equal(expected, op(num, bool_box)) + except TypeError: + # Some operators may not be supported and that's okay. If supported + # we should should see the operation coerce to a numeric value. + pass + + # ------------------------------------------------------------------ # Numeric dtypes Arithmetic with Datetime/Timedelta Scalar
Boolean dtypes in both Series and DataFrames should be coerced to the other operand's dtype. - [x] closes #18549 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28966
2019-10-14T03:22:03Z
2019-12-17T17:24:14Z
null
2019-12-17T17:24:14Z
Fix mypy errors for pandas\tests\*: test_convert_to.py
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 3f0768ad5bdac..c9a7507969f5b 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -575,9 +575,9 @@ def test_frame_to_dict_tz(self): ), ), ( - defaultdict(list), + defaultdict(dict), defaultdict( - list, + dict, { 0: {"int_col": 1, "float_col": 1.0}, 1: {"int_col": 2, "float_col": 2.0}, diff --git a/setup.cfg b/setup.cfg index 462e79dae1039..229fb41bf5d79 100644 --- a/setup.cfg +++ b/setup.cfg @@ -166,9 +166,6 @@ ignore_errors=True [mypy-pandas.tests.frame.test_constructors] ignore_errors=True -[mypy-pandas.tests.frame.test_convert_to] -ignore_errors=True - [mypy-pandas.tests.indexes.datetimes.test_datetimelike] ignore_errors=True
- [x ] xref #28926 - [x ] tests added / passed - [x ] passes `black pandas` - [x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [N/A ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28965
2019-10-14T03:09:49Z
2019-10-18T17:56:07Z
2019-10-18T17:56:07Z
2019-10-18T17:56:18Z
TST: added test for df.loc modify datetime columns
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 35291efecd1ac..5e517d556a095 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -712,6 +712,32 @@ def test_loc_assign_non_ns_datetime(self, unit): expected = Series(df.loc[:, "expected"], name=unit) tm.assert_series_equal(df.loc[:, unit], expected) + def test_loc_modify_datetime(self): + # see gh-28837 + df = DataFrame.from_dict( + {"date": [1485264372711, 1485265925110, 1540215845888, 1540282121025]} + ) + + df["date_dt"] = pd.to_datetime(df["date"], unit="ms", cache=True) + + df.loc[:, "date_dt_cp"] = df.loc[:, "date_dt"] + df.loc[[2, 3], "date_dt_cp"] = df.loc[[2, 3], "date_dt"] + + expected = DataFrame( + [ + [1485264372711, "2017-01-24 13:26:12.711", "2017-01-24 13:26:12.711"], + [1485265925110, "2017-01-24 13:52:05.110", "2017-01-24 13:52:05.110"], + [1540215845888, "2018-10-22 13:44:05.888", "2018-10-22 13:44:05.888"], + [1540282121025, "2018-10-23 08:08:41.025", "2018-10-23 08:08:41.025"], + ], + columns=["date", "date_dt", "date_dt_cp"], + ) + + columns = ["date_dt", "date_dt_cp"] + expected[columns] = expected[columns].apply(pd.to_datetime) + + tm.assert_frame_equal(df, expected) + def test_loc_setitem_frame(self): df = self.frame_labels
relevant GitHub issue #28837 - [x] closes #28837 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28964
2019-10-14T02:48:18Z
2019-10-29T06:02:28Z
2019-10-29T06:02:28Z
2019-10-29T06:02:44Z
TST: added test for df.loc modify datetime columns
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 35291efecd1ac..22997a29341b2 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -712,6 +712,17 @@ def test_loc_assign_non_ns_datetime(self, unit): expected = Series(df.loc[:, "expected"], name=unit) tm.assert_series_equal(df.loc[:, unit], expected) + def test_loc_modify_datetime(self): + # GH 28837 + df_orig = pd.DataFrame.from_dict( + {"date": [1485264372711, 1485265925110, 1540215845888, 1540282121025]} + ) + df_orig["date_dt"] = pd.to_datetime(df_orig["date"], unit="ms", cache=True) + df_orig.loc[:, "date_dt_cp"] = df_orig.loc[:, "date_dt"] + df = df_orig.copy() + df.loc[[2, 3], "date_dt_cp"] = df.loc[[2, 3], "date_dt"] + tm.assert_series_equal(df.loc[:, "date_dt_cp"], df_orig.loc[:, "date_dt_cp"]) + def test_loc_setitem_frame(self): df = self.frame_labels
relevant GitHub issue #28837 - [x] closes #28837 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28963
2019-10-14T02:43:03Z
2019-10-14T02:46:15Z
null
2019-10-14T02:46:15Z
CLN: move small bits outside of try/excepts
diff --git a/pandas/_libs/algos_take_helper.pxi.in b/pandas/_libs/algos_take_helper.pxi.in index 3a3adc71875ed..f10061a417c03 100644 --- a/pandas/_libs/algos_take_helper.pxi.in +++ b/pandas/_libs/algos_take_helper.pxi.in @@ -276,7 +276,6 @@ cdef _take_2d(ndarray[take_t, ndim=2] values, object idx): Py_ssize_t i, j, N, K ndarray[Py_ssize_t, ndim=2, cast=True] indexer = idx ndarray[take_t, ndim=2] result - object val N, K = (<object>values).shape diff --git a/pandas/core/base.py b/pandas/core/base.py index 56ffd3db6e942..d07a120560196 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -267,7 +267,7 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate - def _try_aggregate_string_function(self, arg, *args, **kwargs): + def _try_aggregate_string_function(self, arg: str, *args, **kwargs): """ if arg is a string, then try to operate on it: - try to find a function (or attribute) on ourselves @@ -292,12 +292,10 @@ def _try_aggregate_string_function(self, arg, *args, **kwargs): f = getattr(np, arg, None) if f is not None: - try: + if hasattr(self, "__array__"): + # in particular exclude Window return f(self, *args, **kwargs) - except (AttributeError, TypeError): - pass - raise AttributeError( "'{arg}' is not a valid function for " "'{cls}' object".format(arg=arg, cls=type(self).__name__) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 068d5e5275f0d..76a3893d3af2a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -952,6 +952,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): if alt is None: # we cannot perform the operation # in an alternate way, exclude the block + assert how == "ohlc" deleted_items.append(locs) continue @@ -1025,17 +1026,20 @@ def _aggregate_frame(self, func, *args, **kwargs): if axis != obj._info_axis_number: try: for name, data in self: - result[name] = self._try_cast(func(data, *args, **kwargs), data) + fres = func(data, *args, 
**kwargs) + result[name] = self._try_cast(fres, data) except Exception: return self._aggregate_item_by_item(func, *args, **kwargs) else: for name in self.indices: + data = self.get_group(name, obj=obj) try: - data = self.get_group(name, obj=obj) - result[name] = self._try_cast(func(data, *args, **kwargs), data) + fres = func(data, *args, **kwargs) except Exception: wrapper = lambda x: func(x, *args, **kwargs) result[name] = data.apply(wrapper, axis=axis) + else: + result[name] = self._try_cast(fres, data) return self._wrap_frame_output(result, obj) @@ -1410,9 +1414,10 @@ def _transform_item_by_item(self, obj, wrapper): for i, col in enumerate(obj): try: output[col] = self[col].transform(wrapper) - inds.append(i) except Exception: pass + else: + inds.append(i) if len(output) == 0: raise TypeError("Transform function invalid for data types") diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index cc297629a7004..8461b4381e2ea 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -598,14 +598,7 @@ def pipe(self, func, *args, **kwargs): plot = property(GroupByPlot) def _make_wrapper(self, name): - if name not in self._apply_whitelist: - is_callable = callable(getattr(self._selected_obj, name, None)) - kind = " callable " if is_callable else " " - msg = ( - "Cannot access{0}attribute {1!r} of {2!r} objects, try " - "using the 'apply' method".format(kind, name, type(self).__name__) - ) - raise AttributeError(msg) + assert name in self._apply_whitelist self._set_group_selection() @@ -919,9 +912,10 @@ def _python_agg_general(self, func, *args, **kwargs): for name, obj in self._iterate_slices(): try: result, counts = self.grouper.agg_series(obj, f) - output[name] = self._try_cast(result, obj, numeric_only=True) except TypeError: continue + else: + output[name] = self._try_cast(result, obj, numeric_only=True) if len(output) == 0: return self._python_apply_general(f)
https://api.github.com/repos/pandas-dev/pandas/pulls/28962
2019-10-14T02:15:28Z
2019-10-16T12:21:57Z
2019-10-16T12:21:56Z
2019-10-16T15:25:52Z
CLN: fix mypy errors in pandas\tests\indexes\interval\test_base.py #28926
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index b657d8d16df81..1ac6370860ba6 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1,4 +1,5 @@ import gc +from typing import Optional, Type import numpy as np import pytest @@ -30,7 +31,7 @@ class Base: """ base class for index sub-class tests """ - _holder = None + _holder = None # type: Optional[Type[Index]] _compat_props = ["shape", "ndim", "size", "nbytes"] def test_pickle_compat_construction(self): diff --git a/setup.cfg b/setup.cfg index f7920fb61b942..d4657100c1291 100644 --- a/setup.cfg +++ b/setup.cfg @@ -148,33 +148,12 @@ ignore_errors=True [mypy-pandas.tests.extension.json.test_json] ignore_errors=True -[mypy-pandas.tests.indexes.datetimes.test_datetimelike] -ignore_errors=True - -[mypy-pandas.tests.indexes.interval.test_base] -ignore_errors=True - [mypy-pandas.tests.indexes.interval.test_interval_tree] ignore_errors=True -[mypy-pandas.tests.indexes.period.test_period] -ignore_errors=True - [mypy-pandas.tests.indexes.test_base] ignore_errors=True -[mypy-pandas.tests.indexes.test_category] -ignore_errors=True - -[mypy-pandas.tests.indexes.test_numeric] -ignore_errors=True - -[mypy-pandas.tests.indexes.test_range] -ignore_errors=True - -[mypy-pandas.tests.indexes.timedeltas.test_timedelta] -ignore_errors=True - [mypy-pandas.tests.indexing.test_loc] ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Hi! I based this change on what I saw in #28947. However, when I test this with `mypy pandas/tests/indexes/interval/test_base.py`, I get this error, which I didn't expect: ``` pandas/tests/indexes/interval/test_base.py:16: error: Incompatible types in assignment (expression has type "Optional[Type[IntervalIndex]]", base class "Base" defined the type as "None") ``` I thought the "Optional" that was added would accept the "None." But, additionally, in my local environment, I also can't see any of the errors listed in issue #28926, so maybe I don't have something set up right.
https://api.github.com/repos/pandas-dev/pandas/pulls/28961
2019-10-14T00:16:53Z
2019-10-28T19:56:53Z
2019-10-28T19:56:53Z
2019-10-28T19:57:01Z
Fix mypy errors for pandas\tests\* #28926 (test_algos.py)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index a5706d8baa614..6df2c8faf7aee 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -767,7 +767,7 @@ def test_same_object_is_in(self): # with similar behavior, then we at least should # fall back to usual python's behavior: "a in [a] == True" class LikeNan: - def __eq__(self): + def __eq__(self, other): return False def __hash__(self): diff --git a/setup.cfg b/setup.cfg index 149af6c283d05..64494bf84363e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -220,9 +220,6 @@ ignore_errors=True [mypy-pandas.tests.series.test_operators] ignore_errors=True -[mypy-pandas.tests.test_algos] -ignore_errors=True - [mypy-pandas.tests.test_base] ignore_errors=True
- [x] xref #28926 - [x] tests added / passed (verified that tests still pass) - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [N/A] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28960
2019-10-13T23:14:56Z
2019-10-14T00:28:49Z
2019-10-14T00:28:49Z
2019-10-14T00:28:58Z
REF: re-raise AssertionError unchanged
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 3069bbbf34bb7..3f741f08d1363 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -441,7 +441,7 @@ def _group_add(floating[:, :] out, floating[:, :] sumx, nobs if len(values) != len(labels): - raise AssertionError("len(index) != len(labels)") + raise ValueError("len(index) != len(labels)") nobs = np.zeros_like(out) sumx = np.zeros_like(out) @@ -491,7 +491,7 @@ def _group_prod(floating[:, :] out, floating[:, :] prodx, nobs if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") + raise ValueError("len(index) != len(labels)") nobs = np.zeros_like(out) prodx = np.ones_like(out) @@ -541,7 +541,7 @@ def _group_var(floating[:, :] out, assert min_count == -1, "'min_count' only used in add and prod" if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") + raise ValueError("len(index) != len(labels)") nobs = np.zeros_like(out) mean = np.zeros_like(out) @@ -596,7 +596,7 @@ def _group_mean(floating[:, :] out, assert min_count == -1, "'min_count' only used in add and prod" if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") + raise ValueError("len(index) != len(labels)") nobs = np.zeros_like(out) sumx = np.zeros_like(out) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index aa817ec451aa5..8cd727e744519 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -261,6 +261,8 @@ def aggregate(self, func=None, *args, **kwargs): try: return self._python_agg_general(func, *args, **kwargs) + except AssertionError: + raise except Exception: result = self._aggregate_named(func, *args, **kwargs) @@ -887,6 +889,8 @@ def aggregate(self, func=None, *args, **kwargs): result = self._aggregate_multiple_funcs( [func], _level=_level, _axis=self.axis ) + except AssertionError: + raise except Exception: result = self._aggregate_frame(func) else: @@ -1036,6 
+1040,8 @@ def _aggregate_frame(self, func, *args, **kwargs): for name, data in self: fres = func(data, *args, **kwargs) result[name] = self._try_cast(fres, data) + except AssertionError: + raise except Exception: return self._aggregate_item_by_item(func, *args, **kwargs) else: @@ -1043,6 +1049,8 @@ def _aggregate_frame(self, func, *args, **kwargs): data = self.get_group(name, obj=obj) try: fres = func(data, *args, **kwargs) + except AssertionError: + raise except Exception: wrapper = lambda x: func(x, *args, **kwargs) result[name] = data.apply(wrapper, axis=axis) @@ -1398,6 +1406,8 @@ def _choose_path(self, fast_path, slow_path, group): # if we make it here, test if we can use the fast path try: res_fast = fast_path(group) + except AssertionError: + raise except Exception: # Hard to know ex-ante what exceptions `fast_path` might raise return path, res @@ -1422,6 +1432,8 @@ def _transform_item_by_item(self, obj, wrapper): for i, col in enumerate(obj): try: output[col] = self[col].transform(wrapper) + except AssertionError: + raise except Exception: pass else: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 92ea733cc3447..6f2868482b798 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -44,13 +44,7 @@ class providing the base-class of operations. 
from pandas.core import nanops import pandas.core.algorithms as algorithms from pandas.core.arrays import Categorical -from pandas.core.base import ( - DataError, - GroupByError, - PandasObject, - SelectionMixin, - SpecificationError, -) +from pandas.core.base import DataError, PandasObject, SelectionMixin import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.frame import DataFrame @@ -862,8 +856,6 @@ def _cython_transform(self, how, numeric_only=True, **kwargs): result, names = self.grouper.transform(obj.values, how, **kwargs) except NotImplementedError: continue - except AssertionError as e: - raise GroupByError(str(e)) if self._transform_should_cast(how): output[name] = self._try_cast(result, obj) else: @@ -890,12 +882,7 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1): if numeric_only and not is_numeric: continue - try: - result, names = self.grouper.aggregate( - obj.values, how, min_count=min_count - ) - except AssertionError as e: - raise GroupByError(str(e)) + result, names = self.grouper.aggregate(obj.values, how, min_count=min_count) output[name] = self._try_cast(result, obj) if len(output) == 0: @@ -1353,8 +1340,8 @@ def f(self, **kwargs): # try a cython aggregation if we can try: return self._cython_agg_general(alias, alt=npfunc, **kwargs) - except AssertionError as e: - raise SpecificationError(str(e)) + except AssertionError: + raise except DataError: pass except Exception: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 40517eefe4d5d..27415a1bacdbd 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -647,6 +647,8 @@ def _transform( def agg_series(self, obj, func): try: return self._aggregate_series_fast(obj, func) + except AssertionError: + raise except Exception: return self._aggregate_series_pure_python(obj, func) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 545bc21dd6d1b..5185d95cfac4c 100644 --- 
a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -360,6 +360,8 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) + except AssertionError: + raise except Exception: # we have a non-reducing function diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 7e3cbed09c6d7..5dad868c8c3aa 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -19,7 +19,7 @@ date_range, period_range, ) -from pandas.core.groupby.groupby import SpecificationError +from pandas.core.base import SpecificationError import pandas.util.testing as tm from pandas.io.formats.printing import pprint_thing
This should be all the remaining places where we catch `Exception` in groupby/apply/resample code. This should make debugging much easier going forward. cc @jreback @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/28959
2019-10-13T22:49:30Z
2019-10-16T23:37:30Z
2019-10-16T23:37:30Z
2019-10-16T23:41:59Z
CLN: Consistent and Annotated Return Type of _iterate_slices
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 79e941f262931..c82d8a25fedba 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -14,7 +14,18 @@ import itertools import sys from textwrap import dedent -from typing import FrozenSet, List, Optional, Sequence, Set, Tuple, Type, Union +from typing import ( + FrozenSet, + Hashable, + Iterable, + List, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, +) import warnings import numpy as np @@ -861,7 +872,7 @@ def style(self): """ @Appender(_shared_docs["items"]) - def items(self): + def items(self) -> Iterable[Tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 068d5e5275f0d..4125d6a918b26 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -11,7 +11,17 @@ from functools import partial from textwrap import dedent import typing -from typing import Any, Callable, FrozenSet, Sequence, Type, Union +from typing import ( + Any, + Callable, + FrozenSet, + Hashable, + Iterable, + Sequence, + Tuple, + Type, + Union, +) import warnings import numpy as np @@ -132,7 +142,7 @@ def pinner(cls): class SeriesGroupBy(GroupBy): _apply_whitelist = base.series_apply_whitelist - def _iterate_slices(self): + def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]: yield self._selection_name, self._selected_obj @property @@ -898,22 +908,20 @@ def aggregate(self, func=None, *args, **kwargs): agg = aggregate - def _iterate_slices(self): - if self.axis == 0: - # kludge - if self._selection is None: - slice_axis = self.obj.columns - else: - slice_axis = self._selection_list - slicer = lambda x: self.obj[x] + def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]: + obj = self._selected_obj + if self.axis == 1: + obj = obj.T + + if isinstance(obj, Series) and obj.name not in self.exclusions: + # 
Occurs when doing DataFrameGroupBy(...)["X"] + yield obj.name, obj else: - slice_axis = self.obj.index - slicer = self.obj.xs + for label, values in obj.items(): + if label in self.exclusions: + continue - for val in slice_axis: - if val in self.exclusions: - continue - yield val, slicer(val) + yield label, values def _cython_agg_general(self, how, alt=None, numeric_only=True, min_count=-1): new_items, new_blocks = self._cython_agg_blocks( diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index cc297629a7004..4ea02d59597f1 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -14,7 +14,7 @@ class providing the base-class of operations. import inspect import re import types -from typing import FrozenSet, List, Optional, Tuple, Type, Union +from typing import FrozenSet, Hashable, Iterable, List, Optional, Tuple, Type, Union import numpy as np @@ -758,7 +758,7 @@ def _python_apply_general(self, f): keys, values, not_indexed_same=mutated or self.mutated ) - def _iterate_slices(self): + def _iterate_slices(self) -> Iterable[Tuple[Hashable, Series]]: raise AbstractMethodError(self) def transform(self, func, *args, **kwargs):
General pre-cursor to getting block management out of groupby. This is also a pre-cursor to fixing #21668 but needs to be coupled with a few more changes as a follow up On master calls to _iterate_slices look up by label, potentially yielding a DataFrame if there were duplicated columns. This takes the surprise out of that and simply returns a Tuple of label / series for each item along the axis @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/28958
2019-10-13T21:47:03Z
2019-10-16T12:24:47Z
2019-10-16T12:24:47Z
2020-01-16T00:33:45Z
CLN: Clean DirNameMixin._deprecated
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 2d4ded9e2e6ba..b863e7ef3d580 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -4,7 +4,7 @@ that can be mixed into or pinned onto other pandas classes. """ -from typing import Set +from typing import FrozenSet, Set import warnings from pandas.util._decorators import Appender @@ -12,9 +12,7 @@ class DirNamesMixin: _accessors = set() # type: Set[str] - _deprecations = frozenset( - ["asobject", "base", "data", "flags", "itemsize", "strides"] - ) + _deprecations = frozenset() # type: FrozenSet[str] def _dir_deletions(self): """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 5e974f0b69e59..6b9836ba8bcec 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -331,7 +331,9 @@ class Categorical(ExtensionArray, PandasObject): __array_priority__ = 1000 _dtype = CategoricalDtype(ordered=False) # tolist is not actually deprecated, just suppressed in the __dir__ - _deprecations = PandasObject._deprecations | frozenset(["tolist", "get_values"]) + _deprecations = PandasObject._deprecations | frozenset( + ["tolist", "itemsize", "get_values"] + ) _typ = "categorical" def __init__( diff --git a/pandas/core/base.py b/pandas/core/base.py index 56ffd3db6e942..f400a9a009c8a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,7 +4,7 @@ import builtins from collections import OrderedDict import textwrap -from typing import Dict, Optional +from typing import Dict, FrozenSet, Optional import warnings import numpy as np @@ -653,7 +653,17 @@ class IndexOpsMixin: # ndarray compatibility __array_priority__ = 1000 - _deprecations = frozenset(["item"]) + _deprecations = frozenset( + [ + "tolist", # tolist is not deprecated, just suppressed in the __dir__ + "base", + "data", + "item", + "itemsize", + "flags", + "strides", + ] + ) # type: FrozenSet[str] def transpose(self, *args, **kwargs): """ diff --git 
a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7dee3a17f8f9e..572240a524569 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,7 +1,7 @@ from datetime import datetime import operator from textwrap import dedent -from typing import Union +from typing import FrozenSet, Union import warnings import numpy as np @@ -63,7 +63,7 @@ from pandas.core.dtypes.missing import array_equivalent, isna from pandas.core import ops -from pandas.core.accessor import CachedAccessor, DirNamesMixin +from pandas.core.accessor import CachedAccessor import pandas.core.algorithms as algos from pandas.core.arrays import ExtensionArray from pandas.core.base import IndexOpsMixin, PandasObject @@ -206,10 +206,10 @@ class Index(IndexOpsMixin, PandasObject): # tolist is not actually deprecated, just suppressed in the __dir__ _deprecations = ( - IndexOpsMixin._deprecations - | DirNamesMixin._deprecations - | frozenset(["tolist", "contains", "dtype_str", "get_values", "set_value"]) - ) + PandasObject._deprecations + | IndexOpsMixin._deprecations + | frozenset(["asobject", "contains", "dtype_str", "get_values", "set_value"]) + ) # type: FrozenSet[str] # To hand over control to subclasses _join_precedence = 1 diff --git a/pandas/core/series.py b/pandas/core/series.py index ff8149cc2e922..03801a78be9a5 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,7 +54,7 @@ import pandas as pd from pandas.core import algorithms, base, generic, nanops, ops -from pandas.core.accessor import CachedAccessor, DirNamesMixin +from pandas.core.accessor import CachedAccessor from pandas.core.arrays import ExtensionArray from pandas.core.arrays.categorical import Categorical, CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor @@ -178,10 +178,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _deprecations = ( base.IndexOpsMixin._deprecations | generic.NDFrame._deprecations - | DirNamesMixin._deprecations | frozenset( [ - 
"tolist", # tolist is not deprecated, just suppressed in the __dir__ "asobject", "compress", "valid",
This moves the content of ``DirNameMixin._deprecations`` to more appropriate locations, typically ``IndexOpsMixIn._deprecations``, as that is a common subclass of ``Index`` and ``Series``. The names in ``DirNameMixin._deprecations`` belonged to those two classes, so having the deprecated names located all the way up in ``DirNameMixin`` made them be available in all classes that subclass ``DirNameMixin``, which was unfortunate. By having ``DirNameMixin._deprecations`` start with an empty set, it will be easier to use ``DirNameMixin`` and ``PandasObject`` where/when needed, without inheriting undesired names in ``_deprecated``. This PR also moves ``"tolist"`` to ``IndexOpsMixIn._deprecations``, because ``tolist`` is defined in that class.
https://api.github.com/repos/pandas-dev/pandas/pulls/28957
2019-10-13T20:57:12Z
2019-10-16T18:53:42Z
2019-10-16T18:53:42Z
2019-10-16T19:07:10Z
To string with encoding
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 7c86ad0f029ed..511e85929f352 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -109,6 +109,7 @@ Other enhancements (:issue:`28368`) - :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`) - :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`) +- Added ``encoding`` argument to :meth:`DataFrame.to_string` for non-ascii text (:issue:`28766`) - Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`) Build Changes diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 64755b2390eaf..f032a9a919b3c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -755,6 +755,7 @@ def to_string( decimal: str = ".", line_width: Optional[int] = None, max_colwidth: Optional[int] = None, + encoding: Optional[str] = None, ) -> Optional[str]: """ Render a DataFrame to a console-friendly tabular output. @@ -765,6 +766,10 @@ def to_string( Max width to truncate each column in characters. By default, no limit. .. versionadded:: 1.0.0 + encoding : str, default "utf-8" + Set character encoding. + + .. 
versionadded:: 1.0 %(returns)s See Also -------- @@ -803,7 +808,7 @@ def to_string( decimal=decimal, line_width=line_width, ) - return formatter.to_string(buf=buf) + return formatter.to_string(buf=buf, encoding=encoding) # ---------------------------------------------------------------------- diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index b8c40e3f62221..7c58eafd2ec39 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -485,6 +485,8 @@ def get_buffer( if encoding is None: encoding = "utf-8" + elif not isinstance(buf, str): + raise ValueError("buf is not a file name and encoding is specified.") if hasattr(buf, "write"): yield buf @@ -895,8 +897,12 @@ def _join_multiline(self, *args) -> str: st = ed return "\n\n".join(str_lst) - def to_string(self, buf: Optional[FilePathOrBuffer[str]] = None) -> Optional[str]: - return self.get_result(buf=buf) + def to_string( + self, + buf: Optional[FilePathOrBuffer[str]] = None, + encoding: Optional[str] = None, + ) -> Optional[str]: + return self.get_result(buf=buf, encoding=encoding) def to_latex( self, diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 454e2afb8abe0..9aba4c8aa5019 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -73,17 +73,19 @@ def filepath_or_buffer(filepath_or_buffer_id, tmp_path): @pytest.fixture -def assert_filepath_or_buffer_equals(filepath_or_buffer, filepath_or_buffer_id): +def assert_filepath_or_buffer_equals( + filepath_or_buffer, filepath_or_buffer_id, encoding +): """ Assertion helper for checking filepath_or_buffer. 
""" def _assert_filepath_or_buffer_equals(expected): if filepath_or_buffer_id == "string": - with open(filepath_or_buffer) as f: + with open(filepath_or_buffer, encoding=encoding) as f: result = f.read() elif filepath_or_buffer_id == "pathlike": - result = filepath_or_buffer.read_text() + result = filepath_or_buffer.read_text(encoding=encoding) elif filepath_or_buffer_id == "buffer": result = filepath_or_buffer.getvalue() assert result == expected @@ -3240,14 +3242,32 @@ def test_repr_html_ipython_config(ip): @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"]) +@pytest.mark.parametrize( + "encoding, data", + [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")], +) def test_filepath_or_buffer_arg( - float_frame, method, filepath_or_buffer, assert_filepath_or_buffer_equals + method, + filepath_or_buffer, + assert_filepath_or_buffer_equals, + encoding, + data, + filepath_or_buffer_id, ): - df = float_frame - expected = getattr(df, method)() + df = DataFrame([data]) - getattr(df, method)(buf=filepath_or_buffer) - assert_filepath_or_buffer_equals(expected) + if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None: + with pytest.raises( + ValueError, match="buf is not a file name and encoding is specified." + ): + getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) + elif encoding == "foo": + with pytest.raises(LookupError, match="unknown encoding"): + getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) + else: + expected = getattr(df, method)() + getattr(df, method)(buf=filepath_or_buffer, encoding=encoding) + assert_filepath_or_buffer_equals(expected) @pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
- [x] close #28766 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28951
2019-10-13T06:12:47Z
2019-10-23T17:48:08Z
2019-10-23T17:48:08Z
2019-10-23T17:50:48Z
Change width of specific columns using col_space param in df.to_html
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ad62c56a337b6..b5c95a0465340 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -532,7 +532,7 @@ def __init__( self, frame: "DataFrame", columns: Optional[Sequence[str]] = None, - col_space: Optional[Union[str, int]] = None, + col_space: Optional[Union[dict, str, int]] = None, header: Union[bool, Sequence[str]] = True, index: bool = True, na_rep: str = "NaN", diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 50fa4796f8d72..aa74daf94d530 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -123,7 +123,11 @@ def write_th( """ if header and self.fmt.col_space is not None: tags = tags or "" - tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space) + if type(self.fmt.col_space) is dict: + if s in self.fmt.col_space: + tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space[s]) + else: + tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space) self._write_cell(s, kind="th", indent=indent, tags=tags)
DataFrame.to_html col_space parameter to change width of a specific column only - [ ] closes #28917 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28950
2019-10-13T04:14:24Z
2019-10-13T12:13:59Z
null
2019-10-13T12:13:59Z
WEB: Adding new pandas logo
diff --git a/web/pandas/_templates/layout.html b/web/pandas/_templates/layout.html index fe3e4d1245d93..120058afd1190 100644 --- a/web/pandas/_templates/layout.html +++ b/web/pandas/_templates/layout.html @@ -12,6 +12,7 @@ <title>pandas - Python Data Analysis Library</title> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> + <link rel='shortcut icon' type='image/x-icon' href='{{ base_url }}/static/img/favicon.ico'/> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" diff --git a/web/pandas/about/citing.md b/web/pandas/about/citing.md index 77b79c41aa4d1..5cd31d8722b9d 100644 --- a/web/pandas/about/citing.md +++ b/web/pandas/about/citing.md @@ -33,14 +33,91 @@ If you use _pandas_ for a scientific publication, we would appreciate citations When using the project name _pandas_, please use it in lower case, even at the beginning of a sentence. -The official logo of _pandas_ is: +The official logos of _pandas_ are: -![]({{ base_url }}/static/img/pandas.svg) +### Primary logo -You can download a `svg` version of the logo [here]({{ base_url }}/static/img/pandas.svg). 
+<table class="table logo"> + <tr> + <td> + <img alt="" src="{{ base_url }}/static/img/pandas.svg"/> + </td> + <td style="background-color: #150458"> + <img alt="" src="{{ base_url }}/static/img/pandas_white.svg"/> + </td> + </tr> +</table> + +### Secondary logo + +<table class="table logo"> + <tr> + <td> + <img alt="" src="{{ base_url }}/static/img/pandas_secondary.svg"/> + </td> + <td style="background-color: #150458"> + <img alt="" src="{{ base_url }}/static/img/pandas_secondary_white.svg"/> + </td> + </tr> +</table> + +### Logo mark + +<table class="table logo"> + <tr> + <td> + <img alt="" src="{{ base_url }}/static/img/pandas_mark.svg"/> + </td> + <td style="background-color: #150458"> + <img alt="" src="{{ base_url }}/static/img/pandas_mark_white.svg"/> + </td> + </tr> +</table> + +### Logo usage + +The pandas logo is available in full color and white accent. +The full color logo should only appear against white backgrounds. +The white accent logo should go against contrasting color background. 
When using the logo, please follow the next directives: -- Leave enough margin around the logo +- Primary logo should never be seen under 1 inch in size for printing and 72px for web +- The secondary logo should never be seen under 0.75 inch in size for printing and 55px for web +- Leave enough margin around the logo (leave the height of the logo in the top, bottom and both sides) - Do not distort the logo by changing its proportions - Do not place text or other elements on top of the logo + +### Colors + +<table class="table"> + <tr> + <td style="text-align: center;"> + <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100"> + <circle cx="50" cy="50" r="50" fill="#150458"/> + </svg> + <br/> + <b style="color: #150458;">Blue</b><br/> + RGB: R21 G4 B88<br/> + HEX: #150458 + </td> + <td style="text-align: center;"> + <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100"> + <circle cx="50" cy="50" r="50" fill="#ffca00"/> + </svg> + <br/> + <b style="color: #150458;">Yellow</b><br/> + RGB: R255 G202 B0<br/> + HEX: #FFCA00 + </td> + <td style="text-align: center;"> + <svg xmlns="http://www.w3.org/2000/svg" width="100" height="100"> + <circle cx="50" cy="50" r="50" fill="#e70488"/> + </svg> + <br/> + <b style="color: #150458;">Pink</b><br/> + RGB: R231 G4 B136<br/> + HEX: #E70488 + </td> + </tr> +</table> diff --git a/web/pandas/config.yml b/web/pandas/config.yml index d5c505f298437..e2a95a5039884 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -16,7 +16,7 @@ main: - tables - fenced_code static: - logo: # /static/img/pandas.svg + logo: /static/img/pandas_white.svg css: - /static/css/pandas.css navbar: diff --git a/web/pandas/static/css/pandas.css b/web/pandas/static/css/pandas.css index 0a227cf8d96c9..8b5905d480ac3 100644 --- a/web/pandas/static/css/pandas.css +++ b/web/pandas/static/css/pandas.css @@ -31,7 +31,7 @@ code { color: #130654; } a.navbar-brand img { - max-height: 2em; + height: 3rem; } div.card { margin: 0 0 .2em .2em 
!important; @@ -52,3 +52,9 @@ div.card .card-title { .navbar-dark .navbar-nav .nav-link:hover { color: white; } +table.logo td { + text-align: center; +} +table.logo img { + height: 4rem; +} diff --git a/web/pandas/static/img/favicon.ico b/web/pandas/static/img/favicon.ico new file mode 100644 index 0000000000000..0af2443dcaa3e Binary files /dev/null and b/web/pandas/static/img/favicon.ico differ diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg deleted file mode 120000 index 2e5d3872e4845..0000000000000 --- a/web/pandas/static/img/pandas.svg +++ /dev/null @@ -1 +0,0 @@ -../../../../doc/logo/pandas_logo.svg \ No newline at end of file diff --git a/web/pandas/static/img/pandas.svg b/web/pandas/static/img/pandas.svg new file mode 100644 index 0000000000000..a7af4e4d2d401 --- /dev/null +++ b/web/pandas/static/img/pandas.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 818.63 331.21"><defs><style>.cls-1{fill:#130754;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 63</title><path class="cls-1" d="M290.85,199.21c-10.27,0-20.73-4.25-27.28-12.58v45H243l0-111.09h18.6l.71,12.22c6.38-9.39,17.71-14.35,28.52-14.35,20.73,0,36,17.37,36,40.4S311.58,199.22,290.85,199.21Zm-6.37-65.55c-12.05,0-21.79,9.39-21.79,25.16S272.43,184,284.48,184s21.79-9.39,21.79-25.16S296.53,133.66,284.48,133.66Z"/><path class="cls-1" d="M404.36,197.1l-.71-12.22c-6.38,9.39-17.72,14.35-28.53,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.81,0,22.15,5,28.53,14.35l.71-12.22H423V197.1Zm-22.85-63.43c-12.05,0-21.79,9.39-21.8,25.16S369.45,184,381.5,184s21.8-9.39,21.8-25.16S393.56,133.67,381.51,133.67Z"/><path class="cls-1" d="M494.87,197.11V154.77c0-14.88-5.13-19.84-14.52-19.84-9.75,0-20.38,8.85-20.38,19.48v42.7H439.41V120.57H458.2l.89,14.18c5.14-9.75,16.65-16.3,28.35-16.3,20.37,0,28,14.18,28,33.13v45.54Z"/><path class="cls-1" 
d="M590.77,197.13l-.71-12.23c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.72,4.26,27.28,12.58V90.83h20.56l0,106.3ZM567.92,133.7c-12,0-21.79,9.39-21.79,25.15S555.87,184,567.92,184s21.79-9.38,21.79-25.15S580,133.7,567.92,133.7Z"/><path class="cls-1" d="M686.6,197.14l-.71-12.22c-6.38,9.39-17.72,14.34-28.53,14.34-20.73,0-36-17.36-36-40.4s15.24-40.39,36-40.39c10.81,0,22.15,5,28.53,14.36l.71-12.23h18.6v76.53Zm-22.85-63.43c-12,0-21.79,9.39-21.8,25.16S651.7,184,663.74,184s21.8-9.39,21.8-25.16S675.8,133.71,663.75,133.71Z"/><path class="cls-1" d="M750.73,199.63a60.16,60.16,0,0,1-30.65-8.69l3.37-14.17c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.85-7.44-16.3-9.92-18.78-4.08-25.51-14-25.51-24.81,0-12.05,9.39-23.38,30.12-23.38,12.58,0,23.57,5.49,26,6.91l-3.37,13.47A44.59,44.59,0,0,0,753,132.31c-8.32,0-12.4,2.83-12.4,7.44,0,5.13,5.32,7.44,13.46,9.39,20.2,4.25,28.35,13.64,28.35,23.92C782.45,189.53,770.4,199.63,750.73,199.63Z"/><rect class="cls-1" x="74.88" y="68.42" width="24.09" height="50.02"/><rect class="cls-1" x="74.88" y="171.17" width="24.09" height="50.02"/><rect class="cls-2" x="74.88" y="133.04" width="24.09" height="23.6"/><rect class="cls-1" x="36.19" y="109.55" width="24.09" height="166.27"/><rect class="cls-1" x="112.78" y="212.44" width="24.09" height="50.02"/><rect class="cls-1" x="112.78" y="109.61" width="24.09" height="50.02"/><rect class="cls-3" x="112.78" y="174.23" width="24.09" height="23.6"/><rect class="cls-1" x="150.67" y="55.39" width="24.09" height="166.27"/></svg> \ No newline at end of file diff --git a/web/pandas/static/img/pandas_mark.svg b/web/pandas/static/img/pandas_mark.svg new file mode 100644 index 0000000000000..1451f57de198e --- /dev/null +++ b/web/pandas/static/img/pandas_mark.svg @@ -0,0 +1,111 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + 
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + id="Layer_1" + data-name="Layer 1" + viewBox="0 0 210.21 280.43" + version="1.1" + sodipodi:docname="pandas_mark.svg" + inkscape:version="0.92.4 (unknown)"> + <metadata + id="metadata27"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + </cc:Work> + </rdf:RDF> + </metadata> + <sodipodi:namedview + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1" + objecttolerance="10" + gridtolerance="10" + guidetolerance="10" + inkscape:pageopacity="0" + inkscape:pageshadow="2" + inkscape:window-width="1131" + inkscape:window-height="921" + id="namedview25" + showgrid="false" + inkscape:zoom="0.84156476" + inkscape:cx="107.48153" + inkscape:cy="140.215" + inkscape:window-x="0" + inkscape:window-y="0" + inkscape:window-maximized="0" + inkscape:current-layer="Layer_1" /> + <defs + id="defs4"> + <style + id="style2">.cls-1{fill:#130754;}.cls-2{fill:#48e5ac;}.cls-3{fill:#e70488;}</style> + </defs> + <title + id="title6">Artboard 61</title> + <rect + class="cls-1" + x="74.51" + y="43.03" + width="24.09" + height="50.02" + id="rect8" /> + <rect + class="cls-1" + x="74.51" + y="145.78" + width="24.09" + height="50.02" + id="rect10" /> + <rect + class="cls-2" + x="74.51" + y="107.65" + width="24.09" + height="23.6" + id="rect12" + style="fill:#ffca00;fill-opacity:1" /> + <rect + class="cls-1" + x="35.81" + y="84.15" + width="24.09" + height="166.27" + id="rect14" /> + <rect + class="cls-1" + x="112.41" + y="187.05" + width="24.09" + height="50.02" + id="rect16" /> + <rect + class="cls-1" + x="112.41" + y="84.21" + width="24.09" + height="50.02" + id="rect18" /> + <rect + class="cls-3" + x="112.41" + y="148.84" 
+ width="24.09" + height="23.6" + id="rect20" /> + <rect + class="cls-1" + x="150.3" + y="30" + width="24.09" + height="166.27" + id="rect22" /> +</svg> diff --git a/web/pandas/static/img/pandas_mark_white.svg b/web/pandas/static/img/pandas_mark_white.svg new file mode 100644 index 0000000000000..ae50bf5430c3a --- /dev/null +++ b/web/pandas/static/img/pandas_mark_white.svg @@ -0,0 +1,111 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + id="Layer_1" + data-name="Layer 1" + viewBox="0 0 210.21 280.43" + version="1.1" + sodipodi:docname="pandas_mark_white.svg" + inkscape:version="0.92.4 (unknown)"> + <metadata + id="metadata27"> + <rdf:RDF> + <cc:Work + rdf:about=""> + <dc:format>image/svg+xml</dc:format> + <dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> + </cc:Work> + </rdf:RDF> + </metadata> + <sodipodi:namedview + pagecolor="#ffffff" + bordercolor="#666666" + borderopacity="1" + objecttolerance="10" + gridtolerance="10" + guidetolerance="10" + inkscape:pageopacity="0" + inkscape:pageshadow="2" + inkscape:window-width="761" + inkscape:window-height="480" + id="namedview25" + showgrid="false" + inkscape:zoom="0.84156476" + inkscape:cx="105.105" + inkscape:cy="140.215" + inkscape:window-x="0" + inkscape:window-y="0" + inkscape:window-maximized="0" + inkscape:current-layer="Layer_1" /> + <defs + id="defs4"> + <style + id="style2">.cls-1{fill:#fff;}.cls-2{fill:#48e5ac;}.cls-3{fill:#e70488;}</style> + </defs> + <title + id="title6">Artboard 61 copy</title> + <rect + class="cls-1" + x="74.51" + y="43.03" + width="24.09" + height="50.02" + id="rect8" /> + <rect + class="cls-1" + 
x="74.51" + y="145.78" + width="24.09" + height="50.02" + id="rect10" /> + <rect + class="cls-2" + x="74.51" + y="107.65" + width="24.09" + height="23.6" + id="rect12" + style="fill:#ffca00;fill-opacity:1" /> + <rect + class="cls-1" + x="35.81" + y="84.15" + width="24.09" + height="166.27" + id="rect14" /> + <rect + class="cls-1" + x="112.41" + y="187.05" + width="24.09" + height="50.02" + id="rect16" /> + <rect + class="cls-1" + x="112.41" + y="84.21" + width="24.09" + height="50.02" + id="rect18" /> + <rect + class="cls-3" + x="112.41" + y="148.84" + width="24.09" + height="23.6" + id="rect20" /> + <rect + class="cls-1" + x="150.3" + y="30" + width="24.09" + height="166.27" + id="rect22" /> +</svg> diff --git a/web/pandas/static/img/pandas_secondary.svg b/web/pandas/static/img/pandas_secondary.svg new file mode 100644 index 0000000000000..e74404842e5b6 --- /dev/null +++ b/web/pandas/static/img/pandas_secondary.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 664.97 470.93"><defs><style>.cls-1{fill:#130754;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 57</title><path class="cls-1" d="M110.61,397.48c-10.28,0-20.73-4.25-27.29-12.58v45H62.76l0-111.08h18.6L82.09,331c6.38-9.39,17.72-14.35,28.53-14.34,20.72,0,36,17.36,36,40.4S131.33,397.49,110.61,397.48Zm-6.37-65.55c-12,0-21.8,9.39-21.8,25.16s9.74,25.16,21.79,25.16S126,372.86,126,357.09,116.28,331.93,104.24,331.93Z"/><path class="cls-1" d="M224.11,395.37l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22h18.6v76.54Zm-22.85-63.43c-12,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.15S213.31,332,201.26,331.94Z"/><path class="cls-1" d="M314.62,395.39V353c0-14.88-5.14-19.84-14.53-19.84-9.74,0-20.37,8.85-20.38,19.48v42.7H259.17V318.84H278l.88,14.18c5.14-9.75,16.66-16.3,28.35-16.3,20.37,0,28,14.18,28,33.14v45.53Z"/><path class="cls-1" 
d="M410.52,395.4l-.71-12.23c-6.37,9.39-17.71,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.73,4.26,27.28,12.59V289.1h20.55l0,106.3ZM387.68,332c-12.05,0-21.8,9.39-21.8,25.16s9.74,25.15,21.79,25.16,21.79-9.39,21.79-25.16S399.72,332,387.68,332Z"/><path class="cls-1" d="M506.35,395.41l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22H525v76.53ZM483.5,332c-12.05,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.16S495.55,332,483.5,332Z"/><path class="cls-1" d="M570.49,397.9a60.15,60.15,0,0,1-30.65-8.68L543.2,375c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.84-7.44-16.3-9.92-18.77-4.08-25.51-14-25.5-24.81,0-12,9.39-23.38,30.12-23.38,12.58,0,23.56,5.5,26,6.91L594.94,337a44.52,44.52,0,0,0-22.14-6.38c-8.33,0-12.41,2.83-12.41,7.44,0,5.13,5.32,7.44,13.47,9.39,20.19,4.25,28.34,13.64,28.34,23.92C602.2,387.81,590.15,397.9,570.49,397.9Z"/><rect class="cls-1" x="301.89" y="54.05" width="24.09" height="50.02"/><rect class="cls-1" x="301.89" y="156.8" width="24.09" height="50.02"/><rect class="cls-2" x="301.89" y="118.68" width="24.09" height="23.6"/><rect class="cls-1" x="263.19" y="95.18" width="24.09" height="166.27"/><rect class="cls-1" x="339.79" y="198.07" width="24.09" height="50.02"/><rect class="cls-1" x="339.79" y="95.24" width="24.09" height="50.02"/><rect class="cls-3" x="339.79" y="159.86" width="24.09" height="23.6"/><rect class="cls-1" x="377.68" y="41.03" width="24.09" height="166.27"/></svg> \ No newline at end of file diff --git a/web/pandas/static/img/pandas_secondary_white.svg b/web/pandas/static/img/pandas_secondary_white.svg new file mode 100644 index 0000000000000..86bcca57a031e --- /dev/null +++ b/web/pandas/static/img/pandas_secondary_white.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 664.97 
470.93"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 57 copy</title><path class="cls-1" d="M110.61,397.48c-10.28,0-20.73-4.25-27.29-12.58v45H62.76l0-111.08h18.6L82.09,331c6.38-9.39,17.72-14.35,28.53-14.34,20.72,0,36,17.36,36,40.4S131.33,397.49,110.61,397.48Zm-6.37-65.55c-12,0-21.8,9.39-21.8,25.16s9.74,25.16,21.79,25.16S126,372.86,126,357.09,116.28,331.93,104.24,331.93Z"/><path class="cls-1" d="M224.11,395.37l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22h18.6v76.54Zm-22.85-63.43c-12,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.15S213.31,332,201.26,331.94Z"/><path class="cls-1" d="M314.62,395.39V353c0-14.88-5.14-19.84-14.53-19.84-9.74,0-20.37,8.85-20.38,19.48v42.7H259.17V318.84H278l.88,14.18c5.14-9.75,16.66-16.3,28.35-16.3,20.37,0,28,14.18,28,33.14v45.53Z"/><path class="cls-1" d="M410.52,395.4l-.71-12.23c-6.37,9.39-17.71,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.73,4.26,27.28,12.59V289.1h20.55l0,106.3ZM387.68,332c-12.05,0-21.8,9.39-21.8,25.16s9.74,25.15,21.79,25.16,21.79-9.39,21.79-25.16S399.72,332,387.68,332Z"/><path class="cls-1" d="M506.35,395.41l-.71-12.22c-6.38,9.39-17.72,14.35-28.52,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.8,0,22.14,5,28.52,14.35l.71-12.22H525v76.53ZM483.5,332c-12.05,0-21.79,9.39-21.79,25.16s9.74,25.16,21.79,25.16,21.79-9.39,21.79-25.16S495.55,332,483.5,332Z"/><path class="cls-1" d="M570.49,397.9a60.15,60.15,0,0,1-30.65-8.68L543.2,375c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.84-7.44-16.3-9.92-18.77-4.08-25.51-14-25.5-24.81,0-12,9.39-23.38,30.12-23.38,12.58,0,23.56,5.5,26,6.91L594.94,337a44.52,44.52,0,0,0-22.14-6.38c-8.33,0-12.41,2.83-12.41,7.44,0,5.13,5.32,7.44,13.47,9.39,20.19,4.25,28.34,13.64,28.34,23.92C602.2,387.81,590.15,397.9,570.49,397.9Z"/><rect class="cls-1" x="301.89" y="54.05" width="24.09" 
height="50.02"/><rect class="cls-1" x="301.89" y="156.8" width="24.09" height="50.02"/><rect class="cls-2" x="301.89" y="118.68" width="24.09" height="23.6"/><rect class="cls-1" x="263.19" y="95.18" width="24.09" height="166.27"/><rect class="cls-1" x="339.79" y="198.07" width="24.09" height="50.02"/><rect class="cls-1" x="339.79" y="95.24" width="24.09" height="50.02"/><rect class="cls-3" x="339.79" y="159.86" width="24.09" height="23.6"/><rect class="cls-1" x="377.68" y="41.03" width="24.09" height="166.27"/></svg> \ No newline at end of file diff --git a/web/pandas/static/img/pandas_white.svg b/web/pandas/static/img/pandas_white.svg new file mode 100644 index 0000000000000..bc7c41651182d --- /dev/null +++ b/web/pandas/static/img/pandas_white.svg @@ -0,0 +1 @@ +<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 818.63 331.21"><defs><style>.cls-1{fill:#fff;}.cls-2{fill:#ffca00;}.cls-3{fill:#e70488;}</style></defs><title>Artboard 63 copy 2</title><path class="cls-1" d="M290.85,199.21c-10.27,0-20.73-4.25-27.28-12.58v45H243l0-111.09h18.6l.71,12.22c6.38-9.39,17.71-14.35,28.52-14.35,20.73,0,36,17.37,36,40.4S311.58,199.22,290.85,199.21Zm-6.37-65.55c-12.05,0-21.79,9.39-21.79,25.16S272.43,184,284.48,184s21.79-9.39,21.79-25.16S296.53,133.66,284.48,133.66Z"/><path class="cls-1" d="M404.36,197.1l-.71-12.22c-6.38,9.39-17.72,14.35-28.53,14.34-20.73,0-36-17.36-36-40.39s15.24-40.4,36-40.39c10.81,0,22.15,5,28.53,14.35l.71-12.22H423V197.1Zm-22.85-63.43c-12.05,0-21.79,9.39-21.8,25.16S369.45,184,381.5,184s21.8-9.39,21.8-25.16S393.56,133.67,381.51,133.67Z"/><path class="cls-1" d="M494.87,197.11V154.77c0-14.88-5.13-19.84-14.52-19.84-9.75,0-20.38,8.85-20.38,19.48v42.7H439.41V120.57H458.2l.89,14.18c5.14-9.75,16.65-16.3,28.35-16.3,20.37,0,28,14.18,28,33.13v45.54Z"/><path class="cls-1" 
d="M590.77,197.13l-.71-12.23c-6.38,9.39-17.72,14.35-28.52,14.35-20.73,0-36-17.37-36-40.4s15.24-40.39,36-40.39c10.27,0,20.72,4.26,27.28,12.58V90.83h20.56l0,106.3ZM567.92,133.7c-12,0-21.79,9.39-21.79,25.15S555.87,184,567.92,184s21.79-9.38,21.79-25.15S580,133.7,567.92,133.7Z"/><path class="cls-1" d="M686.6,197.14l-.71-12.22c-6.38,9.39-17.72,14.34-28.53,14.34-20.73,0-36-17.36-36-40.4s15.24-40.39,36-40.39c10.81,0,22.15,5,28.53,14.36l.71-12.23h18.6v76.53Zm-22.85-63.43c-12,0-21.79,9.39-21.8,25.16S651.7,184,663.74,184s21.8-9.39,21.8-25.16S675.8,133.71,663.75,133.71Z"/><path class="cls-1" d="M750.73,199.63a60.16,60.16,0,0,1-30.65-8.69l3.37-14.17c6.2,3.72,15.59,8.51,26.93,8.51,8.15,0,13.82-2.48,13.82-8.86,0-5.49-5.85-7.44-16.3-9.92-18.78-4.08-25.51-14-25.51-24.81,0-12.05,9.39-23.38,30.12-23.38,12.58,0,23.57,5.49,26,6.91l-3.37,13.47A44.59,44.59,0,0,0,753,132.31c-8.32,0-12.4,2.83-12.4,7.44,0,5.13,5.32,7.44,13.46,9.39,20.2,4.25,28.35,13.64,28.35,23.92C782.45,189.53,770.4,199.63,750.73,199.63Z"/><rect class="cls-1" x="74.88" y="68.42" width="24.09" height="50.02"/><rect class="cls-1" x="74.88" y="171.17" width="24.09" height="50.02"/><rect class="cls-2" x="74.88" y="133.04" width="24.09" height="23.6"/><rect class="cls-1" x="36.19" y="109.55" width="24.09" height="166.27"/><rect class="cls-1" x="112.78" y="212.44" width="24.09" height="50.02"/><rect class="cls-1" x="112.78" y="109.61" width="24.09" height="50.02"/><rect class="cls-3" x="112.78" y="174.23" width="24.09" height="23.6"/><rect class="cls-1" x="150.67" y="55.39" width="24.09" height="166.27"/></svg> \ No newline at end of file
- [X] closes #21376, xref #28521 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ![logo_screenshot](https://user-images.githubusercontent.com/10058240/66708951-9ae55280-ed1f-11e9-9621-b482a3af345c.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/28948
2019-10-12T23:40:15Z
2019-10-22T04:07:21Z
2019-10-22T04:07:21Z
2019-10-22T04:07:21Z
CLN: fix mypy errors in pandas/tests/extension/test_numpy.py #28926
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index e35464964f432..e968962caf0b7 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -1,4 +1,5 @@ import operator +from typing import Optional, Type import pytest @@ -61,10 +62,10 @@ class BaseArithmeticOpsTests(BaseOpsUtil): * divmod_exc = TypeError """ - series_scalar_exc = TypeError - frame_scalar_exc = TypeError - series_array_exc = TypeError - divmod_exc = TypeError + series_scalar_exc = TypeError # type: Optional[Type[TypeError]] + frame_scalar_exc = TypeError # type: Optional[Type[TypeError]] + series_array_exc = TypeError # type: Optional[Type[TypeError]] + divmod_exc = TypeError # type: Optional[Type[TypeError]] def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # series & scalar diff --git a/setup.cfg b/setup.cfg index 9c841b76761f5..9af7215b1dc56 100644 --- a/setup.cfg +++ b/setup.cfg @@ -166,12 +166,6 @@ ignore_errors=True [mypy-pandas.tests.extension.json.test_json] ignore_errors=True -[mypy-pandas.tests.extension.test_numpy] -ignore_errors=True - -[mypy-pandas.tests.extension.test_sparse] -ignore_errors=True - [mypy-pandas.tests.frame.test_constructors] ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28947
2019-10-12T22:12:36Z
2019-10-13T15:58:58Z
2019-10-13T15:58:58Z
2019-10-13T15:59:05Z
PR06 doc string fixes
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index 8614230c4811f..63344af63470f 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -198,14 +198,14 @@ def eval( <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. - parser : string, default 'pandas', {'pandas', 'python'} + parser : {'pandas', 'python'}, default 'pandas' The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. - engine : string or None, default 'numexpr', {'python', 'numexpr'} + engine : {'python', 'numexpr'}, default 'numexpr' The engine used to evaluate the expression. Supported engines are diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5200ad0ba0d23..79e941f262931 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6238,7 +6238,7 @@ def unstack(self, level=-1, fill_value=None): ---------- level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name. - fill_value : int, string or dict + fill_value : int, str or dict Replace NaN with this value if the unstack produces missing values. Returns diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fa269b4ebeab1..da8db23fb538b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2353,7 +2353,7 @@ def to_json( .. versionadded:: 0.23.0 - indent : integer, optional + indent : int, optional Length of whitespace used to indent each record. .. 
versionadded:: 1.0.0 diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7be11696b7d45..068d5e5275f0d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1644,7 +1644,7 @@ def nunique(self, dropna=True): Parameters ---------- - dropna : boolean, default True + dropna : bool, default True Don't include NaN in the counts. Returns diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c9c02ad9e496a..7dee3a17f8f9e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4531,7 +4531,7 @@ def shift(self, periods=1, freq=None): periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. - freq : pandas.DateOffset, pandas.Timedelta or string, optional + freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index ed3a4a7953df3..b538c4df00e19 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -77,7 +77,7 @@ class CategoricalIndex(Index, accessor.PandasDelegate): Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. - dtype : CategoricalDtype or the string "category", optional + dtype : CategoricalDtype or "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`. 
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 0b20df38e7d42..6a2f49cd1470e 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1328,7 +1328,7 @@ def indexer_at_time(self, time, asof=False): Parameters ---------- - time : datetime.time or string + time : datetime.time or str datetime.time or string in appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"). diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 2cc15f7650ac1..a2d48b5100a2e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1412,11 +1412,11 @@ def interval_range( Right bound for generating intervals periods : int, default None Number of periods to generate - freq : numeric, string, or DateOffset, default None + freq : numeric, str, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. - name : string, default None + name : str, default None Name of the resulting IntervalIndex closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 2007da541bb2e..596eaf0c55dbd 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1650,7 +1650,7 @@ def to_frame(self, index=True, name=None): Parameters ---------- - index : boolean, default True + index : bool, default True Set the index of the returned DataFrame as the original MultiIndex. 
name : list / sequence of strings, optional @@ -2334,7 +2334,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. - ascending : boolean, default True + ascending : bool, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index be5d75224e77d..6942a5797a7f0 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -285,10 +285,10 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates="raise"): Parameters ---------- x : 1d ndarray or Series - q : integer or array of quantiles + q : int or list-like of int Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles - labels : array or boolean, default None + labels : array or bool, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. 
diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index fa33d11bda7eb..05696ffd4605d 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -39,7 +39,7 @@ def to_numeric(arg, errors="raise", downcast=None): - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaN - If 'ignore', then invalid parsing will return the input - downcast : {'integer', 'signed', 'unsigned', 'float'} , default None + downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 6ce288890b6c7..c71677fa3b570 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -384,7 +384,7 @@ def read_json( By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. - orient : string, + orient : str Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 3678e32943b2e..c82486532530f 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -257,7 +257,7 @@ arguments. dayfirst : bool, default False DD/MM format dates, international and European format. -cache_dates : boolean, default True +cache_dates : bool, default True If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. 
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0db5b1b4eecfa..c87cad5472bd9 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1025,8 +1025,8 @@ def append( Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data - append : boolean, default True, append the input data to the - existing + append : bool, default True + Append the input data to the existing. data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes @@ -1037,8 +1037,9 @@ def append( chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for strings - dropna : boolean, default False, do not write an ALL nan row to - the store settable by the option 'io.hdf.dropna_table' + dropna : bool, default False + Do not write an ALL nan row to the store settable + by the option 'io.hdf.dropna_table'. Notes ----- diff --git a/pandas/io/sql.py b/pandas/io/sql.py index b0683fb8b0dfb..822b3288c82d9 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -287,7 +287,7 @@ def read_sql_query( If a DBAPI2 object, only sqlite3 is supported. index_col : string or list of strings, optional, default: None Column(s) to set as index(MultiIndex). - coerce_float : boolean, default True + coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. 
params : list, tuple or dict, optional, default: None diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 0b674b556b2ee..679b74caba79e 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -53,31 +53,31 @@ ) _statafile_processing_params1 = """\ -convert_dates : boolean, defaults to True +convert_dates : bool, default True Convert date variables to DataFrame time values. -convert_categoricals : boolean, defaults to True +convert_categoricals : bool, default True Read value labels and convert columns to Categorical/Factor variables.""" _encoding_params = """\ -encoding : string, None or encoding +encoding : str, None or encoding Encoding used to parse the files. None defaults to latin-1.""" _statafile_processing_params2 = """\ -index_col : string, optional, default: None +index_col : str, optional Column to set as index. -convert_missing : boolean, defaults to False +convert_missing : bool, default False Flag indicating whether to convert missing values to their Stata representations. If False, missing values are replaced with nan. If True, columns containing missing values are returned with object data types and missing values are represented by StataMissingValue objects. -preserve_dtypes : boolean, defaults to True +preserve_dtypes : bool, default True Preserve Stata datatypes. If False, numeric data are upcast to pandas default types for foreign data (float64 or int64). columns : list or None Columns to retain. Columns will be returned in the given order. None returns all columns. 
-order_categoricals : boolean, defaults to True +order_categoricals : bool, default True Flag indicating whether converted categorical data are ordered.""" _chunksize_params = """\ @@ -86,7 +86,7 @@ given number of lines.""" _iterator_params = """\ -iterator : boolean, default False +iterator : bool, default False Return StataReader object.""" _read_stata_doc = """ diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 4491e6ad9ac7e..0dcd8aeb4df9b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -223,7 +223,7 @@ def infer_freq(index, warn=True): ---------- index : DatetimeIndex or TimedeltaIndex if passed a Series will use the values of the series (NOT THE INDEX) - warn : boolean, default True + warn : bool, default True Returns -------
This PR contains doctrine PR06 fixes mostly doing the conversion below(not that many left to fix :)): boolean to bool string to str integer to int Tests unchanged black pandas ran successfully
https://api.github.com/repos/pandas-dev/pandas/pulls/28946
2019-10-12T22:07:20Z
2019-10-13T19:07:57Z
2019-10-13T19:07:57Z
2019-10-13T19:08:21Z
BUG: Fix comparison between nullable int and string
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 38051e9772ae9..08253c160d408 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -838,6 +838,7 @@ ExtensionArray ^^^^^^^^^^^^^^ - Bug in :class:`arrays.PandasArray` when setting a scalar string (:issue:`28118`, :issue:`28150`). +- Bug where nullable integers could not be compared to strings (:issue:`28930`) - diff --git a/pandas/conftest.py b/pandas/conftest.py index 3553a411a27f8..6b43bf58b5046 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -654,6 +654,24 @@ def any_int_dtype(request): return request.param +@pytest.fixture(params=ALL_EA_INT_DTYPES) +def any_nullable_int_dtype(request): + """ + Parameterized fixture for any nullable integer dtype. + + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + + return request.param + + @pytest.fixture(params=ALL_REAL_DTYPES) def any_real_dtype(request): """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 2bfb53aa1c800..08a3eca1e9055 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -26,6 +26,7 @@ from pandas.core import nanops, ops from pandas.core.algorithms import take from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin +from pandas.core.ops import invalid_comparison from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.tools.numeric import to_numeric @@ -646,7 +647,11 @@ def cmp_method(self, other): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "elementwise", FutureWarning) with np.errstate(all="ignore"): - result = op(self._data, other) + method = getattr(self._data, f"__{op_name}__") + result = method(other) + + if result is NotImplemented: + result = invalid_comparison(self._data, other, op) # nans propagate if mask is None: diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py 
index d051345fdd12d..f94dbfcc3ec6c 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -168,6 +168,27 @@ def check_opname(self, s, op_name, other, exc=None): def _compare_other(self, s, data, op_name, other): self.check_opname(s, op_name, other) + def test_compare_to_string(self, any_nullable_int_dtype): + # GH 28930 + s = pd.Series([1, None], dtype=any_nullable_int_dtype) + result = s == "a" + expected = pd.Series([False, False]) + + self.assert_series_equal(result, expected) + + def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators): + # GH 28930 + s1 = pd.Series([1, 2, 3], dtype=any_nullable_int_dtype) + s2 = pd.Series([1, 2, 3], dtype="int") + + method = getattr(s1, all_compare_operators) + result = method(2) + + method = getattr(s2, all_compare_operators) + expected = method(2) + + self.assert_series_equal(result, expected) + class TestInterface(base.BaseInterfaceTests): pass
- [x] closes #28930 - [x] tests added / passed - [x] passes `black pandas` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28945
2019-10-12T21:09:02Z
2019-12-10T13:27:19Z
2019-12-10T13:27:18Z
2019-12-11T09:32:51Z
TST: add test_series_any_timedelta for GH17667
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index dc4db6e7902a8..9acf9e21b9775 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1065,6 +1065,23 @@ def test_frame_any_all_group(self): ex = DataFrame({"data": [False, False]}, index=["one", "two"]) tm.assert_frame_equal(result, ex) + def test_series_any_timedelta(self): + # GH 17667 + df = DataFrame( + { + "a": Series([0, 0]), + "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]), + } + ) + + result = df.any(axis=0) + expected = Series(data=[False, True], index=["a", "t"]) + tm.assert_series_equal(result, expected) + + result = df.any(axis=1) + expected = Series(data=[False, True]) + tm.assert_series_equal(result, expected) + def test_std_var_pass_ddof(self): index = MultiIndex.from_arrays( [np.arange(5).repeat(10), np.tile(np.arange(10), 5)]
Test case example is the same as the one given in the issue #17667 - [x] closes #17667 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28942
2019-10-12T17:51:22Z
2019-10-13T16:44:43Z
2019-10-13T16:44:42Z
2020-10-24T14:10:17Z
DOC: disable nbsphinx including requirejs
diff --git a/doc/source/conf.py b/doc/source/conf.py index 34faf183db1c2..86f78d9c0f0ae 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -120,6 +120,9 @@ plot_pre_code = """import numpy as np import pandas as pd""" +# nbsphinx do not use requirejs (breaks bootstrap) +nbsphinx_requirejs_path = "" + # Add any paths that contain templates here, relative to this directory. templates_path = ["../_templates"]
To fix https://github.com/pandas-dev/pandas-sphinx-theme/issues/25. With their latest release, `nbsphinx` started including require.js by default, which doesn't play nice with the bootstrap.js (and as a result, no javascript at all works). This is using an option of `nbsphinx` to not include requirejs (which we don't need, nbsphinx added this for rendering notebooks with plotly, I think)
https://api.github.com/repos/pandas-dev/pandas/pulls/28940
2019-10-12T16:40:49Z
2019-10-12T18:03:58Z
2019-10-12T18:03:58Z
2019-10-12T18:04:01Z
CLN: try/except cleanups
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 605d179e7c652..91f3e878c3807 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -396,15 +396,11 @@ def wrap_results_for_axis(self): result = self.obj._constructor(data=results) if not isinstance(results[0], ABCSeries): - try: + if len(result.index) == len(self.res_columns): result.index = self.res_columns - except ValueError: - pass - try: + if len(result.columns) == len(self.res_index): result.columns = self.res_index - except ValueError: - pass return result diff --git a/pandas/core/base.py b/pandas/core/base.py index 4d5b20c56df5a..2d798dd15ad24 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -16,6 +16,7 @@ from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._validators import validate_bool_kwarg +from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_ns_dtype, @@ -566,25 +567,27 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): # degenerate case if obj.ndim == 1: for a in arg: + colg = self._gotitem(obj.name, ndim=1, subset=obj) try: - colg = self._gotitem(obj.name, ndim=1, subset=obj) - results.append(colg.aggregate(a)) + new_res = colg.aggregate(a) - # make sure we find a good name - name = com.get_callable_name(a) or a - keys.append(name) except (TypeError, DataError): pass except SpecificationError: raise + else: + results.append(new_res) + + # make sure we find a good name + name = com.get_callable_name(a) or a + keys.append(name) # multiples else: for index, col in enumerate(obj): + colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index]) try: - colg = self._gotitem(col, ndim=1, subset=obj.iloc[:, index]) - results.append(colg.aggregate(arg)) - keys.append(col) + new_res = colg.aggregate(arg) except (TypeError, DataError): pass except ValueError: @@ -592,6 +595,9 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): continue except 
SpecificationError: raise + else: + results.append(new_res) + keys.append(col) # if we are empty if not len(results): @@ -604,7 +610,6 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): # we are concatting non-NDFrame objects, # e.g. a list of scalars - from pandas.core.dtypes.cast import is_nested_object from pandas import Series result = Series(results, index=keys, name=self.name) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 5200d33c6a1fb..7be11696b7d45 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -505,9 +505,7 @@ def true_and_notna(x, *args, **kwargs): indices = [ self._get_index(name) for name, group in self if true_and_notna(group) ] - except ValueError: - raise TypeError("the filter must return a boolean result") - except TypeError: + except (ValueError, TypeError): raise TypeError("the filter must return a boolean result") filtered = self._apply_filter(indices, dropna) @@ -1052,8 +1050,8 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): data = obj[item] colg = SeriesGroupBy(data, selection=item, grouper=self.grouper) + cast = self._transform_should_cast(func) try: - cast = self._transform_should_cast(func) result[item] = colg.aggregate(func, *args, **kwargs) if cast:
Move non-raising stuff out of try/except to narrow down the failure modes.
https://api.github.com/repos/pandas-dev/pandas/pulls/28939
2019-10-12T16:26:24Z
2019-10-12T17:00:19Z
2019-10-12T17:00:19Z
2019-10-12T18:32:07Z
fix #28926 pandas\api\test_api.py mypy errors
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 6c50159663574..0af8ed0ebf8d5 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -1,3 +1,5 @@ +from typing import List + import pandas as pd from pandas import api, compat from pandas.util import testing as tm @@ -41,7 +43,7 @@ class TestPDApi(Base): ] # these are already deprecated; awaiting removal - deprecated_modules = [] + deprecated_modules = [] # type: List[str] # misc misc = ["IndexSlice", "NaT"] @@ -92,10 +94,10 @@ class TestPDApi(Base): classes.extend(["Panel", "SparseSeries", "SparseDataFrame"]) # these are already deprecated; awaiting removal - deprecated_classes = [] + deprecated_classes = [] # type: List[str] # these should be deprecated in the future - deprecated_classes_in_future = [] + deprecated_classes_in_future = [] # type: List[str] # external modules exposed in pandas namespace modules = ["np", "datetime"] @@ -171,10 +173,10 @@ class TestPDApi(Base): funcs_to = ["to_datetime", "to_msgpack", "to_numeric", "to_pickle", "to_timedelta"] # top-level to deprecate in the future - deprecated_funcs_in_future = [] + deprecated_funcs_in_future = [] # type: List[str] # these are already deprecated; awaiting removal - deprecated_funcs = [] + deprecated_funcs = [] # type: List[str] # private modules in pandas namespace private_modules = [ diff --git a/setup.cfg b/setup.cfg index 9c841b76761f5..69b67c82a1e9f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -133,9 +133,6 @@ no_implicit_optional=True [mypy-pandas.conftest] ignore_errors=True -[mypy-pandas.tests.api.test_api] -ignore_errors=True - [mypy-pandas.tests.arithmetic.test_datetime64] ignore_errors=True
- [ ] xref #28926 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28935
2019-10-11T21:35:29Z
2019-10-13T19:23:37Z
2019-10-13T19:23:37Z
2019-10-13T19:23:53Z
REF: de-duplicate groupby_helper code
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 3069bbbf34bb7..c9994812462b1 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -372,7 +372,8 @@ def group_any_all(uint8_t[:] out, const uint8_t[:] mask, object val_test, bint skipna): - """Aggregated boolean values to show truthfulness of group elements + """ + Aggregated boolean values to show truthfulness of group elements. Parameters ---------- diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index f052feea0bbf3..c837c6c5c6519 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -20,6 +20,18 @@ ctypedef fused rank_t: object +cdef inline bint _treat_as_na(rank_t val, bint is_datetimelike) nogil: + if rank_t is object: + # Should never be used, but we need to avoid the `val != val` below + # or else cython will raise about gil acquisition. + raise NotImplementedError + + elif rank_t is int64_t: + return is_datetimelike and val == NPY_NAT + else: + return val != val + + @cython.wraparound(False) @cython.boundscheck(False) def group_last(rank_t[:, :] out, @@ -61,24 +73,16 @@ def group_last(rank_t[:, :] out, for j in range(K): val = values[i, j] - # not nan - if rank_t is int64_t: - # need a special notna check - if val != NPY_NAT: - nobs[lab, j] += 1 - resx[lab, j] = val - else: - if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val + if val == val: + # NB: use _treat_as_na here once + # conditional-nogil is available. 
+ nobs[lab, j] += 1 + resx[lab, j] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - if rank_t is int64_t: - out[i, j] = NPY_NAT - else: - out[i, j] = NAN + out[i, j] = NAN else: out[i, j] = resx[i, j] else: @@ -92,16 +96,10 @@ def group_last(rank_t[:, :] out, for j in range(K): val = values[i, j] - # not nan - if rank_t is int64_t: - # need a special notna check - if val != NPY_NAT: - nobs[lab, j] += 1 - resx[lab, j] = val - else: - if val == val: - nobs[lab, j] += 1 - resx[lab, j] = val + if not _treat_as_na(val, True): + # TODO: Sure we always want is_datetimelike=True? + nobs[lab, j] += 1 + resx[lab, j] = val for i in range(ncounts): for j in range(K): @@ -113,6 +111,7 @@ def group_last(rank_t[:, :] out, break else: out[i, j] = NAN + else: out[i, j] = resx[i, j] @@ -121,7 +120,6 @@ def group_last(rank_t[:, :] out, # block. raise RuntimeError("empty group with uint64_t") - group_last_float64 = group_last["float64_t"] group_last_float32 = group_last["float32_t"] group_last_int64 = group_last["int64_t"] @@ -169,8 +167,9 @@ def group_nth(rank_t[:, :] out, for j in range(K): val = values[i, j] - # not nan if val == val: + # NB: use _treat_as_na here once + # conditional-nogil is available. nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val @@ -193,18 +192,11 @@ def group_nth(rank_t[:, :] out, for j in range(K): val = values[i, j] - # not nan - if rank_t is int64_t: - # need a special notna check - if val != NPY_NAT: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val - else: - if val == val: - nobs[lab, j] += 1 - if nobs[lab, j] == rank: - resx[lab, j] = val + if not _treat_as_na(val, True): + # TODO: Sure we always want is_datetimelike=True? 
+ nobs[lab, j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val for i in range(ncounts): for j in range(K): @@ -487,17 +479,11 @@ def group_max(groupby_t[:, :] out, for j in range(K): val = values[i, j] - # not nan - if groupby_t is int64_t: - if val != nan_val: - nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val - else: - if val == val: - nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val + if not _treat_as_na(val, True): + # TODO: Sure we always want is_datetimelike=True? + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val for i in range(ncounts): for j in range(K): @@ -563,17 +549,11 @@ def group_min(groupby_t[:, :] out, for j in range(K): val = values[i, j] - # not nan - if groupby_t is int64_t: - if val != nan_val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - else: - if val == val: - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val + if not _treat_as_na(val, True): + # TODO: Sure we always want is_datetimelike=True? 
+ nobs[lab, j] += 1 + if val < minx[lab, j]: + minx[lab, j] = val for i in range(ncounts): for j in range(K): @@ -643,21 +623,13 @@ def group_cummin(groupby_t[:, :] out, for j in range(K): val = values[i, j] - # val = nan - if groupby_t is int64_t: - if is_datetimelike and val == NPY_NAT: - out[i, j] = NPY_NAT - else: - mval = accum[lab, j] - if val < mval: - accum[lab, j] = mval = val - out[i, j] = mval + if _treat_as_na(val, is_datetimelike): + out[i, j] = val else: - if val == val: - mval = accum[lab, j] - if val < mval: - accum[lab, j] = mval = val - out[i, j] = mval + mval = accum[lab, j] + if val < mval: + accum[lab, j] = mval = val + out[i, j] = mval @cython.boundscheck(False) @@ -712,17 +684,10 @@ def group_cummax(groupby_t[:, :] out, for j in range(K): val = values[i, j] - if groupby_t is int64_t: - if is_datetimelike and val == NPY_NAT: - out[i, j] = NPY_NAT - else: - mval = accum[lab, j] - if val > mval: - accum[lab, j] = mval = val - out[i, j] = mval + if _treat_as_na(val, is_datetimelike): + out[i, j] = val else: - if val == val: - mval = accum[lab, j] - if val > mval: - accum[lab, j] = mval = val - out[i, j] = mval + mval = accum[lab, j] + if val > mval: + accum[lab, j] = mval = val + out[i, j] = mval
There's one other piece of de-duplication I think should be feasible but cython is still raising compilation errors for, so will do separately. Orthogonal to #28931, but I expect it will cause merge conflicts. 28931 should be a higher priority.
https://api.github.com/repos/pandas-dev/pandas/pulls/28934
2019-10-11T19:10:49Z
2019-10-16T19:09:04Z
2019-10-16T19:09:04Z
2019-10-16T19:15:52Z
BUG: Preserve key order when using loc on MultiIndex DataFrame
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 607a2c02944b4..7071289ef3243 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -161,8 +161,14 @@ Missing MultiIndex ^^^^^^^^^^ +- Bug in :meth:`Dataframe.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`) -- +.. ipython:: python + + df = pd.DataFrame(np.arange(4), + index=[["a", "a", "b", "b"], [1, 2, 1, 2]]) + # Rows are now ordered as the requested keys + df.loc[(['b', 'a'], [2, 1]), :] - I/O diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4af9901d79a46..c560d81ba95f6 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,6 +1,6 @@ import datetime from sys import getsizeof -from typing import Any, Hashable, List, Optional, Sequence, Union +from typing import Any, Hashable, Iterable, List, Optional, Sequence, Tuple, Union import warnings import numpy as np @@ -9,6 +9,7 @@ from pandas._libs import Timestamp, algos as libalgos, index as libindex, lib, tslibs from pandas._libs.hashtable import duplicated_int64 +from pandas._typing import AnyArrayLike, ArrayLike, Scalar from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly @@ -3081,9 +3082,69 @@ def _update_indexer(idxr, indexer=indexer): # empty indexer if indexer is None: return Int64Index([])._ndarray_values + + indexer = self._reorder_indexer(seq, indexer) + return indexer._ndarray_values - # -------------------------------------------------------------------- + def _reorder_indexer( + self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike + ) -> ArrayLike: + """ + Reorder an indexer of a MultiIndex (self) so that the label are in the + same order as given in seq + + Parameters + ---------- + seq : label/slice/list/mask or a 
sequence of such + indexer: an Int64Index indexer of self + + Returns + ------- + indexer : a sorted Int64Index indexer of self ordered as seq + """ + # If the index is lexsorted and the list_like label in seq are sorted + # then we do not need to sort + if self.is_lexsorted(): + need_sort = False + for i, k in enumerate(seq): + if is_list_like(k): + if not need_sort: + k_codes = self.levels[i].get_indexer(k) + k_codes = k_codes[k_codes >= 0] # Filter absent keys + # True if the given codes are not ordered + need_sort = (k_codes[:-1] > k_codes[1:]).any() + # Bail out if both index and seq are sorted + if not need_sort: + return indexer + + n = len(self) + keys: Tuple[np.ndarray, ...] = tuple() + # For each level of the sequence in seq, map the level codes with the + # order they appears in a list-like sequence + # This mapping is then use to reorder the indexer + for i, k in enumerate(seq): + if com.is_bool_indexer(k): + new_order = np.arange(n)[indexer] + elif is_list_like(k): + # Generate a map with all level codes as sorted initially + key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len( + self.levels[i] + ) + # Set order as given in the indexer list + level_indexer = self.levels[i].get_indexer(k) + level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys + key_order_map[level_indexer] = np.arange(len(level_indexer)) + + new_order = key_order_map[self.codes[i][indexer]] + else: + # For all other case, use the same order as the level + new_order = np.arange(n)[indexer] + keys = (new_order,) + keys + + # Find the reordering using lexsort on the keys mapping + ind = np.lexsort(keys) + return indexer[ind] def truncate(self, before=None, after=None): """ diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 640cd8faf6811..b377ca2869bd3 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -2534,3 +2534,29 @@ def test_sort_ascending_list(self): result = 
s.sort_index(level=["third", "first"], ascending=[False, True]) expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]] tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "keys, expected", + [ + (["b", "a"], [["b", "b", "a", "a"], [1, 2, 1, 2]]), + (["a", "b"], [["a", "a", "b", "b"], [1, 2, 1, 2]]), + ((["a", "b"], [1, 2]), [["a", "a", "b", "b"], [1, 2, 1, 2]]), + ((["a", "b"], [2, 1]), [["a", "a", "b", "b"], [2, 1, 2, 1]]), + ((["b", "a"], [2, 1]), [["b", "b", "a", "a"], [2, 1, 2, 1]]), + ((["b", "a"], [1, 2]), [["b", "b", "a", "a"], [1, 2, 1, 2]]), + ((["c", "a"], [2, 1]), [["c", "a", "a"], [1, 2, 1]]), + ], + ) + @pytest.mark.parametrize("dim", ["index", "columns"]) + def test_multilevel_index_loc_order(self, dim, keys, expected): + # GH 22797 + # Try to respect order of keys given for MultiIndex.loc + kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]} + df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,) + exp_index = MultiIndex.from_arrays(expected) + if dim == "index": + res = df.loc[keys, :] + tm.assert_index_equal(res.index, exp_index) + elif dim == "columns": + res = df.loc[:, keys] + tm.assert_index_equal(res.columns, exp_index)
## Description closes #22797 As described in #22797, the key order given to loc for a MultiIndex DataFrame was not respected: ``` import pandas as pd import numpy as np df = pd.DataFrame(np.arange(12).reshape((4, 3)), index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], columns=[['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']]) df.loc[(['b','a'],[2, 1]),:] # Out Ohio Colorado Green Red Green a 1 0 1 2 2 3 4 5 b 1 6 7 8 2 9 10 11 ``` ## Proposed fix The culprit was the use of intersection of indexers in the loc function. I tried keeping the indexers sorted during the whole function (in the main loop), but performance were really affected (by a factor 3!!!). As an other solution, I tried to sort the result after the indexers were computed. It was already way better (worse "only" by a factor 1.15 or so, see the asv benchmark result). So I computed and add a flag testing if the result need to be sorted (the benchmark seems to always have sorted key in the loc call). **Update** The sorting function is now a separate private function (_reorder_indexer). It is called at the end of the get_locs function. 
## Benchmark Benchmark with the flag (I run asv compare with -s option): <details> Benchmarks that have got worse: before after ratio [39602e7d] [da8b55af] <master> <multiindex_sort_loc_order_issue_22797> + 5.62±0.2μs 6.27±0.2μs 1.11 index_cached_properties.IndexCache.time_shape('Float64Index') + 6.57±0.2μs 7.49±0.2μs 1.14 index_cached_properties.IndexCache.time_shape('TimedeltaIndex') </details> Benchmark without flag: <details> Benchmarks that have got worse: before after ratio [39602e7d] [c786822a] <master> <multiindex_sort_loc_order_issue_22797~1> + 2.49±0.02ms 2.87±0.01ms 1.15 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, False, 'int') + 2.53±0ms 2.91±0.01ms 1.15 ctors.SeriesConstructors.time_series_constructor(<class 'list'>, True, 'int') + 29.2±0.7ms 33.1±0.02ms 1.13 frame_ctor.FromLists.time_frame_from_lists + 87.2±1ms 98.9±1ms 1.13 frame_ctor.FromRecords.time_frame_from_records_generator(None) + 12.8±0.09ms 14.3±0.09ms 1.11 groupby.MultiColumn.time_col_select_numpy_sum + 5.62±0.2μs 6.32±0.4μs 1.12 index_cached_properties.IndexCache.time_shape('Float64Index') + 4.96±0.02ms 5.71±0.01ms 1.15 indexing.MultiIndexing.time_index_slice + 2.91±0ms 3.29±0.01ms 1.13 inference.ToNumeric.time_from_numeric_str('coerce') + 2.92±0ms 3.29±0.01ms 1.13 inference.ToNumeric.time_from_numeric_str('ignore') + 3.45±0.01ms 3.84±0.01ms 1.11 series_methods.Map.time_map('lambda', 'object') + 29.3±0.2ms 33.2±0.04ms 1.13 strings.Methods.time_len </details> ## Checklist - [x] closes #22797 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28933
2019-10-11T18:52:53Z
2020-02-02T22:20:57Z
2020-02-02T22:20:56Z
2020-03-16T19:31:39Z
TYPING: Module 'pytz' has no attribute 'FixedOffset'
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index b6f25d45f136a..ff88c614cabd0 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -4,11 +4,14 @@ from datetime import datetime, time import locale +# https://github.com/python/typeshed/pull/XXXX +# error: Module 'pytz' has no attribute 'FixedOffset' from dateutil.parser import parse from dateutil.tz.tz import tzoffset import numpy as np import pytest import pytz +from pytz import FixedOffset # type:ignore from pandas._libs import tslib from pandas._libs.tslibs import iNaT, parsing @@ -267,19 +270,19 @@ def test_to_datetime_format_weeks(self, cache): [ "%Y-%m-%d %H:%M:%S%z", ["2010-01-01 12:00:00+0100"] * 2, - [pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, + [pd.Timestamp("2010-01-01 12:00:00", tzinfo=FixedOffset(60))] * 2, ], [ "%Y-%m-%d %H:%M:%S %z", ["2010-01-01 12:00:00 +0100"] * 2, - [pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2, + [pd.Timestamp("2010-01-01 12:00:00", tzinfo=FixedOffset(60))] * 2, ], [ "%Y-%m-%d %H:%M:%S %z", ["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"], [ - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)), - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)), + pd.Timestamp("2010-01-01 12:00:00", tzinfo=FixedOffset(60)), + pd.Timestamp("2010-01-01 12:00:00", tzinfo=FixedOffset(-60)), ], ], [ @@ -287,9 +290,9 @@ def test_to_datetime_format_weeks(self, cache): ["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"], [ pd.Timestamp( - "2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0) - ), # pytz coerces to UTC - pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)), + "2010-01-01 12:00:00", tzinfo=FixedOffset(0) + ), # pytz.FixedOffset coerces to UTC + pd.Timestamp("2010-01-01 12:00:00", tzinfo=FixedOffset(0)), ], ], ], @@ -931,8 +934,8 @@ def 
test_iso_8601_strings_same_offset_no_box(self): expected = np.array( [ - datetime(2018, 1, 4, 9, 1, tzinfo=pytz.FixedOffset(540)), - datetime(2018, 1, 4, 9, 2, tzinfo=pytz.FixedOffset(540)), + datetime(2018, 1, 4, 9, 1, tzinfo=FixedOffset(540)), + datetime(2018, 1, 4, 9, 2, tzinfo=FixedOffset(540)), ], dtype=object, ) @@ -1006,7 +1009,7 @@ def test_mixed_offsets_with_native_datetime_raises(self): def test_non_iso_strings_with_tz_offset(self): result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2) expected = DatetimeIndex( - [datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2 + [datetime(2018, 3, 1, 12, tzinfo=FixedOffset(240))] * 2 ) tm.assert_index_equal(result, expected) @@ -2079,12 +2082,12 @@ def test_parsers_time(self): [ ( "2013-01-01 05:45+0545", - pytz.FixedOffset(345), + FixedOffset(345), "Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')", ), ( "2013-01-01 05:30+0530", - pytz.FixedOffset(330), + FixedOffset(330), "Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')", ), ],
xref https://github.com/pandas-dev/pandas/pull/28914#issuecomment-541064775 @WillAyd we _could_ use `from pytz import FixedOffset, ...` so that only a single `type: ignores` is required.
https://api.github.com/repos/pandas-dev/pandas/pulls/28932
2019-10-11T17:27:06Z
2019-10-12T17:14:45Z
null
2019-10-12T17:14:45Z
add uint64 support for some libgroupby funcs
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 6b434b6470581..f052feea0bbf3 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -16,6 +16,7 @@ ctypedef fused rank_t: float64_t float32_t int64_t + uint64_t object @@ -34,6 +35,7 @@ def group_last(rank_t[:, :] out, rank_t val ndarray[rank_t, ndim=2] resx ndarray[int64_t, ndim=2] nobs + bint runtime_error = False assert min_count == -1, "'min_count' only used in add and prod" @@ -106,11 +108,20 @@ def group_last(rank_t[:, :] out, if nobs[i, j] == 0: if rank_t is int64_t: out[i, j] = NPY_NAT + elif rank_t is uint64_t: + runtime_error = True + break else: out[i, j] = NAN else: out[i, j] = resx[i, j] + if runtime_error: + # We cannot raise directly above because that is within a nogil + # block. + raise RuntimeError("empty group with uint64_t") + + group_last_float64 = group_last["float64_t"] group_last_float32 = group_last["float32_t"] group_last_int64 = group_last["int64_t"] @@ -132,6 +143,7 @@ def group_nth(rank_t[:, :] out, rank_t val ndarray[rank_t, ndim=2] resx ndarray[int64_t, ndim=2] nobs + bint runtime_error = False assert min_count == -1, "'min_count' only used in add and prod" @@ -199,11 +211,19 @@ def group_nth(rank_t[:, :] out, if nobs[i, j] == 0: if rank_t is int64_t: out[i, j] = NPY_NAT + elif rank_t is uint64_t: + runtime_error = True + break else: out[i, j] = NAN else: out[i, j] = resx[i, j] + if runtime_error: + # We cannot raise directly above because that is within a nogil + # block. 
+ raise RuntimeError("empty group with uint64_t") + group_nth_float64 = group_nth["float64_t"] group_nth_float32 = group_nth["float32_t"] @@ -282,12 +302,16 @@ def group_rank(float64_t[:, :] out, if ascending ^ (na_option == 'top'): if rank_t is int64_t: nan_fill_val = np.iinfo(np.int64).max + elif rank_t is uint64_t: + nan_fill_val = np.iinfo(np.uint64).max else: nan_fill_val = np.inf order = (masked_vals, mask, labels) else: if rank_t is int64_t: nan_fill_val = np.iinfo(np.int64).min + elif rank_t is uint64_t: + nan_fill_val = 0 else: nan_fill_val = -np.inf @@ -397,6 +421,7 @@ def group_rank(float64_t[:, :] out, group_rank_float64 = group_rank["float64_t"] group_rank_float32 = group_rank["float32_t"] group_rank_int64 = group_rank["int64_t"] +group_rank_uint64 = group_rank["uint64_t"] # Note: we do not have a group_rank_object because that would require a # not-nogil implementation, see GH#19560 @@ -410,6 +435,7 @@ ctypedef fused groupby_t: float64_t float32_t int64_t + uint64_t @cython.wraparound(False) @@ -426,6 +452,7 @@ def group_max(groupby_t[:, :] out, Py_ssize_t i, j, N, K, lab, ncounts = len(counts) groupby_t val, count, nan_val ndarray[groupby_t, ndim=2] maxx, nobs + bint runtime_error = False assert min_count == -1, "'min_count' only used in add and prod" @@ -439,6 +466,11 @@ def group_max(groupby_t[:, :] out, # Note: evaluated at compile-time maxx[:] = -_int64_max nan_val = NPY_NAT + elif groupby_t is uint64_t: + # NB: We do not define nan_val because there is no such thing + # for uint64_t. We carefully avoid having to reference it in this + # case. 
+ maxx[:] = 0 else: maxx[:] = -np.inf nan_val = NAN @@ -462,7 +494,7 @@ def group_max(groupby_t[:, :] out, if val > maxx[lab, j]: maxx[lab, j] = val else: - if val == val and val != nan_val: + if val == val: nobs[lab, j] += 1 if val > maxx[lab, j]: maxx[lab, j] = val @@ -470,10 +502,18 @@ def group_max(groupby_t[:, :] out, for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: + if groupby_t is uint64_t: + runtime_error = True + break out[i, j] = nan_val else: out[i, j] = maxx[i, j] + if runtime_error: + # We cannot raise directly above because that is within a nogil + # block. + raise RuntimeError("empty group with uint64_t") + @cython.wraparound(False) @cython.boundscheck(False) @@ -489,6 +529,7 @@ def group_min(groupby_t[:, :] out, Py_ssize_t i, j, N, K, lab, ncounts = len(counts) groupby_t val, count, nan_val ndarray[groupby_t, ndim=2] minx, nobs + bint runtime_error = False assert min_count == -1, "'min_count' only used in add and prod" @@ -501,6 +542,11 @@ def group_min(groupby_t[:, :] out, if groupby_t is int64_t: minx[:] = _int64_max nan_val = NPY_NAT + elif groupby_t is uint64_t: + # NB: We do not define nan_val because there is no such thing + # for uint64_t. We carefully avoid having to reference it in this + # case. + minx[:] = np.iinfo(np.uint64).max else: minx[:] = np.inf nan_val = NAN @@ -524,7 +570,7 @@ def group_min(groupby_t[:, :] out, if val < minx[lab, j]: minx[lab, j] = val else: - if val == val and val != nan_val: + if val == val: nobs[lab, j] += 1 if val < minx[lab, j]: minx[lab, j] = val @@ -532,10 +578,18 @@ def group_min(groupby_t[:, :] out, for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: + if groupby_t is uint64_t: + runtime_error = True + break out[i, j] = nan_val else: out[i, j] = minx[i, j] + if runtime_error: + # We cannot raise directly above because that is within a nogil + # block. 
+ raise RuntimeError("empty group with uint64_t") + @cython.boundscheck(False) @cython.wraparound(False) @@ -575,6 +629,8 @@ def group_cummin(groupby_t[:, :] out, accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) if groupby_t is int64_t: accum[:] = _int64_max + elif groupby_t is uint64_t: + accum[:] = np.iinfo(np.uint64).max else: accum[:] = np.inf @@ -642,6 +698,8 @@ def group_cummax(groupby_t[:, :] out, accum = np.empty((ngroups, K), dtype=np.asarray(values).dtype) if groupby_t is int64_t: accum[:] = -_int64_max + elif groupby_t is uint64_t: + accum[:] = 0 else: accum[:] = -np.inf diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index cc297629a7004..b69a9cd87c025 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1361,7 +1361,15 @@ def f(self, **kwargs): return self._cython_agg_general(alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) + except DataError: + pass except Exception: + # TODO: the remaining test cases that get here are from: + # - AttributeError from _cython_agg_blocks bug passing + # DataFrame to make_block; see GH#28275 + # - TypeError in _cython_operation calling ensure_float64 + # on object array containing complex numbers; + # see test_groupby_complex, test_max_nan_bug pass # apply a non-cython aggregation diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index afb22a732691c..571e710ba8928 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -378,7 +378,7 @@ def test_median_empty_bins(observed): @pytest.mark.parametrize( - "dtype", ["int8", "int16", "int32", "int64", "float32", "float64"] + "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"] ) @pytest.mark.parametrize( "method,data",
cc @WillAyd I hope you agree the runtime_error thing here is easier to implement/review with fused type than it would be with tempita.
https://api.github.com/repos/pandas-dev/pandas/pulls/28931
2019-10-11T17:19:03Z
2019-10-16T12:42:48Z
2019-10-16T12:42:48Z
2019-10-16T15:25:04Z
ENH: Support for specifying col names for col_space
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5200ad0ba0d23..e9878692784d9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2188,6 +2188,7 @@ def to_html( buf=None, columns=None, col_space=None, + col_space_cols=None, header=True, index=True, na_rep="NaN", @@ -2244,6 +2245,7 @@ def to_html( self, columns=columns, col_space=col_space, + col_space_cols=col_space_cols, na_rep=na_rep, formatters=formatters, float_format=float_format, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ad62c56a337b6..40b7d68595804 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -533,6 +533,7 @@ def __init__( frame: "DataFrame", columns: Optional[Sequence[str]] = None, col_space: Optional[Union[str, int]] = None, + col_space_cols: Optional[Sequence[str]] = None, header: Union[bool, Sequence[str]] = True, index: bool = True, na_rep: str = "NaN", @@ -575,6 +576,7 @@ def __init__( self.na_rep = na_rep self.decimal = decimal self.col_space = col_space + self.col_space_cols = col_space_cols self.header = header self.index = index self.line_width = line_width diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 50fa4796f8d72..999e28828f9b4 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -100,7 +100,7 @@ def write_th( self, s: Any, header: bool = False, indent: int = 0, tags: Optional[str] = None ) -> None: """ - Method for writting a formatted <th> cell. + Method for writing a formatted <th> cell. If col_space is set on the formatter then that is used for the value of min-width. @@ -122,8 +122,9 @@ def write_th( A written <th> cell. 
""" if header and self.fmt.col_space is not None: - tags = tags or "" - tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space) + if self.fmt.col_space_cols is None or s in self.fmt.col_space_cols: + tags = tags or "" + tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space) self._write_cell(s, kind="th", indent=indent, tags=tags)
**TO UPDATE FOLLOWING THINGS** - [ ] closes #28917 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28929
2019-10-11T16:22:35Z
2019-12-04T09:03:32Z
null
2019-12-04T09:03:32Z
ENH: pd.MultiIndex.get_loc(np.nan)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 5b4761c3bc6c5..09d9a1d7ef322 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -903,6 +903,7 @@ Indexing - Bug when indexing with ``.loc`` where the index was a :class:`CategoricalIndex` with non-string categories didn't work (:issue:`17569`, :issue:`30225`) - :meth:`Index.get_indexer_non_unique` could fail with `TypeError` in some cases, such as when searching for ints in a string index (:issue:`28257`) - Bug in :meth:`Float64Index.get_loc` incorrectly raising ``TypeError`` instead of ``KeyError`` (:issue:`29189`) +- :meth:`MultiIndex.get_loc` can't find missing values when input includes missing values (:issue:`19132`) - Bug in :meth:`Series.__setitem__` incorrectly assigning values with boolean indexer when the length of new data matches the number of ``True`` values and new data is not a ``Series`` or an ``np.array`` (:issue:`30567`) Missing diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index db9806a046305..d3e0cc7b041ba 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2539,7 +2539,7 @@ def _partial_tup_index(self, tup, side="left"): for k, (lab, lev, labs) in enumerate(zipped): section = labs[start:end] - if lab not in lev: + if lab not in lev and not isna(lab): if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)): raise TypeError(f"Level type mismatch: {lab}") @@ -2549,13 +2549,38 @@ def _partial_tup_index(self, tup, side="left"): loc -= 1 return start + section.searchsorted(loc, side=side) - idx = lev.get_loc(lab) + idx = self._get_loc_single_level_index(lev, lab) if k < n - 1: end = start + section.searchsorted(idx, side="right") start = start + section.searchsorted(idx, side="left") else: return start + section.searchsorted(idx, side=side) + def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int: + """ + If key is NA value, location of index 
unify as -1. + + Parameters + ---------- + level_index: Index + key : label + + Returns + ------- + loc : int + If key is NA value, loc is -1 + Else, location of key in index. + + See Also + -------- + Index.get_loc : The get_loc method for (single-level) index. + """ + + if is_scalar(key) and isna(key): + return -1 + else: + return level_index.get_loc(key) + def get_loc(self, key, method=None): """ Get location for a label or a tuple of labels as an integer, slice or @@ -2654,7 +2679,9 @@ def _maybe_to_slice(loc): loc = np.arange(start, stop, dtype="int64") for i, k in enumerate(follow_key, len(lead_key)): - mask = self.codes[i][loc] == self.levels[i].get_loc(k) + mask = self.codes[i][loc] == self._get_loc_single_level_index( + self.levels[i], k + ) if not mask.all(): loc = loc[mask] if not len(loc): @@ -2882,7 +2909,7 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): else: - code = level_index.get_loc(key) + code = self._get_loc_single_level_index(level_index, key) if level > 0 or self.lexsort_depth == 0: # Desired level is not sorted @@ -3377,14 +3404,11 @@ def isin(self, values, level=None): return algos.isin(self.values, values) else: num = self._get_level_number(level) - levs = self.levels[num] - level_codes = self.codes[num] + levs = self.get_level_values(num) - sought_labels = levs.isin(values).nonzero()[0] if levs.size == 0: - return np.zeros(len(level_codes), dtype=np.bool_) - else: - return np.lib.arraysetops.in1d(level_codes, sought_labels) + return np.zeros(len(levs), dtype=np.bool_) + return levs.isin(values) MultiIndex._add_numeric_methods_disabled() diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index 4b0895c823b8b..49aa63210cd5e 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -98,3 +98,27 @@ def test_isin_level_kwarg(): with pytest.raises(KeyError, match="'Level C not found'"): idx.isin(vals_1, 
level="C") + + +def test_contains_with_missing_value(): + # issue 19132 + idx = MultiIndex.from_arrays([[1, np.nan, 2]]) + assert np.nan in idx + + idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]]) + assert np.nan not in idx + assert (1, np.nan) in idx + + +@pytest.mark.parametrize( + "labels,expected,level", + [ + ([("b", np.nan)], np.array([False, False, True]), None,), + ([np.nan, "a"], np.array([True, True, False]), 0), + (["d", np.nan], np.array([False, True, True]), 1), + ], +) +def test_isin_multi_index_with_missing_value(labels, expected, level): + # GH 19132 + midx = MultiIndex.from_arrays([[np.nan, "a", "b"], ["c", "d", np.nan]]) + tm.assert_numpy_array_equal(midx.isin(labels, level=level), expected) diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 176d47a3bdb9b..ad6f06d065150 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -437,3 +437,91 @@ def test_timestamp_multiindex_indexer(): ) should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo") tm.assert_series_equal(result, should_be) + + +def test_get_loc_with_values_including_missing_values(): + # issue 19132 + idx = MultiIndex.from_product([[np.nan, 1]] * 2) + expected = slice(0, 2, None) + assert idx.get_loc(np.nan) == expected + + idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]]) + expected = np.array([True, False, False, True]) + tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected) + + idx = MultiIndex.from_product([[np.nan, 1]] * 3) + expected = slice(2, 4, None) + assert idx.get_loc((np.nan, 1)) == expected + + +@pytest.mark.parametrize( + "index_arr,labels,expected", + [ + ( + [[1, np.nan, 2], [3, 4, 5]], + [1, np.nan, 2], + np.array([-1, -1, -1], dtype=np.intp), + ), + ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)), + ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)), + ( + [[1, 2, 3], 
[np.nan, 4, 5]], + [np.nan, 4, 5], + np.array([-1, -1, -1], dtype=np.intp), + ), + ], +) +def test_get_indexer_with_missing_value(index_arr, labels, expected): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.get_indexer(labels) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "index_arr,expected,target,algo", + [ + ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"), + ([[np.nan, "a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"), + ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"), + ], +) +def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.get_slice_bound(target, side=algo, kind="loc") + assert result == expected + + +@pytest.mark.parametrize( + "index_arr,expected,start_idx,end_idx", + [ + ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1), + ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)), + ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3), + ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)), + ], +) +def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx): + # issue 19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.slice_indexer(start=start_idx, end=end_idx) + assert result == expected + + +@pytest.mark.parametrize( + "index_arr,expected,start_idx,end_idx", + [ + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None), + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"), + ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"), + ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")), + ], +) +def test_slice_locs_with_missing_value(index_arr, expected, start_idx, end_idx): + # issue 
19132 + idx = MultiIndex.from_arrays(index_arr) + result = idx.slice_locs(start=start_idx, end=end_idx) + assert result == expected
MultiIndex.get_loc could not find nan with values including missing values as a input. Background: In `MultiIndex`, missing value is denoted by -1 in codes and doesn't exist in `self.levels` So, could not find NA value in `self.levels`. Before PR xref #28783 - [x] closes #19132 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28919
2019-10-11T07:35:50Z
2020-01-09T03:11:43Z
2020-01-09T03:11:42Z
2020-01-09T10:18:40Z
BUG: Avoid undefined behaviour when converting from float to timedelta
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index eb442e8bf3486..09b80d1b3a9ac 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -360,7 +360,7 @@ def _wrap_results(result, dtype, fill_value=None): result = tslibs.Timedelta(result, unit="ns") else: - result = result.astype("i8").view(dtype) + result = result.astype("m8[ns]").view(dtype) return result
Summation of timedelta series with NaTs in them result in undefined behaviour because the final wrapping step of the summation ends up converting the NaNs in the sum through a direct cast to int64. This cast is undefined for NaN and just happens to work on x86_64 because of the way `cvttd2si` works. On Aarch64, the corresponding `fcvtzs` sets the result to 0 on undefined input. This fix trivially sets the conversion target to m8 instead of i8 so that numpy correctly casts from NaN to NaT. Note that the fix in numpy for the same is pending in PR numpy/numpy#14669 . There is an existing test (test_sum_nanops_timedelta in frame/test_analytics.py) that exercises this bug and has been verified to have been fixed with this and the numpy patch. - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28918
2019-10-11T06:28:40Z
2019-10-12T17:08:43Z
2019-10-12T17:08:43Z
2019-10-12T17:09:10Z
TYPES: add types in core.util.hashing
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index ca5279e93f678..b503c6a7c3e21 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -5,8 +5,8 @@ import numpy as np +from pandas._libs import Timestamp import pandas._libs.hashing as hashing -import pandas._libs.tslibs as tslibs from pandas.core.dtypes.cast import infer_dtype_from_scalar from pandas.core.dtypes.common import ( @@ -26,13 +26,19 @@ _default_hash_key = "0123456789123456" -def _combine_hash_arrays(arrays, num_items: int): +# Note: The return type is technically a np.uint64, see GH#28916 for +# annotation discussion. +def _combine_hash_arrays(arrays, num_items: int) -> int: """ Parameters ---------- arrays : generator num_items : int + Returns + ------- + np.uint64 + Should be the same as CPython's tupleobject.c """ try: @@ -58,7 +64,7 @@ def hash_pandas_object( obj, index: bool = True, encoding: str = "utf8", - hash_key=None, + hash_key: str = _default_hash_key, categorize: bool = True, ): """ @@ -84,9 +90,6 @@ def hash_pandas_object( """ from pandas import Series - if hash_key is None: - hash_key = _default_hash_key - if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype="uint64", copy=False) @@ -142,7 +145,7 @@ def hash_pandas_object( return h -def hash_tuples(vals, encoding="utf8", hash_key=None): +def hash_tuples(vals, encoding: str = "utf8", hash_key: str = _default_hash_key): """ Hash an MultiIndex / list-of-tuples efficiently @@ -187,7 +190,7 @@ def hash_tuples(vals, encoding="utf8", hash_key=None): return h -def hash_tuple(val, encoding: str = "utf8", hash_key=None): +def hash_tuple(val, encoding: str = "utf8", hash_key: str = _default_hash_key): """ Hash a single tuple efficiently @@ -247,7 +250,12 @@ def _hash_categorical(c, encoding: str, hash_key: str): return result -def hash_array(vals, encoding: str = "utf8", hash_key=None, categorize: bool = True): +def hash_array( + vals, + encoding: str = "utf8", + 
hash_key: str = _default_hash_key, + categorize: bool = True, +): """ Given a 1d array, return an array of deterministic integers. @@ -273,9 +281,6 @@ def hash_array(vals, encoding: str = "utf8", hash_key=None, categorize: bool = T raise TypeError("must pass a ndarray-like") dtype = vals.dtype - if hash_key is None: - hash_key = _default_hash_key - # For categoricals, we hash the categories, then remap the codes to the # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). @@ -326,9 +331,17 @@ def hash_array(vals, encoding: str = "utf8", hash_key=None, categorize: bool = T return vals -def _hash_scalar(val, encoding: str = "utf8", hash_key=None): +def _hash_scalar( + val, encoding: str = "utf8", hash_key: str = _default_hash_key +) -> np.ndarray: """ - Hash scalar value + Hash scalar value. + + Parameters + ---------- + val : scalar + encoding : str, default "utf8" + hash_key : str, default _default_hash_key Returns ------- @@ -343,8 +356,8 @@ def _hash_scalar(val, encoding: str = "utf8", hash_key=None): # for tz-aware datetimes, we need the underlying naive UTC value and # not the tz aware object or pd extension type (as # infer_dtype_from_scalar would do) - if not isinstance(val, tslibs.Timestamp): - val = tslibs.Timestamp(val) + if not isinstance(val, Timestamp): + val = Timestamp(val) val = val.tz_convert(None) dtype, val = infer_dtype_from_scalar(val)
cc @simonjayhawkins
https://api.github.com/repos/pandas-dev/pandas/pulls/28916
2019-10-11T03:32:30Z
2019-10-12T15:54:45Z
null
2019-11-21T20:00:23Z
DOC: Fix typos in docstrings
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 2da74012de968..b49bb856a2e2b 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -638,7 +638,7 @@ def levels(self): @property def _values(self): - # We override here, since our parent uses _data, which we dont' use. + # We override here, since our parent uses _data, which we don't use. return self.values @property diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 2ecb66bc8f1e4..c6dce77c4d078 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -40,9 +40,9 @@ class TimedeltaDelegateMixin(DatetimelikeDelegateMixin): # Most attrs are dispatched via datetimelike_{ops,methods} - # Some are "raw" methods, the result is not not re-boxed in an Index + # Some are "raw" methods, the result is not re-boxed in an Index # We also have a few "extra" attrs, which may or may not be raw, - # which we we dont' want to expose in the .dt accessor. + # which we don't want to expose in the .dt accessor. _delegate_class = TimedeltaArray _delegated_properties = TimedeltaArray._datetimelike_ops + ["components"] _delegated_methods = TimedeltaArray._datetimelike_methods + [
Fixed three typos in docstrings.
https://api.github.com/repos/pandas-dev/pandas/pulls/28915
2019-10-11T02:49:50Z
2019-10-11T03:53:58Z
2019-10-11T03:53:58Z
2019-10-11T04:11:56Z
TYPING: lockdown test modules passing mypy
diff --git a/setup.cfg b/setup.cfg index 43dbac15f5cfe..9c841b76761f5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -130,5 +130,116 @@ skip = pandas/__init__.py,pandas/core/api.py ignore_missing_imports=True no_implicit_optional=True -[mypy-pandas.conftest,pandas.tests.*] +[mypy-pandas.conftest] +ignore_errors=True + +[mypy-pandas.tests.api.test_api] +ignore_errors=True + +[mypy-pandas.tests.arithmetic.test_datetime64] +ignore_errors=True + +[mypy-pandas.tests.arrays.test_array] +ignore_errors=True + +[mypy-pandas.tests.arrays.test_datetimelike] +ignore_errors=True + +[mypy-pandas.tests.arrays.test_period] +ignore_errors=True + +[mypy-pandas.tests.computation.test_eval] +ignore_errors=True + +[mypy-pandas.tests.dtypes.test_common] +ignore_errors=True + +[mypy-pandas.tests.dtypes.test_inference] +ignore_errors=True + +[mypy-pandas.tests.extension.decimal.test_decimal] +ignore_errors=True + +[mypy-pandas.tests.extension.json.array] +ignore_errors=True + +[mypy-pandas.tests.extension.json.test_json] +ignore_errors=True + +[mypy-pandas.tests.extension.test_numpy] +ignore_errors=True + +[mypy-pandas.tests.extension.test_sparse] +ignore_errors=True + +[mypy-pandas.tests.frame.test_constructors] +ignore_errors=True + +[mypy-pandas.tests.frame.test_convert_to] +ignore_errors=True + +[mypy-pandas.tests.indexes.datetimes.test_datetimelike] +ignore_errors=True + +[mypy-pandas.tests.indexes.interval.test_base] +ignore_errors=True + +[mypy-pandas.tests.indexes.interval.test_interval_tree] +ignore_errors=True + +[mypy-pandas.tests.indexes.period.test_period] +ignore_errors=True + +[mypy-pandas.tests.indexes.test_base] +ignore_errors=True + +[mypy-pandas.tests.indexes.test_category] +ignore_errors=True + +[mypy-pandas.tests.indexes.test_numeric] +ignore_errors=True + +[mypy-pandas.tests.indexes.test_range] +ignore_errors=True + +[mypy-pandas.tests.indexes.timedeltas.test_timedelta] +ignore_errors=True + +[mypy-pandas.tests.indexing.test_coercion] +ignore_errors=True + 
+[mypy-pandas.tests.indexing.test_loc] +ignore_errors=True + +[mypy-pandas.tests.io.json.test_ujson] +ignore_errors=True + +[mypy-pandas.tests.io.parser.conftest] +ignore_errors=True + +[mypy-pandas.tests.io.test_sql] +ignore_errors=True + +[mypy-pandas.tests.plotting.test_backend] +ignore_errors=True + +[mypy-pandas.tests.series.test_constructors] +ignore_errors=True + +[mypy-pandas.tests.series.test_operators] +ignore_errors=True + +[mypy-pandas.tests.test_algos] +ignore_errors=True + +[mypy-pandas.tests.test_base] +ignore_errors=True + +[mypy-pandas.tests.tseries.offsets.test_offsets] +ignore_errors=True + +[mypy-pandas.tests.tseries.offsets.test_offsets_properties] +ignore_errors=True + +[mypy-pandas.tests.tseries.offsets.test_yqm_offsets] ignore_errors=True
xref https://github.com/pandas-dev/pandas/pull/28904#discussion_r333740051 and https://github.com/pandas-dev/pandas/pull/28746#discussion_r330499267
https://api.github.com/repos/pandas-dev/pandas/pulls/28914
2019-10-11T00:02:00Z
2019-10-11T11:56:58Z
2019-10-11T11:56:58Z
2019-10-11T13:43:35Z
CLN: simplify maybe_promote in float and complex cases
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 1e353c97be754..40db53016fb62 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -405,11 +405,8 @@ def maybe_promote(dtype, fill_value=np.nan): dtype = np.min_scalar_type(fill_value) elif dtype.kind == "c": - if not np.can_cast(fill_value, dtype): - if np.can_cast(fill_value, np.dtype("c16")): - dtype = np.dtype(np.complex128) - else: - dtype = np.dtype(np.object_) + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) if dtype.kind == "c" and not np.isnan(fill_value): fill_value = dtype.type(fill_value) @@ -490,16 +487,8 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif issubclass(dtype.type, (np.integer, np.floating)): - c8 = np.dtype(np.complex64) - info = np.finfo(dtype) if dtype.kind == "f" else np.iinfo(dtype) - if ( - np.can_cast(fill_value, c8) - and np.can_cast(info.min, c8) - and np.can_cast(info.max, c8) - ): - dtype = np.dtype(np.complex64) - else: - dtype = np.dtype(np.complex128) + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) elif dtype.kind == "c": mst = np.min_scalar_type(fill_value)
Analogous to #28899 (orthogonal) for complex and float dtypes. I'm pretty sure that after the current crop of maybe_promote PRs goes through we can do another round of consolidating code.
https://api.github.com/repos/pandas-dev/pandas/pulls/28913
2019-10-10T22:13:45Z
2019-10-11T11:57:37Z
2019-10-11T11:57:37Z
2019-10-11T15:29:11Z
Maintain Timezone Awareness with to_json and date_format="iso"
diff --git a/asv_bench/benchmarks/io/json.py b/asv_bench/benchmarks/io/json.py index 5c1d39776b91c..4b7a8bfd103b3 100644 --- a/asv_bench/benchmarks/io/json.py +++ b/asv_bench/benchmarks/io/json.py @@ -67,10 +67,11 @@ class ToJSON(BaseIO): params = [ ["split", "columns", "index", "values", "records"], ["df", "df_date_idx", "df_td_int_ts", "df_int_floats", "df_int_float_str"], + ["epoch", "iso"], ] - param_names = ["orient", "frame"] + param_names = ["orient", "frame", "date_format"] - def setup(self, orient, frame): + def setup(self, orient, frame, date_format): N = 10 ** 5 ncols = 5 index = date_range("20000101", periods=N, freq="H") @@ -115,21 +116,21 @@ def setup(self, orient, frame): index=index, ) - def time_to_json(self, orient, frame): - getattr(self, frame).to_json(self.fname, orient=orient) + def time_to_json(self, orient, frame, date_format): + getattr(self, frame).to_json(self.fname, orient=orient, date_format=date_format) - def peakmem_to_json(self, orient, frame): - getattr(self, frame).to_json(self.fname, orient=orient) + def peakmem_to_json(self, orient, frame, date_format): + getattr(self, frame).to_json(self.fname, orient=orient, date_format=date_format) - def time_to_json_wide(self, orient, frame): + def time_to_json_wide(self, orient, frame, date_format): base_df = getattr(self, frame).copy() df = concat([base_df.iloc[:100]] * 1000, ignore_index=True, axis=1) - df.to_json(self.fname, orient=orient) + df.to_json(self.fname, orient=orient, date_format=date_format) - def peakmem_to_json_wide(self, orient, frame): + def peakmem_to_json_wide(self, orient, frame, date_format): base_df = getattr(self, frame).copy() df = concat([base_df.iloc[:100]] * 1000, ignore_index=True, axis=1) - df.to_json(self.fname, orient=orient) + df.to_json(self.fname, orient=orient, date_format=date_format) class ToJSONLines(BaseIO): diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index cde2a4279cf27..47dc65b2afc6b 100644 --- 
a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -314,6 +314,7 @@ I/O - Bug in :func:`read_hdf` closing stores that it didn't open when Exceptions are raised (:issue:`28699`) - Bug in :meth:`DataFrame.read_json` where using ``orient="index"`` would not maintain the order (:issue:`28557`) - Bug in :meth:`DataFrame.to_html` where the length of the ``formatters`` argument was not verified (:issue:`28469`) +- Bug in :meth:`DataFrame.to_json` where timezone-aware dates were converted to UTC (:issue:`12997`) Plotting ^^^^^^^^ diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 48712dc68829d..7bd20c32bd741 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -436,7 +436,7 @@ static void *PyFloatToDOUBLE(JSOBJ _obj, JSONTypeContext *tc, void *outValue, } static void *PyBytesToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, - size_t *_outLen) { + size_t *_outLen) { PyObject *obj = (PyObject *)_obj; *_outLen = PyBytes_GET_SIZE(obj); return PyBytes_AS_STRING(obj); @@ -462,9 +462,33 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, return PyBytes_AS_STRING(newObj); } +/* +Generic function to serialize date time structs to the appropriate JSON format. + +Parameters +---------- +npy_datetimestruct *dts : Pointer to a struct holding datetime information + (year, month, day, etc...) +JSONTypeContext *tc : Pointer to the context for serialization +void *outValue : Pointer to a JSON serializable value size_t +*_outLen : For C-string output, the length of the string that needs to be + accounted for. +int offset_in_min : Number of minutes the npy_datetimestruct is offset from UTC + +Returns +------- +TODO : This returns a C String for ISO dates while also modifying the cStr for + the type context. That seems buggy and/or unnecessary? 
+ +Notes +----- +In an ideal world we wouldn't have to handle offset_in_min separate from +npy_datetimestruct. Unfortunately npy_datetimestruct does not hold this info, so +we pass it alongside the struct. +*/ static void *PandasDateTimeStructToJSON(npy_datetimestruct *dts, JSONTypeContext *tc, void *outValue, - size_t *_outLen) { + size_t *_outLen, int offset_in_min) { NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; if (((PyObjectEncoder *)tc->encoder)->datetimeIso) { @@ -477,7 +501,8 @@ static void *PandasDateTimeStructToJSON(npy_datetimestruct *dts, return NULL; } - if (!make_iso_8601_datetime(dts, GET_TC(tc)->cStr, *_outLen, base)) { + if (!make_iso_8601_datetime(dts, GET_TC(tc)->cStr, *_outLen, 1, 0, base, + offset_in_min, 0)) { PRINTMARK(); *_outLen = strlen(GET_TC(tc)->cStr); return GET_TC(tc)->cStr; @@ -505,19 +530,74 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, pandas_datetime_to_datetimestruct(obj->obval, (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); - return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); + return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen, 0); } +/* +Top level method for returning the conversion routine for serializing a +datetimestruct to JSON. + +Parameters +---------- +JSOBJ _obj : In all actuality, this is a PyObject* passed from the Object_ type + context; should be a datetime +JSONTypeContext *tc : Pointer to the Type Context at this point in serialization +void *outValue : Pointer to the serializable object; in this scope, can be + either an integer or C-string, + depending on whether or not we are serializing dates to Unix epoch or ISO + format +size_t *_outLen : Pointer to the C-string length of the serializable object. + Should be modified within function body. + +Returns +------- +Function pointer to appropriate serialization routine. + +Notes +----- +For iso_date formats, this passes a npy_datetimestruct to the appropriate +conversion function. 
Unfortunately the npy_datetimestuct does not have timezone +awareness, so the offset from UTC in minutes is passed instead. +*/ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { npy_datetimestruct dts; - PyDateTime_Date *obj = (PyDateTime_Date *)_obj; + PyDateTime_DateTime *obj = (PyDateTime_DateTime *)_obj; PRINTMARK(); if (!convert_pydatetime_to_datetimestruct(obj, &dts)) { PRINTMARK(); - return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); + + long offset_in_min = 0; + PyObject *utcoffset = PyObject_CallMethod(_obj, "utcoffset", NULL); + + if (utcoffset == NULL) { + if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + // 'datetime.date' object has no attribute 'utcoffset' + PyErr_Clear(); + } else { + // Propogate any other errors + return NULL; + } + } else { + if (utcoffset != Py_None) { + PyObject *tot_seconds = + PyObject_CallMethod(utcoffset, "total_seconds", NULL); + + if (tot_seconds == NULL) { + Py_DECREF(utcoffset); + return NULL; + } + + offset_in_min = PyLong_AsLong(tot_seconds) / 60; + Py_DECREF(tot_seconds); + } + Py_DECREF(utcoffset); + } + + return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen, + offset_in_min); } else { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, @@ -535,7 +615,10 @@ static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, pandas_datetime_to_datetimestruct((npy_datetime)GET_TC(tc)->longValue, NPY_FR_ns, &dts); - return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); + + // Because this function is for numpy datetimes which by nature are not + // tz-aware we can pass the offset_in_min as 0 + return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen, 0); } static void *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c index a8a47e2e90f93..c9af316b9a0dc 100644 --- 
a/pandas/_libs/tslibs/src/datetime/np_datetime.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c @@ -28,6 +28,24 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt #include <numpy/ndarraytypes.h> #include "np_datetime.h" +char *_datetime_strings[NPY_DATETIME_NUMUNITS] = { + "Y", + "M", + "W", + "<invalid>", + "D", + "h", + "m", + "s", + "ms", + "us", + "ns", + "ps", + "fs", + "as", + "generic" +}; + #if PY_MAJOR_VERSION >= 3 #define PyInt_AsLong PyLong_AsLong #endif // PyInt_AsLong @@ -321,7 +339,7 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, * Returns -1 on error, 0 on success, and 1 (with no error set) * if obj doesn't have the needed date or datetime attributes. */ -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, +int convert_pydatetime_to_datetimestruct(PyDateTime_DateTime *dtobj, npy_datetimestruct *out) { // Assumes that obj is a valid datetime object PyObject *tmp; diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.h b/pandas/_libs/tslibs/src/datetime/np_datetime.h index 549d38409ca83..32543b80f219f 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime.h +++ b/pandas/_libs/tslibs/src/datetime/np_datetime.h @@ -35,7 +35,7 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, +int convert_pydatetime_to_datetimestruct(PyDateTime_DateTime *dtobj, npy_datetimestruct *out); npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, @@ -48,6 +48,7 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta val, NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result); +extern char *_datetime_strings[NPY_DATETIME_NUMUNITS]; extern const int days_per_month_table[2][12]; // stuff numpy-derived code needs in header diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c index 
54ed6ecff21e2..417e3f6a77217 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c @@ -37,6 +37,169 @@ This file implements string parsing and creation for NumPy datetime. #include "np_datetime.h" #include "np_datetime_strings.h" +/* + * Platform-specific time_t typedef. Some platforms use 32 bit, some use 64 bit + * and we just use the default with the exception of mingw, where we must use + * 64 bit because MSVCRT version 9 does not have the (32 bit) localtime() + * symbol, so we need to use the 64 bit version [1]. + * + * [1] http://thread.gmane.org/gmane.comp.gnu.mingw.user/27011 + */ +#if defined(NPY_MINGW_USE_CUSTOM_MSVCR) + typedef __time64_t NPY_TIME_T; +#else + typedef time_t NPY_TIME_T; +#endif + +/* + * Wraps `localtime` functionality for multiple platforms. This + * converts a time value to a time structure in the local timezone. + * If size(NPY_TIME_T) == 4, then years must be between 1970 and 2038. If + * size(NPY_TIME_T) == 8, then years must be later than 1970. If the years are + * not in this range, then get_localtime() will fail on some platforms. + * + * Returns 0 on success, -1 on failure. + * + * Notes: + * 1) If NPY_TIME_T is 32 bit (i.e. sizeof(NPY_TIME_T) == 4), then the + * maximum year it can represent is 2038 (see [1] for more details). Trying + * to use a higher date like 2041 in the 32 bit "ts" variable below will + * typically result in "ts" being a negative number (corresponding roughly + * to a year ~ 1905). If NPY_TIME_T is 64 bit, then there is no such + * problem in practice. + * 2) If the "ts" argument to localtime() is negative, it represents + * years < 1970 both for 32 and 64 bits (for 32 bits the earliest year it can + * represent is 1901, while 64 bits can represent much earlier years). + * 3) On Linux, localtime() works for negative "ts". 
On Windows and in Wine, + * localtime() as well as the localtime_s() and _localtime64_s() functions + * will fail for any negative "ts" and return a nonzero exit number + * (localtime_s, _localtime64_s) or NULL (localtime). This behavior is the + * same for both 32 and 64 bits. + * + * From this it follows that get_localtime() is only guaranteed to work + * correctly on all platforms for years between 1970 and 2038 for 32bit + * NPY_TIME_T and years higher than 1970 for 64bit NPY_TIME_T. For + * multiplatform code, get_localtime() should never be used outside of this + * range. + * + * [1] https://en.wikipedia.org/wiki/Year_2038_problem + */ +static int +get_localtime(NPY_TIME_T *ts, struct tm *tms) +{ + char *func_name = "<unknown>"; +#if defined(_WIN32) + #if defined(_MSC_VER) && (_MSC_VER >= 1400) + if (localtime_s(tms, ts) != 0) { + func_name = "localtime_s"; + goto fail; + } + #elif defined(NPY_MINGW_USE_CUSTOM_MSVCR) + if (_localtime64_s(tms, ts) != 0) { + func_name = "_localtime64_s"; + goto fail; + } + #else + struct tm *tms_tmp; + tms_tmp = localtime(ts); + if (tms_tmp == NULL) { + func_name = "localtime"; + goto fail; + } + memcpy(tms, tms_tmp, sizeof(struct tm)); + #endif +#else + if (localtime_r(ts, tms) == NULL) { + func_name = "localtime_r"; + goto fail; + } +#endif + + return 0; + +fail: + PyErr_Format(PyExc_OSError, "Failed to use '%s' to convert " + "to a local time", func_name); + return -1; +} + + +/* + * Converts a datetimestruct in UTC to a datetimestruct in local time, + * also returning the timezone offset applied. This function works for any year + * > 1970 on all platforms and both 32 and 64 bits. If the year < 1970, then it + * will fail on some platforms. + * + * Returns 0 on success, -1 on failure. 
+ */ +static int +convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local, + const npy_datetimestruct *dts_utc, int *out_timezone_offset) +{ + NPY_TIME_T rawtime = 0, localrawtime; + struct tm tm_; + npy_int64 year_correction = 0; + + /* Make a copy of the input 'dts' to modify */ + *out_dts_local = *dts_utc; + + /* + * For 32 bit NPY_TIME_T, the get_localtime() function does not work for + * years later than 2038, see the comments above get_localtime(). So if the + * year >= 2038, we instead call get_localtime() for the year 2036 or 2037 + * (depending on the leap year) which must work and at the end we add the + * 'year_correction' back. + */ + if (sizeof(NPY_TIME_T) == 4 && out_dts_local->year >= 2038) { + if (is_leapyear(out_dts_local->year)) { + /* 2036 is a leap year */ + year_correction = out_dts_local->year - 2036; + out_dts_local->year -= year_correction; /* = 2036 */ + } + else { + /* 2037 is not a leap year */ + year_correction = out_dts_local->year - 2037; + out_dts_local->year -= year_correction; /* = 2037 */ + } + } + + /* + * Convert everything in 'dts' to a time_t, to minutes precision. + * This is POSIX time, which skips leap-seconds, but because + * we drop the seconds value from the npy_datetimestruct, everything + * is ok for this operation. + */ + rawtime = (NPY_TIME_T)get_datetimestruct_days(out_dts_local) * 24 * 60 * 60; + rawtime += dts_utc->hour * 60 * 60; + rawtime += dts_utc->min * 60; + + /* localtime converts a 'time_t' into a local 'struct tm' */ + if (get_localtime(&rawtime, &tm_) < 0) { + /* This should only fail if year < 1970 on some platforms. 
*/ + return -1; + } + + /* Copy back all the values except seconds */ + out_dts_local->min = tm_.tm_min; + out_dts_local->hour = tm_.tm_hour; + out_dts_local->day = tm_.tm_mday; + out_dts_local->month = tm_.tm_mon + 1; + out_dts_local->year = tm_.tm_year + 1900; + + /* Extract the timezone offset that was applied */ + rawtime /= 60; + localrawtime = (NPY_TIME_T)get_datetimestruct_days(out_dts_local) * 24 * 60; + localrawtime += out_dts_local->hour * 60; + localrawtime += out_dts_local->min; + + *out_timezone_offset = localrawtime - rawtime; + + /* Reapply the year 2038 year correction */ + out_dts_local->year += year_correction; + + return 0; +} + /* * Parses (almost) standard ISO 8601 date strings. The differences are: @@ -590,47 +753,211 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { } +/* + * Finds the largest unit whose value is nonzero, and for which + * the remainder for the rest of the units is zero. + */ +static NPY_DATETIMEUNIT +lossless_unit_from_datetimestruct(npy_datetimestruct *dts) +{ + if (dts->as % 1000 != 0) { + return NPY_FR_as; + } + else if (dts->as != 0) { + return NPY_FR_fs; + } + else if (dts->ps % 1000 != 0) { + return NPY_FR_ps; + } + else if (dts->ps != 0) { + return NPY_FR_ns; + } + else if (dts->us % 1000 != 0) { + return NPY_FR_us; + } + else if (dts->us != 0) { + return NPY_FR_ms; + } + else if (dts->sec != 0) { + return NPY_FR_s; + } + else if (dts->min != 0) { + return NPY_FR_m; + } + else if (dts->hour != 0) { + return NPY_FR_h; + } + else if (dts->day != 1) { + return NPY_FR_D; + } + else if (dts->month != 1) { + return NPY_FR_M; + } + else { + return NPY_FR_Y; + } +} + + /* * Converts an npy_datetimestruct to an (almost) ISO 8601 - * NULL-terminated string using timezone Z (UTC). If the string fits in - * the space exactly, it leaves out the NULL terminator and returns success. + * NULL-terminated string. If the string fits in the space exactly, + * it leaves out the NULL terminator and returns success. 
* * The differences from ISO 8601 are the 'NaT' string, and * the number of year digits is >= 4 instead of strictly 4. * + * If 'local' is non-zero, it produces a string in local time with + * a +-#### timezone offset. If 'local' is zero and 'utc' is non-zero, + * produce a string ending with 'Z' to denote UTC. By default, no time + * zone information is attached. + * * 'base' restricts the output to that unit. Set 'base' to * -1 to auto-detect a base after which all the values are zero. * + * 'tzoffset' is used if 'local' is enabled, and 'tzoffset' is + * set to a value other than -1. This is a manual override for + * the local time zone to use, as an offset in minutes. + * + * 'casting' controls whether data loss is allowed by truncating + * the data to a coarser unit. This interacts with 'local', slightly, + * in order to form a date unit string as a local time, the casting + * must be unsafe. + * * Returns 0 on success, -1 on failure (for example if the output * string was too short). */ -int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - NPY_DATETIMEUNIT base) { +int +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) +{ + npy_datetimestruct dts_local; + int timezone_offset = 0; + char *substr = outstr; - int sublen = outlen; - int tmplen; + npy_intp sublen = outlen; + npy_intp tmplen; + + /* Handle NaT, and treat a datetime with generic units as NaT */ + if (dts->year == NPY_DATETIME_NAT || base == NPY_FR_GENERIC) { + if (outlen < 3) { + goto string_too_short; + } + outstr[0] = 'N'; + outstr[1] = 'a'; + outstr[2] = 'T'; + if (outlen > 3) { + outstr[3] = '\0'; + } + + return 0; + } + + /* + * Only do local time within a reasonable year range. The years + * earlier than 1970 are not made local, because the Windows API + * raises an error when they are attempted (see the comments above the + * get_localtime() function). 
For consistency, this + * restriction is applied to all platforms. + * + * Note that this only affects how the datetime becomes a string. + * The result is still completely unambiguous, it only means + * that datetimes outside this range will not include a time zone + * when they are printed. + */ + if ((dts->year < 1970 || dts->year >= 10000) && tzoffset == -1) { + local = 0; + } + /* Automatically detect a good unit */ + if (base == NPY_FR_ERROR) { + base = lossless_unit_from_datetimestruct(dts); + /* + * If there's a timezone, use at least minutes precision, + * and never split up hours and minutes by default + */ + if ((base < NPY_FR_m && local) || base == NPY_FR_h) { + base = NPY_FR_m; + } + /* Don't split up dates by default */ + else if (base < NPY_FR_D) { + base = NPY_FR_D; + } + } /* * Print weeks with the same precision as days. * * TODO: Could print weeks with YYYY-Www format if the week * epoch is a Monday. */ - if (base == NPY_FR_W) { + else if (base == NPY_FR_W) { base = NPY_FR_D; } -/* YEAR */ -/* - * Can't use PyOS_snprintf, because it always produces a '\0' - * character at the end, and NumPy string types are permitted - * to have data all the way to the end of the buffer. - */ + /* Use the C API to convert from UTC to local time */ + if (local && tzoffset == -1) { + if (convert_datetimestruct_utc_to_local(&dts_local, dts, + &timezone_offset) < 0) { + return -1; + } + + /* Set dts to point to our local time instead of the UTC time */ + dts = &dts_local; + } + /* Use the manually provided tzoffset */ + else if (local) { + /* Make a copy of the npy_datetimestruct we can modify */ + dts_local = *dts; + dts = &dts_local; + + /* Set and apply the required timezone offset */ + timezone_offset = tzoffset; + add_minutes_to_datetimestruct(dts, timezone_offset); + } + + /* + * Now the datetimestruct data is in the final form for + * the string representation, so ensure that the data + * is being cast according to the casting rule. 
+ */ + if (casting != NPY_UNSAFE_CASTING) { + /* Producing a date as a local time is always 'unsafe' */ + if (base <= NPY_FR_D && local) { + PyErr_SetString(PyExc_TypeError, "Cannot create a local " + "timezone-based date string from a NumPy " + "datetime without forcing 'unsafe' casting"); + return -1; + } + /* Only 'unsafe' and 'same_kind' allow data loss */ + else { + NPY_DATETIMEUNIT unitprec; + + unitprec = lossless_unit_from_datetimestruct(dts); + if (casting != NPY_SAME_KIND_CASTING && unitprec > base) { + PyErr_Format(PyExc_TypeError, "Cannot create a " + "string with unit precision '%s' " + "from the NumPy datetime, which has data at " + "unit precision '%s', " + "requires 'unsafe' or 'same_kind' casting", + _datetime_strings[base], + _datetime_strings[unitprec]); + return -1; + } + } + } + + /* YEAR */ + /* + * Can't use PyOS_snprintf, because it always produces a '\0' + * character at the end, and NumPy string types are permitted + * to have data all the way to the end of the buffer. 
+ */ #ifdef _WIN32 tmplen = _snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year); #else tmplen = snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year); -#endif // _WIN32 +#endif /* If it ran out of space or there isn't space for the NULL terminator */ if (tmplen < 0 || tmplen > sublen) { goto string_too_short; @@ -647,15 +974,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* MONTH */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = '-'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->month / 10) + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->month % 10) + '0'); @@ -671,15 +998,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* DAY */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = '-'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->day / 10) + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->day % 10) + '0'); @@ -695,15 +1022,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* HOUR */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = 'T'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->hour / 10) + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->hour % 10) + '0'); @@ -716,15 +1043,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* MINUTE */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = ':'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->min / 10) + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->min % 10) + '0'); @@ -737,15 
+1064,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* SECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = ':'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->sec / 10) + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->sec % 10) + '0'); @@ -758,19 +1085,19 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* MILLISECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = '.'; - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->us / 100000) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->us / 10000) % 10 + '0'); - if (sublen < 4) { + if (sublen < 4 ) { goto string_too_short; } substr[3] = (char)((dts->us / 1000) % 10 + '0'); @@ -783,15 +1110,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* MICROSECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = (char)((dts->us / 100) % 10 + '0'); - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->us / 10) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)(dts->us % 10 + '0'); @@ -804,15 +1131,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* NANOSECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = (char)((dts->ps / 100000) % 10 + '0'); - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->ps / 10000) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->ps / 1000) % 10 + '0'); @@ -825,15 +1152,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* PICOSECOND */ 
- if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = (char)((dts->ps / 100) % 10 + '0'); - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->ps / 10) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)(dts->ps % 10 + '0'); @@ -846,15 +1173,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* FEMTOSECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = (char)((dts->as / 100000) % 10 + '0'); - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->as / 10000) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)((dts->as / 1000) % 10 + '0'); @@ -867,15 +1194,15 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, } /* ATTOSECOND */ - if (sublen < 1) { + if (sublen < 1 ) { goto string_too_short; } substr[0] = (char)((dts->as / 100) % 10 + '0'); - if (sublen < 2) { + if (sublen < 2 ) { goto string_too_short; } substr[1] = (char)((dts->as / 10) % 10 + '0'); - if (sublen < 3) { + if (sublen < 3 ) { goto string_too_short; } substr[2] = (char)(dts->as % 10 + '0'); @@ -883,13 +1210,50 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; add_time_zone: + if (local) { + /* Add the +/- sign */ + if (sublen < 1) { + goto string_too_short; + } + if (timezone_offset < 0) { + substr[0] = '-'; + timezone_offset = -timezone_offset; + } + else { + substr[0] = '+'; + } + substr += 1; + sublen -= 1; + + /* Add the timezone offset */ + if (sublen < 1 ) { + goto string_too_short; + } + substr[0] = (char)((timezone_offset / (10*60)) % 10 + '0'); + if (sublen < 2 ) { + goto string_too_short; + } + substr[1] = (char)((timezone_offset / 60) % 10 + '0'); + if (sublen < 3 ) { + goto string_too_short; + } + substr[2] = (char)(((timezone_offset % 60) / 10) % 10 + '0'); + if 
(sublen < 3 ) { + goto string_too_short; + } + substr[3] = (char)((timezone_offset % 60) % 10 + '0'); + substr += 4; + sublen -= 4; + } /* UTC "Zulu" time */ - if (sublen < 1) { - goto string_too_short; + else if (utc) { + if (sublen < 1) { + goto string_too_short; + } + substr[0] = 'Z'; + substr += 1; + sublen -= 1; } - substr[0] = 'Z'; - substr += 1; - sublen -= 1; /* Add a NULL terminator, and return */ if (sublen > 0) { @@ -900,8 +1264,8 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, string_too_short: PyErr_Format(PyExc_RuntimeError, - "The string provided for NumPy ISO datetime formatting " - "was too short, with length %d", - outlen); + "The string provided for NumPy ISO datetime formatting " + "was too short, with length %"NPY_INTP_FMT, + outlen); return -1; } diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h index 880c34ea77638..98368ce020000 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.h @@ -68,15 +68,28 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); /* * Converts an npy_datetimestruct to an (almost) ISO 8601 - * NULL-terminated string using timezone Z (UTC). + * NULL-terminated string. + * + * If 'local' is non-zero, it produces a string in local time with + * a +-#### timezone offset, otherwise it uses timezone Z (UTC). * * 'base' restricts the output to that unit. Set 'base' to * -1 to auto-detect a base after which all the values are zero. * + * 'tzoffset' is used if 'local' is enabled, and 'tzoffset' is + * set to a value other than -1. This is a manual override for + * the local time zone to use, as an offset in minutes. + * + * 'casting' controls whether data loss is allowed by truncating + * the data to a coarser unit. 
This interacts with 'local', slightly, + * in order to form a date unit string as a local time, the casting + * must be unsafe. + * * Returns 0 on success, -1 on failure (for example if the output * string was too short). */ int -make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, - NPY_DATETIMEUNIT base); +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting); #endif // PANDAS__LIBS_TSLIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_ diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 569e299860614..0dc3f172433df 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -285,12 +285,12 @@ def test_to_json(self): ("idx", 0), ("A", 1), ("B", "a"), - ("C", "2016-01-01T00:00:00.000Z"), + ("C", "2016-01-01T00:00:00.000+0000"), ("D", "P0DT1H0M0S"), ("E", "a"), ("F", "a"), ("G", 1.0), - ("H", "2016-01-01T06:00:00.000Z"), + ("H", "2016-01-01T00:00:00.000-0600"), ] ), OrderedDict( @@ -298,12 +298,12 @@ def test_to_json(self): ("idx", 1), ("A", 2), ("B", "b"), - ("C", "2016-01-02T00:00:00.000Z"), + ("C", "2016-01-02T00:00:00.000+0000"), ("D", "P0DT1H1M0S"), ("E", "b"), ("F", "b"), ("G", 2.0), - ("H", "2016-01-02T06:00:00.000Z"), + ("H", "2016-01-02T00:00:00.000-0600"), ] ), OrderedDict( @@ -311,12 +311,12 @@ def test_to_json(self): ("idx", 2), ("A", 3), ("B", "c"), - ("C", "2016-01-03T00:00:00.000Z"), + ("C", "2016-01-03T00:00:00.000+0000"), ("D", "P0DT1H2M0S"), ("E", "c"), ("F", "c"), ("G", 3.0), - ("H", "2016-01-03T06:00:00.000Z"), + ("H", "2016-01-03T00:00:00.000-0600"), ] ), OrderedDict( @@ -324,12 +324,12 @@ def test_to_json(self): ("idx", 3), ("A", 4), ("B", "c"), - ("C", "2016-01-04T00:00:00.000Z"), + ("C", "2016-01-04T00:00:00.000+0000"), ("D", "P0DT1H3M0S"), ("E", "c"), ("F", "c"), ("G", 4.0), - ("H", 
"2016-01-04T06:00:00.000Z"), + ("H", "2016-01-04T00:00:00.000-0600"), ] ), ] @@ -381,8 +381,8 @@ def test_to_json_period_index(self): schema = {"fields": fields, "primaryKey": ["index"]} data = [ - OrderedDict([("index", "2015-11-01T00:00:00.000Z"), ("values", 1)]), - OrderedDict([("index", "2016-02-01T00:00:00.000Z"), ("values", 1)]), + OrderedDict([("index", "2015-11-01T00:00:00.000+0000"), ("values", 1)]), + OrderedDict([("index", "2016-02-01T00:00:00.000+0000"), ("values", 1)]), ] expected = OrderedDict([("schema", schema), ("data", data)]) @@ -612,7 +612,7 @@ def test_timestamp_in_columns(self): ) result = df.to_json(orient="table") js = json.loads(result) - assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000Z" + assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000+0000" # TODO - below expectation is not correct; see GH 28256 assert js["schema"]["fields"][2]["name"] == 10000 diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 8e28740c70bad..0844f25f6edec 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1154,38 +1154,32 @@ def test_sparse(self): assert expected == ss.to_json() @pytest.mark.parametrize( - "ts", + "ts,expected", [ - Timestamp("2013-01-10 05:00:00Z"), - Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), - Timestamp("2013-01-10 00:00:00-0500"), + (Timestamp("2013-01-10 05:00:00Z"), '"2013-01-10T05:00:00.000+0000"'), + ( + Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), + '"2013-01-10T00:00:00.000-0500"', + ), + (Timestamp("2013-01-10 00:00:00-0500"), '"2013-01-10T00:00:00.000-0500"'), ], ) - def test_tz_is_utc(self, ts): + def test_tz_utc_offsets(self, ts, expected): from pandas.io.json import dumps - exp = '"2013-01-10T05:00:00.000Z"' - - assert dumps(ts, iso_dates=True) == exp + assert dumps(ts, iso_dates=True) == expected dt = ts.to_pydatetime() - assert dumps(dt, iso_dates=True) == exp + assert dumps(dt, iso_dates=True) == 
expected - @pytest.mark.parametrize( - "tz_range", - [ - pd.date_range("2013-01-01 05:00:00Z", periods=2), - pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), - pd.date_range("2013-01-01 00:00:00-0500", periods=2), - ], - ) - def test_tz_range_is_utc(self, tz_range): + def test_tz_range_is_utc(self): from pandas.io.json import dumps - exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]' + tz_range = pd.date_range("2013-01-01 05:00:00Z", periods=2) + exp = '["2013-01-01T05:00:00.000+0000","2013-01-02T05:00:00.000+0000"]' dfexp = ( '{"DT":{' - '"0":"2013-01-01T05:00:00.000Z",' - '"1":"2013-01-02T05:00:00.000Z"}}' + '"0":"2013-01-01T05:00:00.000+0000",' + '"1":"2013-01-02T05:00:00.000+0000"}}' ) assert dumps(tz_range, iso_dates=True) == exp @@ -1195,6 +1189,14 @@ def test_tz_range_is_utc(self, tz_range): result = dumps(df, iso_dates=True) assert result == dfexp + def test_datetime_tz_iso_maintains_offset(self, orient): + # GH 12997 + tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern") + df = DataFrame(tz_range, columns=["date"]) + result = df.to_json(orient=orient, date_format="iso") + + assert "2013-01-01T00:00:00.000-0500" in result + def test_read_inline_jsonl(self): # GH9180 result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
- [X] closes #12997 - [X] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry This vendors updates from numpy that allow for tz-aware ISO date formatting. Note this does slightly change the behavior of UTC dates. Previously they would write out as `2013-01-01T05:00:00.000Z` but now are `2013-01-01T05:00:00.000+00:00`. Both are valid ISO 8601 There is a follow up that needs to be addressed with reading these dates
https://api.github.com/repos/pandas-dev/pandas/pulls/28912
2019-10-10T22:11:53Z
2019-10-30T17:09:37Z
null
2020-01-16T00:33:47Z
TYPING: errors reported by mypy 0.730
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index b3c7b8a7c8b9f..3a36713ccdbda 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -68,7 +68,11 @@ def load_reduce(self): class _LoadSparseSeries: # To load a SparseSeries as a Series[Sparse] - def __new__(cls) -> "Series": + + # https://github.com/python/mypy/issues/1020 + # error: Incompatible return type for "__new__" (returns "Series", but must return + # a subtype of "_LoadSparseSeries") + def __new__(cls) -> "Series": # type: ignore from pandas import Series warnings.warn( @@ -82,7 +86,11 @@ def __new__(cls) -> "Series": class _LoadSparseFrame: # To load a SparseDataFrame as a DataFrame[Sparse] - def __new__(cls) -> "DataFrame": + + # https://github.com/python/mypy/issues/1020 + # error: Incompatible return type for "__new__" (returns "DataFrame", but must + # return a subtype of "_LoadSparseFrame") + def __new__(cls) -> "DataFrame": # type: ignore from pandas import DataFrame warnings.warn(
errors reported by mypy 0.730 on master. ``` pandas\compat\pickle_compat.py:71: error: Incompatible return type for "__new__" (returns "Series", but must return a subtype of "_LoadSparseSeries") pandas\compat\pickle_compat.py:85: error: Incompatible return type for "__new__" (returns "DataFrame", but must return a subtype of "_LoadSparseFrame") ``` probably makes sense to apply these changes now to avoid ci breakage on mypy bump.
https://api.github.com/repos/pandas-dev/pandas/pulls/28910
2019-10-10T21:51:14Z
2019-10-11T11:58:25Z
2019-10-11T11:58:24Z
2019-10-11T13:44:44Z
TYPING: fix type annotation for pandas.io.formats.format._binify
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 15f21814b072d..ad62c56a337b6 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -868,6 +868,8 @@ def _join_multiline(self, *args) -> str: np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 for col in strcols ] + + assert lwidth is not None col_bins = _binify(col_widths, lwidth) nbins = len(col_bins) @@ -1890,7 +1892,7 @@ def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> Non set_option("display.column_space", max(12, accuracy + 9)) -def _binify(cols: List[np.int32], line_width: Union[np.int32, int]) -> List[int]: +def _binify(cols: List[int], line_width: int) -> List[int]: adjoin_width = 1 bins = [] curr_width = 0
- [ ] closes #28843 - [n/a ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [n/a ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28908
2019-10-10T21:33:51Z
2019-10-11T12:41:20Z
2019-10-11T12:41:20Z
2019-10-11T12:41:24Z
Added note to 'contributing.rst file', telling users to append GH Issue…
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index dc6fa3d100212..949b6bd475319 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -949,6 +949,9 @@ the expected correct result:: assert_frame_equal(pivoted, expected) +Please remember to add the Github Issue Number as a comment to a new test. +E.g. "# brief comment, see GH#28907" + Transitioning to ``pytest`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~
… Number to new tests. - [ X ] closes #28703
https://api.github.com/repos/pandas-dev/pandas/pulls/28907
2019-10-10T20:28:37Z
2019-10-12T17:09:57Z
2019-10-12T17:09:57Z
2019-10-12T17:10:03Z
clean tests/indexing/common.py
diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 78764e6763e95..812d84261eb46 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -1,5 +1,4 @@ """ common utilities """ - import itertools from warnings import catch_warnings, filterwarnings @@ -29,7 +28,7 @@ def _axify(obj, key, axis): class Base: """ indexing comprehensive base class """ - _objs = {"series", "frame"} + _kinds = {"series", "frame"} _typs = { "ints", "uints", @@ -101,13 +100,12 @@ def setup_method(self, method): self.series_empty = Series() # form agglomerates - for o in self._objs: - + for kind in self._kinds: d = dict() - for t in self._typs: - d[t] = getattr(self, "{o}_{t}".format(o=o, t=t), None) + for typ in self._typs: + d[typ] = getattr(self, "{kind}_{typ}".format(kind=kind, typ=typ)) - setattr(self, o, d) + setattr(self, kind, d) def generate_indices(self, f, values=False): """ generate the indices @@ -117,7 +115,7 @@ def generate_indices(self, f, values=False): axes = f.axes if values: - axes = (list(range(len(a))) for a in axes) + axes = (list(range(len(ax))) for ax in axes) return itertools.product(*axes) @@ -186,34 +184,34 @@ def check_result( method2, key2, typs=None, - objs=None, + kinds=None, axes=None, fails=None, ): - def _eq(t, o, a, obj, k1, k2): + def _eq(typ, kind, axis, obj, key1, key2): """ compare equal for these 2 keys """ - - if a is not None and a > obj.ndim - 1: + if axis > obj.ndim - 1: return def _print(result, error=None): - if error is not None: - error = str(error) - v = ( + err = str(error) if error is not None else "" + msg = ( "%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s," "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" - % (name, result, t, o, method1, method2, a, error or "") + % (name, result, typ, kind, method1, method2, axis, err) ) if _verbose: - pprint_thing(v) + pprint_thing(msg) try: - rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a)) + rs = getattr(obj, 
method1).__getitem__(_axify(obj, key1, axis)) with catch_warnings(record=True): filterwarnings("ignore", "\\n.ix", FutureWarning) try: - xp = self.get_result(obj, method2, k2, a) + xp = self.get_result( + obj=obj, method=method2, key=key2, axis=axis + ) except (KeyError, IndexError): # TODO: why is this allowed? result = "no comp" @@ -228,8 +226,8 @@ def _print(result, error=None): else: tm.assert_equal(rs, xp) result = "ok" - except AssertionError as e: - detail = str(e) + except AssertionError as exc: + detail = str(exc) result = "fail" # reverse the checks @@ -258,36 +256,25 @@ def _print(result, error=None): if typs is None: typs = self._typs - if objs is None: - objs = self._objs + if kinds is None: + kinds = self._kinds - if axes is not None: - if not isinstance(axes, (tuple, list)): - axes = [axes] - else: - axes = list(axes) - else: + if axes is None: axes = [0, 1] + elif not isinstance(axes, (tuple, list)): + assert isinstance(axes, int) + axes = [axes] # check - for o in objs: - if o not in self._objs: + for kind in kinds: + if kind not in self._kinds: continue - d = getattr(self, o) - for a in axes: - for t in typs: - if t not in self._typs: + d = getattr(self, kind) + for ax in axes: + for typ in typs: + if typ not in self._typs: continue - obj = d[t] - if obj is None: - continue - - def _call(obj=obj): - obj = obj.copy() - - k2 = key2 - _eq(t, o, a, obj, key1, k2) - - _call() + obj = d[typ] + _eq(typ=typ, kind=kind, axis=ax, obj=obj, key1=key1, key2=key2) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index c3ba5c0545b8b..31120c2c023cc 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -284,7 +284,7 @@ def test_iloc_getitem_dups(self): [0, 1, 1, 3], "ix", {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]}, - objs=["series", "frame"], + kinds=["series", "frame"], typs=["ints", "uints"], ) diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index 
0b8f3af760f1d..532b77d6519c1 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -19,9 +19,9 @@ def _check(f, func, values=False): expected = self.get_value(f, i, values) tm.assert_almost_equal(result, expected) - for o in self._objs: + for kind in self._kinds: - d = getattr(self, o) + d = getattr(self, kind) # iat for f in [d["ints"], d["uints"]]: @@ -47,9 +47,9 @@ def _check(f, func, values=False): expected = self.get_value(f, i, values) tm.assert_almost_equal(expected, 1) - for t in self._objs: + for kind in self._kinds: - d = getattr(self, t) + d = getattr(self, kind) # iat for f in [d["ints"], d["uints"]]:
clean-up of tests/indexing/common.py.
https://api.github.com/repos/pandas-dev/pandas/pulls/28904
2019-10-10T19:40:09Z
2019-10-11T12:00:15Z
2019-10-11T12:00:15Z
2019-10-11T16:07:25Z
REF: simplify maybe_promote integer cases
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 1e353c97be754..1e62527f95bc7 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -423,57 +423,14 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif issubclass(dtype.type, np.integer): - # upcast to prevent overflow - mst = np.min_scalar_type(fill_value) - if mst > dtype: - # np.dtype ordering considers: - # int[n] < int[2*n] - # uint[n] < uint[2*n] - # u?int[n] < object_ - dtype = mst - - elif np.can_cast(fill_value, dtype): - pass - - elif dtype.kind == "u" and mst.kind == "i": + if not np.can_cast(fill_value, dtype): + # upcast to prevent overflow + mst = np.min_scalar_type(fill_value) dtype = np.promote_types(dtype, mst) if dtype.kind == "f": # Case where we disagree with numpy dtype = np.dtype(np.object_) - elif dtype.kind == "i" and mst.kind == "u": - - if fill_value > np.iinfo(np.int64).max: - # object is the only way to represent fill_value and keep - # the range allowed by the given dtype - dtype = np.dtype(np.object_) - - elif mst.itemsize < dtype.itemsize: - pass - - elif dtype.itemsize == mst.itemsize: - # We never cast signed to unsigned because that loses - # parts of the original range, so find the smallest signed - # integer that can hold all of `mst`. - ndt = { - np.int64: np.object_, - np.int32: np.int64, - np.int16: np.int32, - np.int8: np.int16, - }[dtype.type] - dtype = np.dtype(ndt) - - else: - # bump to signed integer dtype that holds all of `mst` range - # Note: we have to use itemsize because some (windows) - # builds don't satisfiy e.g. np.uint32 == np.uint32 - ndt = { - 4: np.int64, - 2: np.int32, - 1: np.int16, # TODO: Test for this case - }[mst.itemsize] - dtype = np.dtype(ndt) - fill_value = dtype.type(fill_value) elif issubclass(dtype.type, np.floating):
@jreback its a little absurd how much this can be simplified after all the trouble I went through to on the last round Let's hope the float cases simplify as nicely (separate branch/PR)
https://api.github.com/repos/pandas-dev/pandas/pulls/28899
2019-10-10T16:47:28Z
2019-10-10T22:53:02Z
2019-10-10T22:53:02Z
2019-10-10T22:58:04Z
REF: maybe_promote refactor/cleanup
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 328c7566d8e8d..90c2638be5eec 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -393,32 +393,29 @@ def maybe_promote(dtype, fill_value=np.nan): elif is_float(fill_value): if issubclass(dtype.type, np.bool_): - dtype = np.object_ + dtype = np.dtype(np.object_) + elif issubclass(dtype.type, np.integer): dtype = np.dtype(np.float64) - if not isna(fill_value): - fill_value = dtype.type(fill_value) elif dtype.kind == "f": - if not np.can_cast(fill_value, dtype): - # e.g. dtype is float32, need float64 - dtype = np.min_scalar_type(fill_value) + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.float64 and dtype is np.float32 + dtype = mst elif dtype.kind == "c": mst = np.min_scalar_type(fill_value) dtype = np.promote_types(dtype, mst) - if dtype.kind == "c" and not np.isnan(fill_value): - fill_value = dtype.type(fill_value) - elif is_bool(fill_value): if not issubclass(dtype.type, np.bool_): - dtype = np.object_ - else: - fill_value = np.bool_(fill_value) + dtype = np.dtype(np.object_) + elif is_integer(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) + elif issubclass(dtype.type, np.integer): if not np.can_cast(fill_value, dtype): # upcast to prevent overflow @@ -428,35 +425,20 @@ def maybe_promote(dtype, fill_value=np.nan): # Case where we disagree with numpy dtype = np.dtype(np.object_) - fill_value = dtype.type(fill_value) - - elif issubclass(dtype.type, np.floating): - # check if we can cast - if _check_lossless_cast(fill_value, dtype): - fill_value = dtype.type(fill_value) - - if dtype.kind in ["c", "f"]: - # e.g. 
if dtype is complex128 and fill_value is 1, we - # want np.complex128(1) - fill_value = dtype.type(fill_value) - elif is_complex(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) + elif issubclass(dtype.type, (np.integer, np.floating)): mst = np.min_scalar_type(fill_value) dtype = np.promote_types(dtype, mst) elif dtype.kind == "c": mst = np.min_scalar_type(fill_value) - if mst > dtype and mst.kind == "c": + if mst > dtype: # e.g. mst is np.complex128 and dtype is np.complex64 dtype = mst - if dtype.kind == "c": - # make sure we have a np.complex and not python complex - fill_value = dtype.type(fill_value) - elif fill_value is None: if is_float_dtype(dtype) or is_complex_dtype(dtype): fill_value = np.nan @@ -466,37 +448,48 @@ def maybe_promote(dtype, fill_value=np.nan): elif is_datetime_or_timedelta_dtype(dtype): fill_value = dtype.type("NaT", "ns") else: - dtype = np.object_ + dtype = np.dtype(np.object_) fill_value = np.nan else: - dtype = np.object_ + dtype = np.dtype(np.object_) # in case we have a string that looked like a number if is_extension_array_dtype(dtype): pass elif issubclass(np.dtype(dtype).type, (bytes, str)): - dtype = np.object_ + dtype = np.dtype(np.object_) + fill_value = _ensure_dtype_type(fill_value, dtype) return dtype, fill_value -def _check_lossless_cast(value, dtype: np.dtype) -> bool: +def _ensure_dtype_type(value, dtype): """ - Check if we can cast the given value to the given dtype _losslesly_. + Ensure that the given value is an instance of the given dtype. + + e.g. if out dtype is np.complex64, we should have an instance of that + as opposed to a python complex object. 
Parameters ---------- value : object - dtype : np.dtype + dtype : np.dtype or ExtensionDtype Returns ------- - bool + object """ - casted = dtype.type(value) - if casted == value: - return True - return False + + # Start with exceptions in which we do _not_ cast to numpy types + if is_extension_array_dtype(dtype): + return value + elif dtype == np.object_: + return value + elif isna(value): + # e.g. keep np.nan rather than try to cast to np.float32(np.nan) + return value + + return dtype.type(value) def infer_dtype_from(val, pandas_dtype=False):
There is some casting we currently do in many places in this function. Instead this PR implements `_ensure_dtype_type` and calls it once at the end of the function. Removes _check_lossless_cast which can be replaced with np.can_cast
https://api.github.com/repos/pandas-dev/pandas/pulls/28897
2019-10-10T15:48:17Z
2019-10-12T17:14:22Z
2019-10-12T17:14:22Z
2019-10-12T18:27:57Z
def item( was broken
diff --git a/pandas/core/base.py b/pandas/core/base.py index 4d5b20c56df5a..afac290ce5a16 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -715,7 +715,7 @@ def item(self): FutureWarning, stacklevel=2, ) - return self.values.item() + return self.values.item(0) @property def data(self):
`pd.DataFrame({"a":[1,2,3]}).a.item() ` -- breaks! `pd.DataFrame({"a":[1,2,3]}).a.values.item(0)` --- this is the actual expected behaviour - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28894
2019-10-10T09:45:53Z
2019-10-22T01:04:18Z
null
2019-10-22T01:04:18Z
DOC: Fix commpiler typo in contributing.rst
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 10d702808606a..dc6fa3d100212 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -172,7 +172,7 @@ installed (or you wish to install a newer version) you can install a compiler yum groupinstall "Development Tools" For other Linux distributions, consult your favourite search engine for -commpiler installation instructions. +compiler installation instructions. Let us know if you have any difficulties by opening an issue or reaching out on `Gitter`_.
Corrects the spelling of compiler in contributing.rst. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28891
2019-10-10T07:23:42Z
2019-10-10T09:20:23Z
2019-10-10T09:20:23Z
2019-10-10T12:43:43Z
DOC: Update performance comparison section of io docs
diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index ef87b6c57b1b9..f8e174abfd193 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -5576,7 +5576,7 @@ Performance considerations -------------------------- This is an informal comparison of various IO methods, using pandas -0.20.3. Timings are machine dependent and small differences should be +0.24.2. Timings are machine dependent and small differences should be ignored. .. code-block:: ipython @@ -5597,11 +5597,18 @@ Given the next test set: .. code-block:: python + + + import numpy as np + import os sz = 1000000 df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz}) + sz = 1000000 + np.random.seed(42) + df = pd.DataFrame({'A': np.random.randn(sz), 'B': [1] * sz}) def test_sql_write(df): if os.path.exists('test.sql'): @@ -5610,151 +5617,152 @@ Given the next test set: df.to_sql(name='test_table', con=sql_db) sql_db.close() - def test_sql_read(): sql_db = sqlite3.connect('test.sql') pd.read_sql_query("select * from test_table", sql_db) sql_db.close() - def test_hdf_fixed_write(df): df.to_hdf('test_fixed.hdf', 'test', mode='w') - def test_hdf_fixed_read(): pd.read_hdf('test_fixed.hdf', 'test') - def test_hdf_fixed_write_compress(df): df.to_hdf('test_fixed_compress.hdf', 'test', mode='w', complib='blosc') - def test_hdf_fixed_read_compress(): pd.read_hdf('test_fixed_compress.hdf', 'test') - def test_hdf_table_write(df): df.to_hdf('test_table.hdf', 'test', mode='w', format='table') - def test_hdf_table_read(): pd.read_hdf('test_table.hdf', 'test') - def test_hdf_table_write_compress(df): df.to_hdf('test_table_compress.hdf', 'test', mode='w', complib='blosc', format='table') - def test_hdf_table_read_compress(): pd.read_hdf('test_table_compress.hdf', 'test') - def test_csv_write(df): df.to_csv('test.csv', mode='w') - def test_csv_read(): pd.read_csv('test.csv', index_col=0) - def test_feather_write(df): df.to_feather('test.feather') - def test_feather_read(): 
pd.read_feather('test.feather') - def test_pickle_write(df): df.to_pickle('test.pkl') - def test_pickle_read(): pd.read_pickle('test.pkl') - def test_pickle_write_compress(df): df.to_pickle('test.pkl.compress', compression='xz') - def test_pickle_read_compress(): pd.read_pickle('test.pkl.compress', compression='xz') -When writing, the top-three functions in terms of speed are are -``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``. + def test_parquet_write(df): + df.to_parquet('test.parquet') + + def test_parquet_read(): + pd.read_parquet('test.parquet') + +When writing, the top-three functions in terms of speed are ``test_feather_write``, ``test_hdf_fixed_write`` and ``test_hdf_fixed_write_compress``. .. code-block:: ipython - In [14]: %timeit test_sql_write(df) - 2.37 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [4]: %timeit test_sql_write(df) + 3.29 s ± 43.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [15]: %timeit test_hdf_fixed_write(df) - 194 ms ± 65.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [5]: %timeit test_hdf_fixed_write(df) + 19.4 ms ± 560 µs per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [26]: %timeit test_hdf_fixed_write_compress(df) - 119 ms ± 2.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [6]: %timeit test_hdf_fixed_write_compress(df) + 19.6 ms ± 308 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [16]: %timeit test_hdf_table_write(df) - 623 ms ± 125 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [7]: %timeit test_hdf_table_write(df) + 449 ms ± 5.61 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [27]: %timeit test_hdf_table_write_compress(df) - 563 ms ± 23.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [8]: %timeit test_hdf_table_write_compress(df) + 448 ms ± 11.9 ms per loop (mean ± std. dev. 
of 7 runs, 1 loop each) - In [17]: %timeit test_csv_write(df) - 3.13 s ± 49.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [9]: %timeit test_csv_write(df) + 3.66 s ± 26.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [30]: %timeit test_feather_write(df) - 103 ms ± 5.88 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [10]: %timeit test_feather_write(df) + 9.75 ms ± 117 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - In [31]: %timeit test_pickle_write(df) - 109 ms ± 3.72 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [11]: %timeit test_pickle_write(df) + 30.1 ms ± 229 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [32]: %timeit test_pickle_write_compress(df) - 3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [12]: %timeit test_pickle_write_compress(df) + 4.29 s ± 15.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [13]: %timeit test_parquet_write(df) + 67.6 ms ± 706 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and ``test_hdf_fixed_read``. + .. code-block:: ipython - In [18]: %timeit test_sql_read() - 1.35 s ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [14]: %timeit test_sql_read() + 1.77 s ± 17.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [15]: %timeit test_hdf_fixed_read() + 19.4 ms ± 436 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [16]: %timeit test_hdf_fixed_read_compress() + 19.5 ms ± 222 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [19]: %timeit test_hdf_fixed_read() - 14.3 ms ± 438 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + In [17]: %timeit test_hdf_table_read() + 38.6 ms ± 857 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [28]: %timeit test_hdf_fixed_read_compress() - 23.5 ms ± 672 µs per loop (mean ± std. dev. 
of 7 runs, 10 loops each) + In [18]: %timeit test_hdf_table_read_compress() + 38.8 ms ± 1.49 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [20]: %timeit test_hdf_table_read() - 35.4 ms ± 314 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [19]: %timeit test_csv_read() + 452 ms ± 9.04 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [29]: %timeit test_hdf_table_read_compress() - 42.6 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + In [20]: %timeit test_feather_read() + 12.4 ms ± 99.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - In [22]: %timeit test_csv_read() - 516 ms ± 27.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + In [21]: %timeit test_pickle_read() + 18.4 ms ± 191 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - In [33]: %timeit test_feather_read() - 4.06 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + In [22]: %timeit test_pickle_read_compress() + 915 ms ± 7.48 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - In [34]: %timeit test_pickle_read() - 6.5 ms ± 172 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + In [23]: %timeit test_parquet_read() + 24.4 ms ± 146 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - In [35]: %timeit test_pickle_read_compress() - 588 ms ± 3.57 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) +For this test case ``test.pkl.compress``, ``test.parquet`` and ``test.feather`` took the least space on disk. Space on disk (in bytes) .. 
code-block:: none - 34816000 Aug 21 18:00 test.sql - 24009240 Aug 21 18:00 test_fixed.hdf - 7919610 Aug 21 18:00 test_fixed_compress.hdf - 24458892 Aug 21 18:00 test_table.hdf - 8657116 Aug 21 18:00 test_table_compress.hdf - 28520770 Aug 21 18:00 test.csv - 16000248 Aug 21 18:00 test.feather - 16000848 Aug 21 18:00 test.pkl - 7554108 Aug 21 18:00 test.pkl.compress + 29519500 Oct 10 06:45 test.csv + 16000248 Oct 10 06:45 test.feather + 8281983 Oct 10 06:49 test.parquet + 16000857 Oct 10 06:47 test.pkl + 7552144 Oct 10 06:48 test.pkl.compress + 34816000 Oct 10 06:42 test.sql + 24009288 Oct 10 06:43 test_fixed.hdf + 24009288 Oct 10 06:43 test_fixed_compress.hdf + 24458940 Oct 10 06:44 test_table.hdf + 24458940 Oct 10 06:44 test_table_compress.hdf + + +
xref https://github.com/python-sprints/pandas-mentoring/issues/163
https://api.github.com/repos/pandas-dev/pandas/pulls/28890
2019-10-10T07:04:29Z
2019-11-09T00:59:55Z
2019-11-09T00:59:55Z
2019-11-09T01:00:05Z
DOC: Fixed PR06 error in pandas.Categorical.from_codes
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index bab1127e6e539..8e5a58877e81b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -636,7 +636,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): Parameters ---------- - codes : array-like, integers + codes : array-like, ints An integer array, where each integer points to a category in categories or dtype.categories, or else is -1 for NaN. categories : index-like, optional @@ -647,7 +647,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): Whether or not this categorical is treated as an ordered categorical. If not given here or in `dtype`, the resulting categorical will be unordered. - dtype : CategoricalDtype or the string "category", optional + dtype : CategoricalDtype or the str "category", optional If :class:`CategoricalDtype`, cannot be used together with `categories` or `ordered`.
- [x] xref #28724 - [ ] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28888
2019-10-10T04:02:46Z
2019-10-10T15:57:24Z
null
2019-10-10T16:05:52Z
DOC: Fixed PR06 error in pandas.io.formats.style.Styler.set_table_attributes
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 6bac3fe426f2d..6b98eaca9dacc 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -780,7 +780,7 @@ def set_table_attributes(self, attributes): Parameters ---------- - attributes : string + attributes : str Returns -------
- [x] xref #28724 - [ ] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28887
2019-10-10T03:54:24Z
2019-10-10T15:53:58Z
2019-10-10T15:53:58Z
2019-10-10T16:03:57Z
REF: use fused types for groupby_helper
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 000689f634545..6b434b6470581 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -12,39 +12,27 @@ _int64_max = np.iinfo(np.int64).max # group_nth, group_last, group_rank # ---------------------------------------------------------------------- -{{py: - -# name, c_type, nan_val -dtypes = [('float64', 'float64_t', 'NAN'), - ('float32', 'float32_t', 'NAN'), - ('int64', 'int64_t', 'NPY_NAT'), - ('object', 'object', 'NAN')] - -def get_dispatch(dtypes): - - for name, c_type, nan_val in dtypes: - - yield name, c_type, nan_val -}} - - -{{for name, c_type, nan_val in get_dispatch(dtypes)}} +ctypedef fused rank_t: + float64_t + float32_t + int64_t + object @cython.wraparound(False) @cython.boundscheck(False) -def group_last_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): +def group_last(rank_t[:, :] out, + int64_t[:] counts, + rank_t[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val - ndarray[{{c_type}}, ndim=2] resx + rank_t val + ndarray[rank_t, ndim=2] resx ndarray[int64_t, ndim=2] nobs assert min_count == -1, "'min_count' only used in add and prod" @@ -53,19 +41,15 @@ def group_last_{{name}}({{c_type}}[:, :] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) - {{if name == 'object'}} - resx = np.empty((<object>out).shape, dtype=object) - {{else}} - resx = np.empty_like(out) - {{endif}} + if rank_t is object: + resx = np.empty((<object>out).shape, dtype=object) + else: + resx = np.empty_like(out) N, K = (<object>values).shape - {{if name == "object"}} - if True: # make templating happy - {{else}} - with nogil: - {{endif}} + if rank_t is object: + # TODO: De-duplicate once 
conditional-nogil is available for i in range(N): lab = labels[i] if lab < 0: @@ -76,36 +60,77 @@ def group_last_{{name}}({{c_type}}[:, :] out, val = values[i, j] # not nan - if ( - {{if not name.startswith("int")}} - val == val and - {{endif}} - val != {{nan_val}}): - nobs[lab, j] += 1 - resx[lab, j] = val + if rank_t is int64_t: + # need a special notna check + if val != NPY_NAT: + nobs[lab, j] += 1 + resx[lab, j] = val + else: + if val == val: + nobs[lab, j] += 1 + resx[lab, j] = val for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = {{nan_val}} + if rank_t is int64_t: + out[i, j] = NPY_NAT + else: + out[i, j] = NAN else: out[i, j] = resx[i, j] + else: + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if rank_t is int64_t: + # need a special notna check + if val != NPY_NAT: + nobs[lab, j] += 1 + resx[lab, j] = val + else: + if val == val: + nobs[lab, j] += 1 + resx[lab, j] = val + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] == 0: + if rank_t is int64_t: + out[i, j] = NPY_NAT + else: + out[i, j] = NAN + else: + out[i, j] = resx[i, j] + +group_last_float64 = group_last["float64_t"] +group_last_float32 = group_last["float32_t"] +group_last_int64 = group_last["int64_t"] +group_last_object = group_last["object"] @cython.wraparound(False) @cython.boundscheck(False) -def group_nth_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, int64_t rank, - Py_ssize_t min_count=-1): +def group_nth(rank_t[:, :] out, + int64_t[:] counts, + rank_t[:, :] values, + const int64_t[:] labels, int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val - ndarray[{{c_type}}, ndim=2] resx + rank_t val + ndarray[rank_t, ndim=2] resx ndarray[int64_t, ndim=2] nobs assert min_count == -1, "'min_count' 
only used in add and prod" @@ -114,19 +139,15 @@ def group_nth_{{name}}({{c_type}}[:, :] out, raise AssertionError("len(index) != len(labels)") nobs = np.zeros((<object>out).shape, dtype=np.int64) - {{if name=='object'}} - resx = np.empty((<object>out).shape, dtype=object) - {{else}} - resx = np.empty_like(out) - {{endif}} + if rank_t is object: + resx = np.empty((<object>out).shape, dtype=object) + else: + resx = np.empty_like(out) N, K = (<object>values).shape - {{if name == "object"}} - if True: # make templating happy - {{else}} - with nogil: - {{endif}} + if rank_t is object: + # TODO: De-duplicate once conditional-nogil is available for i in range(N): lab = labels[i] if lab < 0: @@ -137,11 +158,7 @@ def group_nth_{{name}}({{c_type}}[:, :] out, val = values[i, j] # not nan - if ( - {{if not name.startswith("int")}} - val == val and - {{endif}} - val != {{nan_val}}): + if val == val: nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val @@ -149,28 +166,65 @@ def group_nth_{{name}}({{c_type}}[:, :] out, for i in range(ncounts): for j in range(K): if nobs[i, j] == 0: - out[i, j] = {{nan_val}} + out[i, j] = NAN else: out[i, j] = resx[i, j] + else: + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if rank_t is int64_t: + # need a special notna check + if val != NPY_NAT: + nobs[lab, j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val + else: + if val == val: + nobs[lab, j] += 1 + if nobs[lab, j] == rank: + resx[lab, j] = val + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] == 0: + if rank_t is int64_t: + out[i, j] = NPY_NAT + else: + out[i, j] = NAN + else: + out[i, j] = resx[i, j] + -{{if name != 'object'}} +group_nth_float64 = group_nth["float64_t"] +group_nth_float32 = group_nth["float32_t"] +group_nth_int64 = group_nth["int64_t"] +group_nth_object = group_nth["object"] @cython.boundscheck(False) @cython.wraparound(False) -def 
group_rank_{{name}}(float64_t[:, :] out, - {{c_type}}[:, :] values, - const int64_t[:] labels, - bint is_datetimelike, object ties_method, - bint ascending, bint pct, object na_option): +def group_rank(float64_t[:, :] out, + rank_t[:, :] values, + const int64_t[:] labels, + bint is_datetimelike, object ties_method, + bint ascending, bint pct, object na_option): """ Provides the rank of values within each group. Parameters ---------- out : array of float64_t values which this method will write its results to - values : array of {{c_type}} values to be ranked + values : array of rank_t values to be ranked labels : array containing unique label for each group, with its ordering matching up to the corresponding record in `values` is_datetimelike : bool, default False @@ -203,10 +257,13 @@ def group_rank_{{name}}(float64_t[:, :] out, Py_ssize_t grp_vals_seen=1, grp_na_count=0, grp_tie_count=0 ndarray[int64_t] _as ndarray[float64_t, ndim=2] grp_sizes - ndarray[{{c_type}}] masked_vals + ndarray[rank_t] masked_vals ndarray[uint8_t] mask bint keep_na - {{c_type}} nan_fill_val + rank_t nan_fill_val + + if rank_t is object: + raise NotImplementedError("Cant do nogil") tiebreak = tiebreakers[ties_method] keep_na = na_option == 'keep' @@ -217,25 +274,23 @@ def group_rank_{{name}}(float64_t[:, :] out, # with mask, without obfuscating location of missing data # in values array masked_vals = np.array(values[:, 0], copy=True) - {{if name == 'int64'}} - mask = (masked_vals == {{nan_val}}).astype(np.uint8) - {{else}} - mask = np.isnan(masked_vals).astype(np.uint8) - {{endif}} + if rank_t is int64_t: + mask = (masked_vals == NPY_NAT).astype(np.uint8) + else: + mask = np.isnan(masked_vals).astype(np.uint8) if ascending ^ (na_option == 'top'): - {{if name == 'int64'}} - nan_fill_val = np.iinfo(np.int64).max - {{else}} - nan_fill_val = np.inf - {{endif}} + if rank_t is int64_t: + nan_fill_val = np.iinfo(np.int64).max + else: + nan_fill_val = np.inf order = (masked_vals, mask, labels) 
else: - {{if name == 'int64'}} - nan_fill_val = np.iinfo(np.int64).min - {{else}} - nan_fill_val = -np.inf - {{endif}} + if rank_t is int64_t: + nan_fill_val = np.iinfo(np.int64).min + else: + nan_fill_val = -np.inf + order = (masked_vals, ~mask, labels) np.putmask(masked_vals, mask, nan_fill_val) @@ -337,8 +392,13 @@ def group_rank_{{name}}(float64_t[:, :] out, out[i, 0] = NAN elif grp_sizes[i, 0] != 0: out[i, 0] = out[i, 0] / grp_sizes[i, 0] -{{endif}} -{{endfor}} + + +group_rank_float64 = group_rank["float64_t"] +group_rank_float32 = group_rank["float32_t"] +group_rank_int64 = group_rank["int64_t"] +# Note: we do not have a group_rank_object because that would require a +# not-nogil implementation, see GH#19560 # ---------------------------------------------------------------------- @@ -484,7 +544,8 @@ def group_cummin(groupby_t[:, :] out, const int64_t[:] labels, int ngroups, bint is_datetimelike): - """Cumulative minimum of columns of `values`, in row groups `labels`. + """ + Cumulative minimum of columns of `values`, in row groups `labels`. Parameters ---------- @@ -548,9 +609,10 @@ def group_cummin(groupby_t[:, :] out, def group_cummax(groupby_t[:, :] out, groupby_t[:, :] values, const int64_t[:] labels, - int ngroups, + int ngroups, bint is_datetimelike): - """Cumulative maximum of columns of `values`, in row groups `labels`. + """ + Cumulative maximum of columns of `values`, in row groups `labels`. Parameters ----------
There will be some nice cleanups we can do after we bump cython to 0.30 (which hasn't come out yet). Also I think there is some na-checking code that we can share between the various fused-types functions after this.
https://api.github.com/repos/pandas-dev/pandas/pulls/28886
2019-10-10T03:47:10Z
2019-10-11T12:01:31Z
2019-10-11T12:01:31Z
2019-10-11T15:25:48Z
DOC: Fixed PR06 errors in pandas.api.extensions.ExtensionArray
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 0778b6726d104..7a16c3f6a35b6 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -177,7 +177,7 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): dtype : dtype, optional Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. - copy : boolean, default False + copy : bool, default False If True, copy the underlying data. Returns @@ -200,7 +200,7 @@ def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): dtype : dtype, optional Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. - copy : boolean, default False + copy : bool, default False If True, copy the underlying data. Returns @@ -769,7 +769,7 @@ def take( Parameters ---------- - indices : sequence of integers + indices : sequence of int Indices to be taken. allow_fill : bool, default False How to handle negative values in `indices`.
- [x] xref #28724 - [ ] tests added / passed - [x] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28885
2019-10-10T03:45:40Z
2019-10-10T16:00:24Z
2019-10-10T16:00:24Z
2019-10-10T16:04:28Z
Remove ix, part 1
diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 6d239e96cd167..4d8b3e3e24ac7 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -393,85 +393,6 @@ def test_boolean_index_empty_corner(self): blah[k] blah[k] = 0 - def test_getitem_ix_mixed_integer(self): - df = DataFrame( - np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3] - ) - - result = df.iloc[:-1] - expected = df.loc[df.index[:-1]] - assert_frame_equal(result, expected) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = df.ix[[1, 10]] - expected = df.ix[Index([1, 10], dtype=object)] - assert_frame_equal(result, expected) - - # 11320 - df = pd.DataFrame( - { - "rna": (1.5, 2.2, 3.2, 4.5), - -1000: [11, 21, 36, 40], - 0: [10, 22, 43, 34], - 1000: [0, 10, 20, 30], - }, - columns=["rna", -1000, 0, 1000], - ) - result = df[[1000]] - expected = df.iloc[:, [3]] - assert_frame_equal(result, expected) - result = df[[-1000]] - expected = df.iloc[:, [1]] - assert_frame_equal(result, expected) - - def test_getitem_setitem_ix_negative_integers(self, float_frame): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, -1] - assert_series_equal(result, float_frame["D"]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, [-1]] - assert_frame_equal(result, float_frame[["D"]]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - result = float_frame.ix[:, [-1, -2]] - assert_frame_equal(result, float_frame[["D", "C"]]) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - float_frame.ix[:, [-1]] = 0 - assert (float_frame["D"] == 0).all() - - df = DataFrame(np.random.randn(8, 4)) - # ix does label-based indexing when having an integer index - msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [index]\"" - with 
catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - with pytest.raises(KeyError, match=re.escape(msg)): - df.ix[[-1]] - - msg = "\"None of [Int64Index([-1], dtype='int64')] are in the [columns]\"" - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - with pytest.raises(KeyError, match=re.escape(msg)): - df.ix[:, [-1]] - - # #1942 - a = DataFrame(np.random.randn(20, 2), index=[chr(x + 65) for x in range(20)]) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - a.ix[-1] = a.ix[-2] - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - assert_series_equal(a.ix[-1], a.ix[-2], check_names=False) - assert a.ix[-1].name == "T" - assert a.ix[-2].name == "S" - def test_getattr(self, float_frame): assert_series_equal(float_frame.A, float_frame["A"]) msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'" diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 692a86aa1a338..a7a9b2c917952 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -12,16 +12,15 @@ from pandas.util import testing as tm -@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning") class TestMultiIndexSlicers: def test_per_axis_per_level_getitem(self): # GH6134 # example test case - ix = MultiIndex.from_product( + midx = MultiIndex.from_product( [_mklbl("A", 5), _mklbl("B", 7), _mklbl("C", 4), _mklbl("D", 2)] ) - df = DataFrame(np.arange(len(ix.to_numpy())), index=ix) + df = DataFrame(np.arange(len(midx)), index=midx) result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :] expected = df.loc[ @@ -98,7 +97,7 @@ def test_per_axis_per_level_getitem(self): tm.assert_frame_equal(result, expected) # multi-level series - s = Series(np.arange(len(ix.to_numpy())), index=ix) + s = Series(np.arange(len(midx)), index=midx) result = s.loc["A1":"A3", :, ["C1", "C3"]] expected = s.loc[ [ @@ 
-637,8 +636,6 @@ def test_multiindex_label_slicing_with_negative_step(self): def assert_slices_equivalent(l_slc, i_slc): tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc]) tm.assert_series_equal(s[l_slc], s.iloc[i_slc]) - with catch_warnings(record=True): - tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc]) assert_slices_equivalent(SLC[::-1], SLC[::-1]) diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py deleted file mode 100644 index 6029db8ed66f6..0000000000000 --- a/pandas/tests/indexing/test_ix.py +++ /dev/null @@ -1,355 +0,0 @@ -""" test indexing with ix """ - -from warnings import catch_warnings - -import numpy as np -import pytest - -from pandas.core.dtypes.common import is_scalar - -import pandas as pd -from pandas import DataFrame, Series, option_context -from pandas.util import testing as tm - - -def test_ix_deprecation(): - # GH 15114 - - df = DataFrame({"A": [1, 2, 3]}) - with tm.assert_produces_warning(FutureWarning, check_stacklevel=True): - df.ix[1, "A"] - - -@pytest.mark.filterwarnings("ignore:\\n.ix:FutureWarning") -class TestIX: - def test_ix_loc_setitem_consistency(self): - - # GH 5771 - # loc with slice and series - s = Series(0, index=[4, 5, 6]) - s.loc[4:5] += 1 - expected = Series([1, 1, 0], index=[4, 5, 6]) - tm.assert_series_equal(s, expected) - - # GH 5928 - # chained indexing assignment - df = DataFrame({"a": [0, 1, 2]}) - expected = df.copy() - with catch_warnings(record=True): - expected.ix[[0, 1, 2], "a"] = -expected.ix[[0, 1, 2], "a"] - - with catch_warnings(record=True): - df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]] - tm.assert_frame_equal(df, expected) - - df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]}) - with catch_warnings(record=True): - df["a"].ix[[0, 1, 2]] = -df["a"].ix[[0, 1, 2]].astype("float64") + 0.5 - expected = DataFrame({"a": [0.5, -0.5, -1.5], "b": [0, 1, 2]}) - tm.assert_frame_equal(df, expected) - - # GH 8607 - # ix setitem consistency - df = DataFrame( - { - "delta": [1174, 904, 
161], - "elapsed": [7673, 9277, 1470], - "timestamp": [1413840976, 1413842580, 1413760580], - } - ) - expected = DataFrame( - { - "delta": [1174, 904, 161], - "elapsed": [7673, 9277, 1470], - "timestamp": pd.to_datetime( - [1413840976, 1413842580, 1413760580], unit="s" - ), - } - ) - - df2 = df.copy() - df2["timestamp"] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - df2 = df.copy() - df2.loc[:, "timestamp"] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - df2 = df.copy() - with catch_warnings(record=True): - df2.ix[:, 2] = pd.to_datetime(df["timestamp"], unit="s") - tm.assert_frame_equal(df2, expected) - - def test_ix_loc_consistency(self): - - # GH 8613 - # some edge cases where ix/loc should return the same - # this is not an exhaustive case - - def compare(result, expected): - if is_scalar(expected): - assert result == expected - else: - assert expected.equals(result) - - # failure cases for .loc, but these work for .ix - df = DataFrame(np.random.randn(5, 4), columns=list("ABCD")) - for key in [ - slice(1, 3), - tuple([slice(0, 2), slice(0, 2)]), - tuple([slice(0, 2), df.columns[0:2]]), - ]: - - for index in [ - tm.makeStringIndex, - tm.makeUnicodeIndex, - tm.makeDateIndex, - tm.makePeriodIndex, - tm.makeTimedeltaIndex, - ]: - df.index = index(len(df.index)) - with catch_warnings(record=True): - df.ix[key] - - msg = ( - r"cannot do slice indexing" - r" on {klass} with these indexers \[(0|1)\] of" - r" {kind}".format(klass=type(df.index), kind=str(int)) - ) - with pytest.raises(TypeError, match=msg): - df.loc[key] - - df = DataFrame( - np.random.randn(5, 4), - columns=list("ABCD"), - index=pd.date_range("2012-01-01", periods=5), - ) - - for key in [ - "2012-01-03", - "2012-01-31", - slice("2012-01-03", "2012-01-03"), - slice("2012-01-03", "2012-01-04"), - slice("2012-01-03", "2012-01-06", 2), - slice("2012-01-03", "2012-01-31"), - tuple([[True, True, True, False, True]]), - ]: - - # 
getitem - - # if the expected raises, then compare the exceptions - try: - with catch_warnings(record=True): - expected = df.ix[key] - except KeyError: - with pytest.raises(KeyError, match=r"^'2012-01-31'$"): - df.loc[key] - continue - - result = df.loc[key] - compare(result, expected) - - # setitem - df1 = df.copy() - df2 = df.copy() - - with catch_warnings(record=True): - df1.ix[key] = 10 - df2.loc[key] = 10 - compare(df2, df1) - - # edge cases - s = Series([1, 2, 3, 4], index=list("abde")) - - result1 = s["a":"c"] - with catch_warnings(record=True): - result2 = s.ix["a":"c"] - result3 = s.loc["a":"c"] - tm.assert_series_equal(result1, result2) - tm.assert_series_equal(result1, result3) - - # now work rather than raising KeyError - s = Series(range(5), [-2, -1, 1, 2, 3]) - - with catch_warnings(record=True): - result1 = s.ix[-10:3] - result2 = s.loc[-10:3] - tm.assert_series_equal(result1, result2) - - with catch_warnings(record=True): - result1 = s.ix[0:3] - result2 = s.loc[0:3] - tm.assert_series_equal(result1, result2) - - def test_ix_weird_slicing(self): - # http://stackoverflow.com/q/17056560/1240268 - df = DataFrame({"one": [1, 2, 3, np.nan, np.nan], "two": [1, 2, 3, 4, 5]}) - df.loc[df["one"] > 1, "two"] = -df["two"] - - expected = DataFrame( - { - "one": {0: 1.0, 1: 2.0, 2: 3.0, 3: np.nan, 4: np.nan}, - "two": {0: 1, 1: -2, 2: -3, 3: 4, 4: 5}, - } - ) - tm.assert_frame_equal(df, expected) - - def test_ix_assign_column_mixed(self, float_frame): - # GH #1142 - df = float_frame - df["foo"] = "bar" - - orig = df.loc[:, "B"].copy() - df.loc[:, "B"] = df.loc[:, "B"] + 1 - tm.assert_series_equal(df.B, orig + 1) - - # GH 3668, mixed frame with series value - df = DataFrame({"x": np.arange(10), "y": np.arange(10, 20), "z": "bar"}) - expected = df.copy() - - for i in range(5): - indexer = i * 2 - v = 1000 + i * 200 - expected.loc[indexer, "y"] = v - assert expected.loc[indexer, "y"] == v - - df.loc[df.x % 2 == 0, "y"] = df.loc[df.x % 2 == 0, "y"] * 100 - 
tm.assert_frame_equal(df, expected) - - # GH 4508, making sure consistency of assignments - df = DataFrame({"a": [1, 2, 3], "b": [0, 1, 2]}) - df.loc[[0, 2], "b"] = [100, -100] - expected = DataFrame({"a": [1, 2, 3], "b": [100, 1, -100]}) - tm.assert_frame_equal(df, expected) - - df = DataFrame({"a": list(range(4))}) - df["b"] = np.nan - df.loc[[1, 3], "b"] = [100, -100] - expected = DataFrame({"a": [0, 1, 2, 3], "b": [np.nan, 100, np.nan, -100]}) - tm.assert_frame_equal(df, expected) - - # ok, but chained assignments are dangerous - # if we turn off chained assignment it will work - with option_context("chained_assignment", None): - df = DataFrame({"a": list(range(4))}) - df["b"] = np.nan - df["b"].loc[[1, 3]] = [100, -100] - tm.assert_frame_equal(df, expected) - - def test_ix_get_set_consistency(self): - - # GH 4544 - # ix/loc get/set not consistent when - # a mixed int/string index - df = DataFrame( - np.arange(16).reshape((4, 4)), - columns=["a", "b", 8, "c"], - index=["e", 7, "f", "g"], - ) - - with catch_warnings(record=True): - assert df.ix["e", 8] == 2 - assert df.loc["e", 8] == 2 - - with catch_warnings(record=True): - df.ix["e", 8] = 42 - assert df.ix["e", 8] == 42 - assert df.loc["e", 8] == 42 - - df.loc["e", 8] = 45 - with catch_warnings(record=True): - assert df.ix["e", 8] == 45 - assert df.loc["e", 8] == 45 - - def test_ix_slicing_strings(self): - # see gh-3836 - data = { - "Classification": ["SA EQUITY CFD", "bbb", "SA EQUITY", "SA SSF", "aaa"], - "Random": [1, 2, 3, 4, 5], - "X": ["correct", "wrong", "correct", "correct", "wrong"], - } - df = DataFrame(data) - x = df[~df.Classification.isin(["SA EQUITY CFD", "SA EQUITY", "SA SSF"])] - with catch_warnings(record=True): - df.ix[x.index, "X"] = df["Classification"] - - expected = DataFrame( - { - "Classification": { - 0: "SA EQUITY CFD", - 1: "bbb", - 2: "SA EQUITY", - 3: "SA SSF", - 4: "aaa", - }, - "Random": {0: 1, 1: 2, 2: 3, 3: 4, 4: 5}, - "X": {0: "correct", 1: "bbb", 2: "correct", 3: "correct", 
4: "aaa"}, - } - ) # bug was 4: 'bbb' - - tm.assert_frame_equal(df, expected) - - def test_ix_setitem_out_of_bounds_axis_0(self): - df = DataFrame( - np.random.randn(2, 5), - index=["row{i}".format(i=i) for i in range(2)], - columns=["col{i}".format(i=i) for i in range(5)], - ) - with catch_warnings(record=True): - msg = "cannot set by positional indexing with enlargement" - with pytest.raises(ValueError, match=msg): - df.ix[2, 0] = 100 - - def test_ix_setitem_out_of_bounds_axis_1(self): - df = DataFrame( - np.random.randn(5, 2), - index=["row{i}".format(i=i) for i in range(5)], - columns=["col{i}".format(i=i) for i in range(2)], - ) - with catch_warnings(record=True): - msg = "cannot set by positional indexing with enlargement" - with pytest.raises(ValueError, match=msg): - df.ix[0, 2] = 100 - - def test_ix_empty_list_indexer_is_ok(self): - with catch_warnings(record=True): - from pandas.util.testing import makeCustomDataframe as mkdf - - df = mkdf(5, 2) - # vertical empty - tm.assert_frame_equal( - df.ix[:, []], - df.iloc[:, :0], - check_index_type=True, - check_column_type=True, - ) - # horizontal empty - tm.assert_frame_equal( - df.ix[[], :], - df.iloc[:0, :], - check_index_type=True, - check_column_type=True, - ) - # horizontal empty - tm.assert_frame_equal( - df.ix[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True - ) - - def test_ix_duplicate_returns_series(self): - df = DataFrame( - np.random.randn(3, 3), index=[0.1, 0.2, 0.2], columns=list("abc") - ) - with catch_warnings(record=True): - r = df.ix[0.2, "a"] - e = df.loc[0.2, "a"] - tm.assert_series_equal(r, e) - - def test_ix_intervalindex(self): - # https://github.com/pandas-dev/pandas/issues/27865 - df = DataFrame( - np.random.randn(5, 2), - index=pd.IntervalIndex.from_breaks([-np.inf, 0, 1, 2, 3, np.inf]), - ) - result = df.ix[0:2, 0] - expected = df.iloc[0:2, 0] - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py 
index 4a60d3966a9bb..1292392eb111d 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -209,11 +209,6 @@ def test_reindex(self): reindexed = self.frame.loc[[("foo", "one"), ("bar", "one")]] tm.assert_frame_equal(reindexed, expected) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - reindexed = self.frame.ix[[("foo", "one"), ("bar", "one")]] - tm.assert_frame_equal(reindexed, expected) - def test_reindex_preserve_levels(self): new_index = self.ymd.index[::10] chunk = self.ymd.reindex(new_index) @@ -222,11 +217,6 @@ def test_reindex_preserve_levels(self): chunk = self.ymd.loc[new_index] assert chunk.index is new_index - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - chunk = self.ymd.ix[new_index] - assert chunk.index is new_index - ymdT = self.ymd.T chunk = ymdT.reindex(columns=new_index) assert chunk.columns is new_index
- [ ] xref #15113 and #26438 First part of removal od ``NDFrame.ix``. There's still alot of tests for this, so I'll do this removal in 2-3 steps. This PR removes pandas/tests/indexing/test_ix.py and tests for ix in a few more places.
https://api.github.com/repos/pandas-dev/pandas/pulls/28884
2019-10-10T01:12:37Z
2019-10-10T01:53:18Z
null
2019-10-10T16:41:48Z
CLN: dont catch Exception in groupby var
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 4e0dd65042196..d477b173b95f0 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -41,6 +41,7 @@ class providing the base-class of operations. ) from pandas.core.dtypes.missing import isna, notna +from pandas.core import nanops import pandas.core.algorithms as algorithms from pandas.core.arrays import Categorical from pandas.core.base import ( @@ -721,6 +722,10 @@ def f(g): with np.errstate(all="ignore"): return func(g, *args, **kwargs) + elif hasattr(nanops, "nan" + func): + # TODO: should we wrap this in to e.g. _is_builtin_func? + f = getattr(nanops, "nan" + func) + else: raise ValueError( "func must be a callable if args or kwargs are supplied" @@ -1297,16 +1302,9 @@ def var(self, ddof=1, *args, **kwargs): """ nv.validate_groupby_func("var", args, kwargs) if ddof == 1: - try: - return self._cython_agg_general( - "var", - alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs), - **kwargs - ) - except Exception: - f = lambda x: x.var(ddof=ddof, **kwargs) - with _group_selection_context(self): - return self._python_agg_general(f) + return self._cython_agg_general( + "var", alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs), **kwargs + ) else: f = lambda x: x.var(ddof=ddof, **kwargs) with _group_selection_context(self):
Between this, #28878, and #28873, I'm finding that the outside-in approach to cleaning this up is proving easier the earlier inside-out approach.
https://api.github.com/repos/pandas-dev/pandas/pulls/28883
2019-10-09T22:40:33Z
2019-10-10T01:25:29Z
2019-10-10T01:25:29Z
2019-10-10T01:55:47Z
TST: Fix maybe_promote floating non-boxed tests
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5b13e13bb20ba..098f42b1a8c5c 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -398,6 +398,22 @@ def maybe_promote(dtype, fill_value=np.nan): dtype = np.dtype(np.float64) if not isna(fill_value): fill_value = dtype.type(fill_value) + + elif dtype.kind == "f": + if not np.can_cast(fill_value, dtype): + # e.g. dtype is float32, need float64 + dtype = np.min_scalar_type(fill_value) + + elif dtype.kind == "c": + if not np.can_cast(fill_value, dtype): + if np.can_cast(fill_value, np.dtype("c16")): + dtype = np.dtype(np.complex128) + else: + dtype = np.dtype(np.object_) + + if dtype.kind == "c" and not np.isnan(fill_value): + fill_value = dtype.type(fill_value) + elif is_bool(fill_value): if not issubclass(dtype.type, np.bool_): dtype = np.object_ @@ -405,7 +421,7 @@ def maybe_promote(dtype, fill_value=np.nan): fill_value = np.bool_(fill_value) elif is_integer(fill_value): if issubclass(dtype.type, np.bool_): - dtype = np.object_ + dtype = np.dtype(np.object_) elif issubclass(dtype.type, np.integer): # upcast to prevent overflow arr = np.asarray(fill_value) @@ -415,11 +431,37 @@ def maybe_promote(dtype, fill_value=np.nan): # check if we can cast if _check_lossless_cast(fill_value, dtype): fill_value = dtype.type(fill_value) + + if dtype.kind in ["c", "f"]: + # e.g. 
if dtype is complex128 and fill_value is 1, we + # want np.complex128(1) + fill_value = dtype.type(fill_value) + elif is_complex(fill_value): if issubclass(dtype.type, np.bool_): - dtype = np.object_ + dtype = np.dtype(np.object_) elif issubclass(dtype.type, (np.integer, np.floating)): - dtype = np.complex128 + c8 = np.dtype(np.complex64) + info = np.finfo(dtype) if dtype.kind == "f" else np.iinfo(dtype) + if ( + np.can_cast(fill_value, c8) + and np.can_cast(info.min, c8) + and np.can_cast(info.max, c8) + ): + dtype = np.dtype(np.complex64) + else: + dtype = np.dtype(np.complex128) + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + if mst > dtype and mst.kind == "c": + # e.g. mst is np.complex128 and dtype is np.complex64 + dtype = mst + + if dtype.kind == "c": + # make sure we have a np.complex and not python complex + fill_value = dtype.type(fill_value) + elif fill_value is None: if is_float_dtype(dtype) or is_complex_dtype(dtype): fill_value = np.nan diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index e4e5a22ea6ca0..e9041a27ab9be 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -408,25 +408,14 @@ def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype, box): if box_dtype == object: pytest.xfail("falsely upcasts to object") - if boxed and is_float_dtype(dtype) and is_complex_dtype(expected_dtype): + elif boxed and is_float_dtype(dtype) and is_complex_dtype(expected_dtype): pytest.xfail("does not upcast to complex") - if (dtype, expected_dtype) in [ + elif boxed and (dtype, expected_dtype) in [ ("float32", "float64"), ("float32", "complex64"), ("complex64", "complex128"), ]: pytest.xfail("does not upcast correctly depending on value") - # this following xfails are "only" a consequence of the - now strictly - # enforced - principle that maybe_promote_with_scalar always casts - if not boxed and abs(fill_value) < 2: - 
pytest.xfail("wrong return type of fill_value") - if ( - not boxed - and dtype == "complex128" - and expected_dtype == "complex128" - and is_float_dtype(type(fill_value)) - ): - pytest.xfail("wrong return type of fill_value") # output is not a generic float, but corresponds to expected_dtype exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
Like #28864, this involves changing the underlying function. Some of this can be de-duplicated once these are in. I think that this is the last of the xfails for non-boxed test cases. Really looking forward to having this done with,.
https://api.github.com/repos/pandas-dev/pandas/pulls/28880
2019-10-09T20:43:50Z
2019-10-10T12:50:18Z
2019-10-10T12:50:18Z
2019-10-10T13:33:32Z
CLN: assorted cleanups, remove unicode checks in cython
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index c0aa661266d29..6b27b2204e75e 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -60,7 +60,7 @@ def hash_object_array(object[:] arr, object key, object encoding='utf8'): val = arr[i] if isinstance(val, bytes): data = <bytes>val - elif isinstance(val, unicode): + elif isinstance(val, str): data = <bytes>val.encode(encoding) elif val is None or is_nan(val): # null, stringify and encode diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 17f1d011af01b..1cbdb0df6233c 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -667,7 +667,7 @@ cdef class StringHashTable(HashTable): for i in range(n): val = values[i] - if isinstance(val, (str, unicode)): + if isinstance(val, str): v = get_c_string(val) else: v = get_c_string(self.na_string_sentinel) @@ -700,7 +700,7 @@ cdef class StringHashTable(HashTable): for i in range(n): val = values[i] - if isinstance(val, (str, unicode)): + if isinstance(val, str): v = get_c_string(val) else: v = get_c_string(self.na_string_sentinel) @@ -774,7 +774,7 @@ cdef class StringHashTable(HashTable): val = values[i] if (ignore_na - and (not isinstance(val, (str, unicode)) + and (not isinstance(val, str) or (use_na_value and val == na_value))): # if missing values do not count as unique values (i.e. 
if # ignore_na is True), we can skip the actual value, and diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index f5a42d7aef3ba..3f12ec4c15fc7 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2249,7 +2249,7 @@ cdef _apply_converter(object f, parser_t *parser, int64_t col, def _maybe_encode(values): if values is None: return [] - return [x.encode('utf-8') if isinstance(x, unicode) else x for x in values] + return [x.encode('utf-8') if isinstance(x, str) else x for x in values] def sanitize_objects(ndarray[object] values, set na_values, diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 2ed85595f7e3a..8f5c8d10776df 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -22,7 +22,7 @@ from pandas._libs.tslibs.np_datetime cimport ( from pandas._libs.tslibs.nattype cimport NPY_NAT -def get_time_micros(ndarray[int64_t] dtindex): +def get_time_micros(const int64_t[:] dtindex): """ Return the number of microseconds in the time component of a nanosecond timestamp. @@ -537,7 +537,7 @@ def get_date_field(const int64_t[:] dtindex, object field): elif field == 'is_leap_year': return isleapyear_arr(get_date_field(dtindex, 'Y')) - raise ValueError("Field %s not supported" % field) + raise ValueError("Field {field} not supported".format(field=field)) @cython.wraparound(False) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index ca70c8af45f2f..33665484311ba 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -252,9 +252,7 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): ------- datetime, datetime/dateutil.parser._result, str """ - if not isinstance(arg, (str, unicode)): - # Note: cython recognizes `unicode` in both py2/py3, optimizes - # this check into a C call. 
+ if not isinstance(arg, str): return arg if getattr(freq, "_typ", None) == "dateoffset": @@ -370,7 +368,7 @@ cdef inline object _parse_dateabbr_string(object date_string, object default, int year, quarter = -1, month, mnum, date_len # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 - assert isinstance(date_string, (str, unicode)) + assert isinstance(date_string, str) # len(date_string) == 0 # should be NaT??? @@ -517,7 +515,7 @@ cdef dateutil_parse(object timestr, object default, ignoretz=False, tzdata = tzinfos.get(res.tzname) if isinstance(tzdata, datetime.tzinfo): tzinfo = tzdata - elif isinstance(tzdata, (str, unicode)): + elif isinstance(tzdata, str): tzinfo = _dateutil_tzstr(tzdata) elif isinstance(tzdata, int): tzinfo = tzoffset(res.tzname, tzdata) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 32dcc86faa7e8..84a41b8757001 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -2448,7 +2448,10 @@ class Period(_Period): converted = other.asfreq(freq) ordinal = converted.ordinal - elif is_null_datetimelike(value) or value in nat_strings: + elif is_null_datetimelike(value) or (isinstance(value, str) and + value in nat_strings): + # explicit str check is necessary to avoid raising incorrectly + # if we have a non-hashable value. 
ordinal = NPY_NAT elif isinstance(value, str) or util.is_integer_object(value): diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index bda5f8f4326f1..958650e3842fa 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1148,7 +1148,7 @@ def _addsub_offset_array(self, other, op): ) # For EA self.astype('O') returns a numpy array, not an Index - left = lib.values_from_object(self.astype("O")) + left = self.astype("O") res_values = op(left, np.array(other)) kwargs = {} diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index f2d74794eadf5..43208d98abd3c 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -70,7 +70,7 @@ def _period_array_cmp(cls, op): nat_result = opname == "__ne__" def wrapper(self, other): - op = getattr(self.asi8, opname) + ordinal_op = getattr(self.asi8, opname) other = lib.item_from_zerodim(other) if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): @@ -82,11 +82,11 @@ def wrapper(self, other): if isinstance(other, Period): self._check_compatible_with(other) - result = op(other.ordinal) + result = ordinal_op(other.ordinal) elif isinstance(other, cls): self._check_compatible_with(other) - result = op(other.asi8) + result = ordinal_op(other.asi8) mask = self._isnan | other._isnan if mask.any(): @@ -98,7 +98,7 @@ def wrapper(self, other): result.fill(nat_result) else: other = Period(other, freq=self.freq) - result = op(other.ordinal) + result = ordinal_op(other.ordinal) if self._hasnans: result[self._isnan] = nat_result diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 6c9462ff4fa4d..21e07b5101a64 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -553,7 +553,7 @@ def __mul__(self, other): # for that instead of ValueError raise ValueError("Cannot multiply with unequal lengths") - if is_object_dtype(other): + if is_object_dtype(other.dtype): # 
this multiplication will succeed only if all elements of other # are int or float scalars, so we will end up with # timedelta64[ns]-dtyped result @@ -601,11 +601,11 @@ def __truediv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide vectors with unequal lengths") - elif is_timedelta64_dtype(other): + elif is_timedelta64_dtype(other.dtype): # let numpy handle it return self._data / other - elif is_object_dtype(other): + elif is_object_dtype(other.dtype): # Note: we do not do type inference on the result, so either # an object array or numeric-dtyped (if numpy does inference) # will be returned. GH#23829 @@ -649,12 +649,12 @@ def __rtruediv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide vectors with unequal lengths") - elif is_timedelta64_dtype(other): + elif is_timedelta64_dtype(other.dtype): # let numpy handle it return other / self._data - elif is_object_dtype(other): - # Note: unlike in __truediv__, we do not _need_ to do type# + elif is_object_dtype(other.dtype): + # Note: unlike in __truediv__, we do not _need_ to do type # inference on the result. It does not raise, a numeric array # is returned. 
GH#23829 result = [other[n] / self[n] for n in range(len(self))] @@ -701,7 +701,7 @@ def __floordiv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") - elif is_timedelta64_dtype(other): + elif is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate @@ -713,7 +713,7 @@ def __floordiv__(self, other): result[mask] = np.nan return result - elif is_object_dtype(other): + elif is_object_dtype(other.dtype): result = [self[n] // other[n] for n in range(len(self))] result = np.array(result) if lib.infer_dtype(result, skipna=False) == "timedelta": @@ -721,7 +721,7 @@ def __floordiv__(self, other): return type(self)(result) return result - elif is_integer_dtype(other) or is_float_dtype(other): + elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype): result = self._data // other return type(self)(result) @@ -763,7 +763,7 @@ def __rfloordiv__(self, other): if len(other) != len(self): raise ValueError("Cannot divide with unequal lengths") - elif is_timedelta64_dtype(other): + elif is_timedelta64_dtype(other.dtype): other = type(self)(other) # numpy timedelta64 does not natively support floordiv, so operate @@ -775,7 +775,7 @@ def __rfloordiv__(self, other): result[mask] = np.nan return result - elif is_object_dtype(other): + elif is_object_dtype(other.dtype): result = [other[n] // self[n] for n in range(len(self))] result = np.array(result) return result diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index a225eec93b27e..8c9a4b94446c0 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -161,7 +161,7 @@ def arithmetic_op( right: Any, op, str_rep: str, - eval_kwargs: Dict[str, str], + eval_kwargs: Dict[str, bool], ): """ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... 
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 0e1cd42329169..73eddf91325ae 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -400,7 +400,7 @@ def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame): added = float_frame + mixed_int_frame _check_mixed_float(added, dtype="float64") - def test_combineSeries( + def test_combine_series( self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame ): @@ -432,6 +432,7 @@ def test_combineSeries( added = mixed_float_frame + series.astype("float16") _check_mixed_float(added, dtype=dict(C=None)) + # FIXME: don't leave commented-out # these raise with numexpr.....as we are adding an int64 to an # uint64....weird vs int diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 82c197ac054f0..f5f6c9ad6b3da 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -122,7 +122,8 @@ def test_ops(self): result = getattr(df, rop)(m) assert_frame_equal(result, expected) - # GH7192 + # GH7192: Note we need a large number of rows to ensure this + # goes through the numexpr path df = DataFrame(dict(A=np.random.randn(25000))) df.iloc[0:5] = np.nan expected = 1 - np.isnan(df.iloc[0:25])
https://api.github.com/repos/pandas-dev/pandas/pulls/28879
2019-10-09T20:10:44Z
2019-10-10T01:30:55Z
2019-10-10T01:30:55Z
2019-10-10T01:43:10Z
CLN: dont catch on groupby.mean
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 41a5195008f0c..5200d33c6a1fb 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -971,6 +971,18 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, min_count=-1): if result is not no_result: # see if we can cast the block back to the original dtype result = maybe_downcast_numeric(result, block.dtype) + + if result.ndim == 1 and isinstance(result, np.ndarray): + # e.g. block.values was an IntegerArray + try: + # Cast back if feasible + result = type(block.values)._from_sequence( + result, dtype=block.values.dtype + ) + except ValueError: + # reshape to be valid for non-Extension Block + result = result.reshape(1, -1) + newb = block.make_block(result) new_items.append(locs) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 4e0dd65042196..a127e7dc9bada 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1212,16 +1212,9 @@ def mean(self, *args, **kwargs): Name: B, dtype: float64 """ nv.validate_groupby_func("mean", args, kwargs, ["numeric_only"]) - try: - return self._cython_agg_general( - "mean", alt=lambda x, axis: Series(x).mean(**kwargs), **kwargs - ) - except GroupByError: - raise - except Exception: - with _group_selection_context(self): - f = lambda x: x.mean(axis=self.axis, **kwargs) - return self._python_agg_general(f) + return self._cython_agg_general( + "mean", alt=lambda x, axis: Series(x).mean(**kwargs), **kwargs + ) @Substitution(name="groupby") @Appender(_common_see_also)
The new casting in cython_agg_blocks is specific to a single IntegerArray test case. We could pretty reasonably move that into maybe_downcast_numeric. For the moment id rather hold off since I expect other EA cases to show up here. cc @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/28878
2019-10-09T19:58:12Z
2019-10-10T01:27:38Z
2019-10-10T01:27:38Z
2019-10-10T01:51:09Z
ENH: show numbers on .info() with verbose flag
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 051d64ee87711..8c59ed0dd9388 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -290,13 +290,42 @@ New repr for :class:`~pandas.arrays.IntervalArray` closed='right', dtype='interval[int64]') - *pandas 1.0.0* .. ipython:: python pd.arrays.IntervalArray.from_tuples([(0, 1), (2, 3)]) +Extended verbose info output for :class:`~pandas.DataFrame` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- :meth:`Dataframe.info` now shows line numbers for the columns summary (:issue:`17304`) + +*pandas 0.25.x* + +.. code-block:: python + + >>> df = pd.DataFrame({"int_col": [1, 2, 3], + ... "text_col": ["a", "b", "c"], + ... "float_col": [0.0, 0.1, 0.2]}) + >>> df.info(verbose=True) + <class 'pandas.core.frame.DataFrame'> + RangeIndex: 3 entries, 0 to 2 + Data columns (total 3 columns): + int_col 3 non-null int64 + text_col 3 non-null object + float_col 3 non-null float64 + dtypes: float64(1), int64(1), object(1) + memory usage: 152.0+ bytes + +*pandas 1.0.0* + +.. 
ipython:: python + + df = pd.DataFrame({"int_col": [1, 2, 3], + "text_col": ["a", "b", "c"], + "float_col": [0.0, 0.1, 0.2]}) + df.info(verbose=True) All :class:`SeriesGroupBy` aggregation methods now respect the ``observed`` keyword ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b69199defbcc4..8bc417acaf7f3 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2276,9 +2276,11 @@ def info( <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): - int_col 5 non-null int64 - text_col 5 non-null object - float_col 5 non-null float64 + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 int_col 5 non-null int64 + 1 text_col 5 non-null object + 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes @@ -2317,9 +2319,11 @@ def info( <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): - column_1 1000000 non-null object - column_2 1000000 non-null object - column_3 1000000 non-null object + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB @@ -2327,9 +2331,11 @@ def info( <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): - column_1 1000000 non-null object - column_2 1000000 non-null object - column_3 1000000 non-null object + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 188.8 MB """ @@ -2348,6 +2354,7 @@ def info( return cols = self.columns + col_count = len(self.columns) # hack if max_cols is None: @@ -2356,36 +2363,76 @@ 
def info( max_rows = get_option("display.max_info_rows", len(self) + 1) if null_counts is None: - show_counts = (len(self.columns) <= max_cols) and (len(self) < max_rows) + show_counts = (col_count <= max_cols) and (len(self) < max_rows) else: show_counts = null_counts - exceeds_info_cols = len(self.columns) > max_cols + exceeds_info_cols = col_count > max_cols def _verbose_repr(): lines.append(f"Data columns (total {len(self.columns)} columns):") - space = max(len(pprint_thing(k)) for k in self.columns) + 4 + + id_head = " # " + column_head = "Column" + col_space = 2 + + max_col = max(len(pprint_thing(k)) for k in cols) + len_column = len(pprint_thing(column_head)) + space = max(max_col, len_column) + col_space + + max_id = len(pprint_thing(col_count)) + len_id = len(pprint_thing(id_head)) + space_num = max(max_id, len_id) + col_space counts = None - tmpl = "{count}{dtype}" + header = _put_str(id_head, space_num) + _put_str(column_head, space) if show_counts: counts = self.count() if len(cols) != len(counts): # pragma: no cover raise AssertionError( f"Columns must equal counts ({len(cols)} != {len(counts)})" ) - tmpl = "{count} non-null {dtype}" + count_header = "Non-Null Count" + len_count = len(count_header) + non_null = " non-null" + max_count = max(len(pprint_thing(k)) for k in counts) + len(non_null) + space_count = max(len_count, max_count) + col_space + count_temp = "{count}" + non_null + else: + count_header = "" + space_count = len(count_header) + len_count = space_count + count_temp = "{count}" + + dtype_header = "Dtype" + len_dtype = len(dtype_header) + max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes) + space_dtype = max(len_dtype, max_dtypes) + header += _put_str(count_header, space_count) + _put_str( + dtype_header, space_dtype + ) + + lines.append(header) + lines.append( + _put_str("-" * len_id, space_num) + + _put_str("-" * len_column, space) + + _put_str("-" * len_count, space_count) + + _put_str("-" * len_dtype, space_dtype) + ) - 
dtypes = self.dtypes for i, col in enumerate(self.columns): - dtype = dtypes.iloc[i] + dtype = self.dtypes.iloc[i] col = pprint_thing(col) + line_no = _put_str(" {num}".format(num=i), space_num) count = "" if show_counts: count = counts.iloc[i] lines.append( - _put_str(col, space) + tmpl.format(count=count, dtype=dtype) + line_no + + _put_str(col, space) + + _put_str(count_temp.format(count=count), space_count) + + _put_str(dtype, space_dtype) ) def _non_verbose_repr(): diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 60dce36312145..91610102cf0f9 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -205,6 +205,28 @@ def test_info(self, float_frame, datetime_frame): frame.info() frame.info(verbose=False) + def test_info_verbose(self): + buf = StringIO() + size = 1001 + start = 5 + frame = DataFrame(np.random.randn(3, size)) + frame.info(verbose=True, buf=buf) + + res = buf.getvalue() + header = " # Column Dtype \n--- ------ ----- " + assert header in res + + frame.info(verbose=True, buf=buf) + buf.seek(0) + lines = buf.readlines() + assert len(lines) > 0 + + for i, line in enumerate(lines): + if i >= start and i < start + size: + index = i - start + line_nr = " {} ".format(index) + assert line.startswith(line_nr) + def test_info_memory(self): # https://github.com/pandas-dev/pandas/issues/21056 df = pd.DataFrame({"a": pd.Series([1, 2], dtype="i8")}) @@ -218,7 +240,9 @@ def test_info_memory(self): <class 'pandas.core.frame.DataFrame'> RangeIndex: 2 entries, 0 to 1 Data columns (total 1 columns): - a 2 non-null int64 + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 a 2 non-null int64 dtypes: int64(1) memory usage: {} bytes """.format( @@ -262,8 +286,8 @@ def test_info_duplicate_columns_shows_correct_dtypes(self): frame.info(buf=io) io.seek(0) lines = io.readlines() - assert "a 1 non-null int64\n" == lines[3] - assert "a 1 non-null float64\n" == lines[4] + 
assert " 0 a 1 non-null int64 \n" == lines[5] + assert " 1 a 1 non-null float64\n" == lines[6] def test_info_shows_column_dtypes(self): dtypes = [ @@ -283,13 +307,20 @@ def test_info_shows_column_dtypes(self): buf = StringIO() df.info(buf=buf) res = buf.getvalue() + header = ( + " # Column Non-Null Count Dtype \n" + "--- ------ -------------- ----- " + ) + assert header in res for i, dtype in enumerate(dtypes): - name = "{i:d} {n:d} non-null {dtype}".format(i=i, n=n, dtype=dtype) + name = " {i:d} {i:d} {n:d} non-null {dtype}".format( + i=i, n=n, dtype=dtype + ) assert name in res def test_info_max_cols(self): df = DataFrame(np.random.randn(10, 5)) - for len_, verbose in [(5, None), (5, False), (10, True)]: + for len_, verbose in [(5, None), (5, False), (12, True)]: # For verbose always ^ setting ^ summarize ^ full output with option_context("max_info_columns", 4): buf = StringIO() @@ -297,16 +328,16 @@ def test_info_max_cols(self): res = buf.getvalue() assert len(res.strip().split("\n")) == len_ - for len_, verbose in [(10, None), (5, False), (10, True)]: + for len_, verbose in [(12, None), (5, False), (12, True)]: - # max_cols no exceeded + # max_cols not exceeded with option_context("max_info_columns", 5): buf = StringIO() df.info(buf=buf, verbose=verbose) res = buf.getvalue() assert len(res.strip().split("\n")) == len_ - for len_, max_cols in [(10, 5), (5, 4)]: + for len_, max_cols in [(12, 5), (5, 4)]: # setting truncates with option_context("max_info_columns", 4): buf = StringIO()
- [x] closes #17304 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28876
2019-10-09T19:38:52Z
2020-01-03T01:18:53Z
2020-01-03T01:18:53Z
2020-01-03T08:54:26Z
BUG: Allow all int types for merge (GH28870)
diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index fd1c1271a5e37..dd96c6b594cea 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -344,6 +344,7 @@ Reshaping - Bug :func:`merge_asof` could not use :class:`datetime.timedelta` for ``tolerance`` kwarg (:issue:`28098`) - Bug in :func:`merge`, did not append suffixes correctly with MultiIndex (:issue:`28518`) - :func:`qcut` and :func:`cut` now handle boolean input (:issue:`20303`) +- Fix to ensure all int dtypes can be used in :func:`merge_asof` when using a tolerance value. Previously every non-int64 type would raise an erroneous ``MergeError`` (:issue:`28870`). Sparse ^^^^^^ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 910c7ea561929..7bfc8153da568 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -28,7 +28,6 @@ is_dtype_equal, is_extension_array_dtype, is_float_dtype, - is_int64_dtype, is_integer, is_integer_dtype, is_list_like, @@ -1641,7 +1640,7 @@ def _get_merge_keys(self): if self.tolerance < Timedelta(0): raise MergeError("tolerance must be positive") - elif is_int64_dtype(lt): + elif is_integer_dtype(lt): if not is_integer(self.tolerance): raise MergeError(msg) if self.tolerance < 0: diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index caf2539a9e150..2e9ae80323159 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1287,3 +1287,19 @@ def test_timedelta_tolerance_nearest(self): ) assert_frame_equal(result, expected) + + def test_int_type_tolerance(self, any_int_dtype): + # GH #28870 + + left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]}) + right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]}) + left["a"] = left["a"].astype(any_int_dtype) + right["a"] = right["a"].astype(any_int_dtype) + + expected = pd.DataFrame( + {"a": [0, 10, 20], 
"left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]} + ) + expected["a"] = expected["a"].astype(any_int_dtype) + + result = pd.merge_asof(left, right, on="a", tolerance=10) + assert_frame_equal(result, expected)
- [x] closes #28870 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28875
2019-10-09T18:58:32Z
2019-10-11T12:09:34Z
2019-10-11T12:09:34Z
2019-10-12T15:49:35Z
use requests when it is installed
diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index fc99b458fa0af..b3402345f8c1a 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -263,6 +263,7 @@ pymysql 0.7.11 MySQL engine for sqlalchemy pyreadstat SPSS files (.sav) reading pytables 3.4.2 HDF5 reading / writing qtpy Clipboard I/O +requests 2.10.0 Improves reading data from URLs s3fs 0.0.8 Amazon S3 access xarray 0.8.2 pandas-like API for N-dimensional data xclip Clipboard I/O on linux diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index cd4e1b7e8aa4d..7756953aadbdf 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -18,6 +18,7 @@ "pandas_gbq": "0.8.0", "pyarrow": "0.9.0", "pytables": "3.4.2", + "requests": "2.10.0", "s3fs": "0.0.8", "scipy": "0.19.0", "sqlalchemy": "1.1.4", diff --git a/pandas/io/common.py b/pandas/io/common.py index 2ca2007e2925f..cbf385328429a 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -32,6 +32,7 @@ import zipfile from pandas.compat import _get_lzma_file, _import_lzma +from pandas.compat._optional import import_optional_dependency from pandas.errors import ( # noqa AbstractMethodError, DtypeWarning, @@ -184,13 +185,25 @@ def is_gcs_url(url) -> bool: def urlopen(*args, **kwargs): - """ - Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of - the stdlib. - """ - import urllib.request + compression = None + content_encoding = None + try: + requests = import_optional_dependency("requests") + r = requests.get(*args, **kwargs) + r.raise_for_status() + content = r.content + r.close() + except ImportError: + import urllib.request - return urllib.request.urlopen(*args, **kwargs) + r = urllib.request.urlopen(*args, **kwargs) + content = r.read() + content_encoding = r.headers.get("Content-Encoding", None) + if content_encoding == "gzip": + # Override compression based on Content-Encoding header. 
+ compression = "gzip" + reader = BytesIO(content) + return reader, compression def get_filepath_or_buffer( @@ -221,13 +234,7 @@ def get_filepath_or_buffer( filepath_or_buffer = _stringify_path(filepath_or_buffer) if isinstance(filepath_or_buffer, str) and _is_url(filepath_or_buffer): - req = urlopen(filepath_or_buffer) - content_encoding = req.headers.get("Content-Encoding", None) - if content_encoding == "gzip": - # Override compression based on Content-Encoding header - compression = "gzip" - reader = BytesIO(req.read()) - req.close() + reader, compression = urlopen(filepath_or_buffer) return reader, encoding, compression, True if is_s3_url(filepath_or_buffer): diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 039a0560af627..ae4c94dcde833 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -1,7 +1,6 @@ import abc from collections import OrderedDict from datetime import date, datetime, timedelta -from io import BytesIO import os from textwrap import fill @@ -339,7 +338,7 @@ class _BaseExcelReader(metaclass=abc.ABCMeta): def __init__(self, filepath_or_buffer): # If filepath_or_buffer is a url, load the data into a BytesIO if _is_url(filepath_or_buffer): - filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read()) + filepath_or_buffer, _ = urlopen(filepath_or_buffer) elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer) diff --git a/pandas/io/html.py b/pandas/io/html.py index 490c574463b9b..6bb5e5436dc5a 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -122,8 +122,7 @@ def _read(obj): raw_text : str """ if _is_url(obj): - with urlopen(obj) as url: - text = url.read() + text, _ = urlopen(obj) elif hasattr(obj, "read"): text = obj.read() elif isinstance(obj, (str, bytes)):
closes #16716 closes #28825 closes #28826 solves https://github.com/pandas-dev/pandas/pull/16910 - [ ] tests added / passed - [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Continuation of https://github.com/pandas-dev/pandas/pull/21504 and https://github.com/pandas-dev/pandas/pull/17087 This PR is not ready but I'd appreciate any early comment/feedback.
https://api.github.com/repos/pandas-dev/pandas/pulls/28874
2019-10-09T18:31:52Z
2020-02-06T23:59:52Z
null
2020-02-06T23:59:52Z
CLN: No catching needed for groupby median
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 4e0dd65042196..3df99e330fe4a 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1236,23 +1236,11 @@ def median(self, **kwargs): Series or DataFrame Median of values within each group. """ - try: - return self._cython_agg_general( - "median", - alt=lambda x, axis: Series(x).median(axis=axis, **kwargs), - **kwargs - ) - except GroupByError: - raise - except Exception: - - def f(x): - if isinstance(x, np.ndarray): - x = Series(x) - return x.median(axis=self.axis, **kwargs) - - with _group_selection_context(self): - return self._python_agg_general(f) + return self._cython_agg_general( + "median", + alt=lambda x, axis: Series(x).median(axis=axis, **kwargs), + **kwargs + ) @Substitution(name="groupby") @Appender(_common_see_also)
https://api.github.com/repos/pandas-dev/pandas/pulls/28873
2019-10-09T16:57:29Z
2019-10-10T01:50:32Z
2019-10-10T01:50:32Z
2019-10-10T04:39:24Z
TST: Use fixtures instead of setup_method for index tests
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 793992d311502..b657d8d16df81 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -33,10 +33,6 @@ class Base: _holder = None _compat_props = ["shape", "ndim", "size", "nbytes"] - def setup_indices(self): - for name, idx in self.indices.items(): - setattr(self, name, idx) - def test_pickle_compat_construction(self): # need an object to create with msg = ( @@ -205,24 +201,23 @@ def test_reindex_base(self): with pytest.raises(ValueError, match="Invalid fill method"): idx.get_indexer(idx, method="invalid") - def test_get_indexer_consistency(self): + def test_get_indexer_consistency(self, indices): # See GH 16819 - for name, index in self.indices.items(): - if isinstance(index, IntervalIndex): - continue - - if index.is_unique or isinstance(index, CategoricalIndex): - indexer = index.get_indexer(index[0:2]) - assert isinstance(indexer, np.ndarray) - assert indexer.dtype == np.intp - else: - e = "Reindexing only valid with uniquely valued Index objects" - with pytest.raises(InvalidIndexError, match=e): - index.get_indexer(index[0:2]) + if isinstance(indices, IntervalIndex): + return - indexer, _ = index.get_indexer_non_unique(index[0:2]) + if indices.is_unique or isinstance(indices, CategoricalIndex): + indexer = indices.get_indexer(indices[0:2]) assert isinstance(indexer, np.ndarray) assert indexer.dtype == np.intp + else: + e = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=e): + indices.get_indexer(indices[0:2]) + + indexer, _ = indices.get_indexer_non_unique(indices[0:2]) + assert isinstance(indexer, np.ndarray) + assert indexer.dtype == np.intp def test_ndarray_compat_properties(self): idx = self.create_index() @@ -258,146 +253,138 @@ def test_repr_max_seq_item_setting(self): repr(idx) assert "..." 
not in str(idx) - def test_copy_name(self): + def test_copy_name(self, indices): # gh-12309: Check that the "name" argument # passed at initialization is honored. + if isinstance(indices, MultiIndex): + return - for name, index in self.indices.items(): - if isinstance(index, MultiIndex): - continue - - first = index.__class__(index, copy=True, name="mario") - second = first.__class__(first, copy=False) + first = indices.__class__(indices, copy=True, name="mario") + second = first.__class__(first, copy=False) - # Even though "copy=False", we want a new object. - assert first is not second + # Even though "copy=False", we want a new object. + assert first is not second - # Not using tm.assert_index_equal() since names differ. - assert index.equals(first) + # Not using tm.assert_index_equal() since names differ. + assert indices.equals(first) - assert first.name == "mario" - assert second.name == "mario" + assert first.name == "mario" + assert second.name == "mario" - s1 = Series(2, index=first) - s2 = Series(3, index=second[:-1]) + s1 = Series(2, index=first) + s2 = Series(3, index=second[:-1]) - if not isinstance(index, CategoricalIndex): - # See gh-13365 - s3 = s1 * s2 - assert s3.index.name == "mario" + if not isinstance(indices, CategoricalIndex): + # See gh-13365 + s3 = s1 * s2 + assert s3.index.name == "mario" - def test_ensure_copied_data(self): + def test_ensure_copied_data(self, indices): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 - for name, index in self.indices.items(): - init_kwargs = {} - if isinstance(index, PeriodIndex): - # Needs "freq" specification: - init_kwargs["freq"] = index.freq - elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): - # RangeIndex cannot be initialized from data - # MultiIndex and CategoricalIndex are tested separately - continue - - index_type = index.__class__ - result = index_type(index.values, copy=True, **init_kwargs) - tm.assert_index_equal(index, result) + init_kwargs = {} + 
if isinstance(indices, PeriodIndex): + # Needs "freq" specification: + init_kwargs["freq"] = indices.freq + elif isinstance(indices, (RangeIndex, MultiIndex, CategoricalIndex)): + # RangeIndex cannot be initialized from data + # MultiIndex and CategoricalIndex are tested separately + return + + index_type = indices.__class__ + result = index_type(indices.values, copy=True, **init_kwargs) + tm.assert_index_equal(indices, result) + tm.assert_numpy_array_equal( + indices._ndarray_values, result._ndarray_values, check_same="copy" + ) + + if isinstance(indices, PeriodIndex): + # .values an object array of Period, thus copied + result = index_type(ordinal=indices.asi8, copy=False, **init_kwargs) tm.assert_numpy_array_equal( - index._ndarray_values, result._ndarray_values, check_same="copy" + indices._ndarray_values, result._ndarray_values, check_same="same" + ) + elif isinstance(indices, IntervalIndex): + # checked in test_interval.py + pass + else: + result = index_type(indices.values, copy=False, **init_kwargs) + tm.assert_numpy_array_equal( + indices.values, result.values, check_same="same" + ) + tm.assert_numpy_array_equal( + indices._ndarray_values, result._ndarray_values, check_same="same" ) - if isinstance(index, PeriodIndex): - # .values an object array of Period, thus copied - result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) - tm.assert_numpy_array_equal( - index._ndarray_values, result._ndarray_values, check_same="same" - ) - elif isinstance(index, IntervalIndex): - # checked in test_interval.py - pass - else: - result = index_type(index.values, copy=False, **init_kwargs) - tm.assert_numpy_array_equal( - index.values, result.values, check_same="same" - ) - tm.assert_numpy_array_equal( - index._ndarray_values, result._ndarray_values, check_same="same" - ) - - def test_memory_usage(self): - for name, index in self.indices.items(): - result = index.memory_usage() - if len(index): - index.get_loc(index[0]) - result2 = index.memory_usage() - 
result3 = index.memory_usage(deep=True) - - # RangeIndex, IntervalIndex - # don't have engines - if not isinstance(index, (RangeIndex, IntervalIndex)): - assert result2 > result - - if index.inferred_type == "object": - assert result3 > result2 - - else: - - # we report 0 for no-length - assert result == 0 - - def test_argsort(self): - for k, ind in self.indices.items(): - - # separately tested - if k in ["catIndex"]: - continue - - result = ind.argsort() - expected = np.array(ind).argsort() - tm.assert_numpy_array_equal(result, expected, check_dtype=False) - - def test_numpy_argsort(self): - for k, ind in self.indices.items(): - result = np.argsort(ind) - expected = ind.argsort() - tm.assert_numpy_array_equal(result, expected) - - # these are the only two types that perform - # pandas compatibility input validation - the - # rest already perform separate (or no) such - # validation via their 'values' attribute as - # defined in pandas.core.indexes/base.py - they - # cannot be changed at the moment due to - # backwards compatibility concerns - if isinstance(type(ind), (CategoricalIndex, RangeIndex)): - msg = "the 'axis' parameter is not supported" - with pytest.raises(ValueError, match=msg): - np.argsort(ind, axis=1) - - msg = "the 'kind' parameter is not supported" - with pytest.raises(ValueError, match=msg): - np.argsort(ind, kind="mergesort") - - msg = "the 'order' parameter is not supported" - with pytest.raises(ValueError, match=msg): - np.argsort(ind, order=("a", "b")) - - def test_take(self): + def test_memory_usage(self, indices): + indices._engine.clear_mapping() + result = indices.memory_usage() + if indices.empty: + # we report 0 for no-length + assert result == 0 + return + + # non-zero length + indices.get_loc(indices[0]) + result2 = indices.memory_usage() + result3 = indices.memory_usage(deep=True) + + # RangeIndex, IntervalIndex + # don't have engines + if not isinstance(indices, (RangeIndex, IntervalIndex)): + assert result2 > result + + if 
indices.inferred_type == "object": + assert result3 > result2 + + def test_argsort(self, request, indices): + # separately tested + if isinstance(indices, CategoricalIndex): + return + + result = indices.argsort() + expected = np.array(indices).argsort() + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + def test_numpy_argsort(self, indices): + result = np.argsort(indices) + expected = indices.argsort() + tm.assert_numpy_array_equal(result, expected) + + # these are the only two types that perform + # pandas compatibility input validation - the + # rest already perform separate (or no) such + # validation via their 'values' attribute as + # defined in pandas.core.indexes/base.py - they + # cannot be changed at the moment due to + # backwards compatibility concerns + if isinstance(type(indices), (CategoricalIndex, RangeIndex)): + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(indices, axis=1) + + msg = "the 'kind' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(indices, kind="mergesort") + + msg = "the 'order' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(indices, order=("a", "b")) + + def test_take(self, indices): indexer = [4, 3, 0, 2] - for k, ind in self.indices.items(): - - # separate - if k in ["boolIndex", "tuples", "empty"]: - continue + if len(indices) < 5: + # not enough elements; ignore + return - result = ind.take(indexer) - expected = ind[indexer] - assert result.equals(expected) + result = indices.take(indexer) + expected = indices[indexer] + assert result.equals(expected) - if not isinstance(ind, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): - # GH 10791 - with pytest.raises(AttributeError): - ind.freq + if not isinstance(indices, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # GH 10791 + with pytest.raises(AttributeError): + indices.freq def test_take_invalid_kwargs(self): idx = 
self.create_index() @@ -454,173 +441,152 @@ def test_where(self, klass): @pytest.mark.parametrize( "method", ["intersection", "union", "difference", "symmetric_difference"] ) - def test_set_ops_error_cases(self, case, method): - for name, idx in self.indices.items(): - # non-iterable input + def test_set_ops_error_cases(self, case, method, indices): + # non-iterable input + msg = "Input must be Index or array-like" + with pytest.raises(TypeError, match=msg): + getattr(indices, method)(case) - msg = "Input must be Index or array-like" - with pytest.raises(TypeError, match=msg): - getattr(idx, method)(case) + def test_intersection_base(self, indices): + if isinstance(indices, CategoricalIndex): + return - def test_intersection_base(self): - for name, idx in self.indices.items(): - first = idx[:5] - second = idx[:3] - intersect = first.intersection(second) + first = indices[:5] + second = indices[:3] + intersect = first.intersection(second) + assert tm.equalContents(intersect, second) - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(intersect, second) - - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - if isinstance(idx, CategoricalIndex): - pass - else: - result = first.intersection(case) - assert tm.equalContents(result, second) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with pytest.raises(TypeError, match=msg): - first.intersection([1, 2, 3]) - - def test_union_base(self): - for name, idx in self.indices.items(): - first = idx[3:] - second = idx[:5] - everything = idx - union = first.union(second) - assert tm.equalContents(union, everything) - - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - if isinstance(idx, CategoricalIndex): - pass - else: - result = first.union(case) - assert tm.equalContents(result, everything) - - if isinstance(idx, MultiIndex): - msg = "other 
must be a MultiIndex or a list of tuples" - with pytest.raises(TypeError, match=msg): - first.union([1, 2, 3]) + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.intersection(case) + assert tm.equalContents(result, second) - @pytest.mark.parametrize("sort", [None, False]) - def test_difference_base(self, sort): - for name, idx in self.indices.items(): - first = idx[2:] - second = idx[:4] - answer = idx[4:] - result = first.difference(second, sort) - - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(result, answer) + if isinstance(indices, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.intersection([1, 2, 3]) + + def test_union_base(self, indices): + first = indices[3:] + second = indices[:5] + everything = indices + union = first.union(second) + assert tm.equalContents(union, everything) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if not isinstance(indices, CategoricalIndex): + result = first.union(case) + assert tm.equalContents(result, everything) + + if isinstance(indices, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.union([1, 2, 3]) - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - if isinstance(idx, CategoricalIndex): - pass - elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): - assert result.__class__ == answer.__class__ - tm.assert_numpy_array_equal( - result.sort_values().asi8, answer.sort_values().asi8 - ) - else: - result = first.difference(case, sort) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with pytest.raises(TypeError, match=msg): - first.difference([1, 2, 3], sort) - - def 
test_symmetric_difference(self): - for name, idx in self.indices.items(): - first = idx[1:] - second = idx[:-1] - if isinstance(idx, CategoricalIndex): - pass + @pytest.mark.parametrize("sort", [None, False]) + def test_difference_base(self, sort, indices): + if isinstance(indices, CategoricalIndex): + return + + first = indices[2:] + second = indices[:4] + answer = indices[4:] + result = first.difference(second, sort) + assert tm.equalContents(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + if isinstance(indices, (DatetimeIndex, TimedeltaIndex)): + assert result.__class__ == answer.__class__ + tm.assert_numpy_array_equal( + result.sort_values().asi8, answer.sort_values().asi8 + ) else: - answer = idx[[0, -1]] - result = first.symmetric_difference(second) + result = first.difference(case, sort) assert tm.equalContents(result, answer) - # GH 10149 - cases = [klass(second.values) for klass in [np.array, Series, list]] - for case in cases: - if isinstance(idx, CategoricalIndex): - pass - else: - result = first.symmetric_difference(case) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with pytest.raises(TypeError, match=msg): - first.symmetric_difference([1, 2, 3]) - - def test_insert_base(self): - - for name, idx in self.indices.items(): - result = idx[1:4] - - if not len(idx): - continue + if isinstance(indices, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.difference([1, 2, 3], sort) + + def test_symmetric_difference(self, indices): + if isinstance(indices, CategoricalIndex): + return + + first = indices[1:] + second = indices[:-1] + answer = indices[[0, -1]] + result = first.symmetric_difference(second) + assert tm.equalContents(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, 
list]] + for case in cases: + result = first.symmetric_difference(case) + assert tm.equalContents(result, answer) + + if isinstance(indices, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.symmetric_difference([1, 2, 3]) - # test 0th element - assert idx[0:4].equals(result.insert(0, idx[0])) + def test_insert_base(self, indices): + result = indices[1:4] - def test_delete_base(self): + if not len(indices): + return - for name, idx in self.indices.items(): + # test 0th element + assert indices[0:4].equals(result.insert(0, indices[0])) - if not len(idx): - continue + def test_delete_base(self, indices): + if not len(indices): + return - if isinstance(idx, RangeIndex): - # tested in class - continue + if isinstance(indices, RangeIndex): + # tested in class + return - expected = idx[1:] - result = idx.delete(0) - assert result.equals(expected) - assert result.name == expected.name + expected = indices[1:] + result = indices.delete(0) + assert result.equals(expected) + assert result.name == expected.name - expected = idx[:-1] - result = idx.delete(-1) - assert result.equals(expected) - assert result.name == expected.name + expected = indices[:-1] + result = indices.delete(-1) + assert result.equals(expected) + assert result.name == expected.name - with pytest.raises((IndexError, ValueError)): - # either depending on numpy version - idx.delete(len(idx)) + with pytest.raises((IndexError, ValueError)): + # either depending on numpy version + indices.delete(len(indices)) - def test_equals(self): + def test_equals(self, indices): + if isinstance(indices, IntervalIndex): + # IntervalIndex tested separately + return - for name, idx in self.indices.items(): - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) + assert indices.equals(indices) + assert indices.equals(indices.copy()) + assert indices.equals(indices.astype(object)) - assert not idx.equals(list(idx)) 
- assert not idx.equals(np.array(idx)) + assert not indices.equals(list(indices)) + assert not indices.equals(np.array(indices)) - # Cannot pass in non-int64 dtype to RangeIndex - if not isinstance(idx, RangeIndex): - same_values = Index(idx, dtype=object) - assert idx.equals(same_values) - assert same_values.equals(idx) + # Cannot pass in non-int64 dtype to RangeIndex + if not isinstance(indices, RangeIndex): + same_values = Index(indices, dtype=object) + assert indices.equals(same_values) + assert same_values.equals(indices) - if idx.nlevels == 1: - # do not test MultiIndex - assert not idx.equals(pd.Series(idx)) + if indices.nlevels == 1: + # do not test MultiIndex + assert not indices.equals(Series(indices)) def test_equals_op(self): # GH9947, GH10637 @@ -686,107 +652,99 @@ def test_equals_op(self): tm.assert_numpy_array_equal(index_a == item, expected3) tm.assert_series_equal(series_a == item, Series(expected3)) - def test_hasnans_isnans(self): + def test_hasnans_isnans(self, indices): # GH 11343, added tests for hasnans / isnans + if isinstance(indices, MultiIndex): + return + + # cases in indices doesn't include NaN + idx = indices.copy(deep=True) + expected = np.array([False] * len(idx), dtype=bool) + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is False + + idx = indices.copy(deep=True) + values = np.asarray(idx.values) + + if len(indices) == 0: + return + elif isinstance(indices, DatetimeIndexOpsMixin): + values[1] = iNaT + elif isinstance(indices, (Int64Index, UInt64Index)): + return + else: + values[1] = np.nan - for name, index in self.indices.items(): - if isinstance(index, MultiIndex): - pass - else: - idx = index.copy() - - # cases in indices doesn't include NaN - expected = np.array([False] * len(idx), dtype=bool) - tm.assert_numpy_array_equal(idx._isnan, expected) - assert idx.hasnans is False - - idx = index.copy() - values = np.asarray(idx.values) - - if len(index) == 0: - continue - elif isinstance(index, 
DatetimeIndexOpsMixin): - values[1] = iNaT - elif isinstance(index, (Int64Index, UInt64Index)): - continue - else: - values[1] = np.nan - - if isinstance(index, PeriodIndex): - idx = index.__class__(values, freq=index.freq) - else: - idx = index.__class__(values) - - expected = np.array([False] * len(idx), dtype=bool) - expected[1] = True - tm.assert_numpy_array_equal(idx._isnan, expected) - assert idx.hasnans is True - - def test_fillna(self): + if isinstance(indices, PeriodIndex): + idx = indices.__class__(values, freq=indices.freq) + else: + idx = indices.__class__(values) + + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True + + def test_fillna(self, indices): # GH 11343 - for name, index in self.indices.items(): - if len(index) == 0: - pass - elif isinstance(index, MultiIndex): - idx = index.copy() - msg = "isna is not defined for MultiIndex" - with pytest.raises(NotImplementedError, match=msg): - idx.fillna(idx[0]) + if len(indices) == 0: + pass + elif isinstance(indices, MultiIndex): + idx = indices.copy(deep=True) + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.fillna(idx[0]) + else: + idx = indices.copy(deep=True) + result = idx.fillna(idx[0]) + tm.assert_index_equal(result, idx) + assert result is not idx + + msg = "'value' must be a scalar, passed: " + with pytest.raises(TypeError, match=msg): + idx.fillna([idx[0]]) + + idx = indices.copy(deep=True) + values = np.asarray(idx.values) + + if isinstance(indices, DatetimeIndexOpsMixin): + values[1] = iNaT + elif isinstance(indices, (Int64Index, UInt64Index)): + return else: - idx = index.copy() - result = idx.fillna(idx[0]) - tm.assert_index_equal(result, idx) - assert result is not idx - - msg = "'value' must be a scalar, passed: " - with pytest.raises(TypeError, match=msg): - idx.fillna([idx[0]]) - - idx = index.copy() - values = 
np.asarray(idx.values) - - if isinstance(index, DatetimeIndexOpsMixin): - values[1] = iNaT - elif isinstance(index, (Int64Index, UInt64Index)): - continue - else: - values[1] = np.nan - - if isinstance(index, PeriodIndex): - idx = index.__class__(values, freq=index.freq) - else: - idx = index.__class__(values) - - expected = np.array([False] * len(idx), dtype=bool) - expected[1] = True - tm.assert_numpy_array_equal(idx._isnan, expected) - assert idx.hasnans is True - - def test_nulls(self): - # this is really a smoke test for the methods - # as these are adequately tested for function elsewhere + values[1] = np.nan - for name, index in self.indices.items(): - if len(index) == 0: - tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool)) - elif isinstance(index, MultiIndex): - idx = index.copy() - msg = "isna is not defined for MultiIndex" - with pytest.raises(NotImplementedError, match=msg): - idx.isna() + if isinstance(indices, PeriodIndex): + idx = indices.__class__(values, freq=indices.freq) else: + idx = indices.__class__(values) - if not index.hasnans: - tm.assert_numpy_array_equal( - index.isna(), np.zeros(len(index), dtype=bool) - ) - tm.assert_numpy_array_equal( - index.notna(), np.ones(len(index), dtype=bool) - ) - else: - result = isna(index) - tm.assert_numpy_array_equal(index.isna(), result) - tm.assert_numpy_array_equal(index.notna(), ~result) + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True + + def test_nulls(self, indices): + # this is really a smoke test for the methods + # as these are adequately tested for function elsewhere + if len(indices) == 0: + tm.assert_numpy_array_equal(indices.isna(), np.array([], dtype=bool)) + elif isinstance(indices, MultiIndex): + idx = indices.copy() + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.isna() + elif not indices.hasnans: + 
tm.assert_numpy_array_equal( + indices.isna(), np.zeros(len(indices), dtype=bool) + ) + tm.assert_numpy_array_equal( + indices.notna(), np.ones(len(indices), dtype=bool) + ) + else: + result = isna(indices) + tm.assert_numpy_array_equal(indices.isna(), result) + tm.assert_numpy_array_equal(indices.notna(), ~result) def test_empty(self): # GH 15270 diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index 12c5fb8339549..2a9a8bf8d824f 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -5,28 +5,29 @@ from pandas.core.indexes.api import Index, MultiIndex import pandas.util.testing as tm -indices_list = [ - tm.makeUnicodeIndex(100), - tm.makeStringIndex(100), - tm.makeDateIndex(100), - tm.makePeriodIndex(100), - tm.makeTimedeltaIndex(100), - tm.makeIntIndex(100), - tm.makeUIntIndex(100), - tm.makeRangeIndex(100), - tm.makeFloatIndex(100), - Index([True, False]), - tm.makeCategoricalIndex(100), - tm.makeIntervalIndex(100), - Index([]), - MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), - Index([0, 0, 1, 1, 2, 2]), -] - - -@pytest.fixture(params=indices_list, ids=lambda x: type(x).__name__) +indices_dict = { + "unicode": tm.makeUnicodeIndex(100), + "string": tm.makeStringIndex(100), + "datetime": tm.makeDateIndex(100), + "period": tm.makePeriodIndex(100), + "timedelta": tm.makeTimedeltaIndex(100), + "int": tm.makeIntIndex(100), + "uint": tm.makeUIntIndex(100), + "range": tm.makeRangeIndex(100), + "float": tm.makeFloatIndex(100), + "bool": Index([True, False]), + "categorical": tm.makeCategoricalIndex(100), + "interval": tm.makeIntervalIndex(100), + "empty": Index([]), + "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), + "repeats": Index([0, 0, 1, 1, 2, 2]), +} + + +@pytest.fixture(params=indices_dict.keys()) def indices(request): - return request.param + # copy to avoid mutation, e.g. 
setting .name + return indices_dict[request.param].copy() @pytest.fixture(params=[1, np.array(1, dtype=np.int64)]) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 7523b250ea291..f7cded9f44918 100644 --- a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -58,13 +58,14 @@ def test_view(self): tm.assert_index_equal(result, i_view) def test_map_callable(self): - expected = self.index + self.index.freq - result = self.index.map(lambda x: x + x.freq) + index = self.create_index() + expected = index + index.freq + result = index.map(lambda x: x + x.freq) tm.assert_index_equal(result, expected) # map to NaT - result = self.index.map(lambda x: pd.NaT if x == self.index[0] else x) - expected = pd.Index([pd.NaT] + self.index[1:].tolist()) + result = index.map(lambda x: pd.NaT if x == index[0] else x) + expected = pd.Index([pd.NaT] + index[1:].tolist()) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -75,23 +76,24 @@ def test_map_callable(self): ], ) def test_map_dictlike(self, mapper): - expected = self.index + self.index.freq + index = self.create_index() + expected = index + index.freq # don't compare the freqs if isinstance(expected, pd.DatetimeIndex): expected.freq = None - result = self.index.map(mapper(expected, self.index)) + result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) - expected = pd.Index([pd.NaT] + self.index[1:].tolist()) - result = self.index.map(mapper(expected, self.index)) + expected = pd.Index([pd.NaT] + index[1:].tolist()) + result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) # empty map; these map to np.nan because we cannot know # to re-infer things - expected = pd.Index([np.nan] * len(self.index)) - result = self.index.map(mapper([], [])) + expected = pd.Index([np.nan] * len(index)) + result = index.map(mapper([], [])) tm.assert_index_equal(result, expected) def 
test_asobject_deprecated(self): diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 0f1d7927ee3b4..8fa87f55f404b 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -1,4 +1,5 @@ """ generic tests from the Datetimelike class """ +import pytest from pandas import DatetimeIndex, date_range from pandas.util import testing as tm @@ -9,12 +10,12 @@ class TestDatetimeIndex(DatetimeLike): _holder = DatetimeIndex - def setup_method(self, method): - self.indices = dict( - index=tm.makeDateIndex(10), - index_dec=date_range("20130110", periods=10, freq="-1D"), - ) - self.setup_indices() + @pytest.fixture( + params=[tm.makeDateIndex(10), date_range("20130110", periods=10, freq="-1D")], + ids=["index_inc", "index_dec"], + ) + def indices(self, request): + return request.param def create_index(self): return date_range("20130101", periods=5) diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index b2cb29dafac09..339bdaf79c690 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -14,10 +14,9 @@ class TestBase(Base): _holder = IntervalIndex - def setup_method(self, method): - self.index = IntervalIndex.from_arrays([0, 1], [1, 2]) - self.index_with_nan = IntervalIndex.from_tuples([(0, 1), np.nan, (1, 2)]) - self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) + @pytest.fixture + def indices(self): + return tm.makeIntervalIndex(10) def create_index(self, closed="right"): return IntervalIndex.from_breaks(range(11), closed=closed) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index ee37be7ab4c14..1a2c58bdfce37 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -25,12 +25,15 @@ class TestPeriodIndex(DatetimeLike): _holder = 
PeriodIndex - def setup_method(self, method): - self.indices = dict( - index=tm.makePeriodIndex(10), - index_dec=period_range("20130101", periods=10, freq="D")[::-1], - ) - self.setup_indices() + @pytest.fixture( + params=[ + tm.makePeriodIndex(10), + period_range("20130101", periods=10, freq="D")[::-1], + ], + ids=["index_inc", "index_dec"], + ) + def indices(self, request): + return request.param def create_index(self): return period_range("20130101", periods=5, freq="D") diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 82d5ddd1ac358..0dc6d24202c34 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -41,6 +41,7 @@ from pandas.core.indexes.api import Index, MultiIndex from pandas.core.sorting import safe_sort from pandas.tests.indexes.common import Base +from pandas.tests.indexes.conftest import indices_dict import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal @@ -48,73 +49,57 @@ class TestIndex(Base): _holder = Index - def setup_method(self, method): - self.indices = dict( - unicodeIndex=tm.makeUnicodeIndex(100), - strIndex=tm.makeStringIndex(100), - dateIndex=tm.makeDateIndex(100), - periodIndex=tm.makePeriodIndex(100), - tdIndex=tm.makeTimedeltaIndex(100), - intIndex=tm.makeIntIndex(100), - uintIndex=tm.makeUIntIndex(100), - rangeIndex=tm.makeRangeIndex(100), - floatIndex=tm.makeFloatIndex(100), - boolIndex=Index([True, False]), - catIndex=tm.makeCategoricalIndex(100), - empty=Index([]), - tuples=MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), - repeats=Index([0, 0, 1, 1, 2, 2]), - ) - self.setup_indices() + @pytest.fixture + def index(self, request): + """ + Fixture for selectively parametrizing indices_dict via indirect parametrization + (parametrize over indices_dict keys with indirect=True). Defaults to string + index if no keys are provided. + """ + key = getattr(request, "param", "string") + + # copy to avoid mutation, e.g. 
setting .name + return indices_dict[key].copy() def create_index(self): return Index(list("abcde")) - def generate_index_types(self, skip_index_keys=[]): - """ - Return a generator of the various index types, leaving - out the ones with a key in skip_index_keys - """ - for key, index in self.indices.items(): - if key not in skip_index_keys: - yield key, index - def test_can_hold_identifiers(self): index = self.create_index() key = index[0] assert index._can_hold_identifiers_and_holds_name(key) is True - def test_new_axis(self): - new_index = self.dateIndex[None, :] + @pytest.mark.parametrize("index", ["datetime"], indirect=True) + def test_new_axis(self, index): + new_index = index[None, :] assert new_index.ndim == 2 assert isinstance(new_index, np.ndarray) - def test_copy_and_deepcopy(self): - new_copy2 = self.intIndex.copy(dtype=int) + @pytest.mark.parametrize("index", ["int", "uint", "float"], indirect=True) + def test_copy_and_deepcopy(self, index): + new_copy2 = index.copy(dtype=int) assert new_copy2.dtype.kind == "i" - @pytest.mark.parametrize("attr", ["strIndex", "dateIndex"]) - def test_constructor_regular(self, attr): - # regular instance creation - index = getattr(self, attr) - tm.assert_contains_all(index, index) + def test_constructor_regular(self, indices): + tm.assert_contains_all(indices, indices) - def test_constructor_casting(self): + def test_constructor_casting(self, index): # casting - arr = np.array(self.strIndex) - index = Index(arr) - tm.assert_contains_all(arr, index) - tm.assert_index_equal(self.strIndex, index) + arr = np.array(index) + new_index = Index(arr) + tm.assert_contains_all(arr, new_index) + tm.assert_index_equal(index, new_index) - def test_constructor_copy(self): + def test_constructor_copy(self, index): # copy - arr = np.array(self.strIndex) - index = Index(arr, copy=True, name="name") - assert isinstance(index, Index) - assert index.name == "name" - tm.assert_numpy_array_equal(arr, index.values) + # index = 
self.create_index() + arr = np.array(index) + new_index = Index(arr, copy=True, name="name") + assert isinstance(new_index, Index) + assert new_index.name == "name" + tm.assert_numpy_array_equal(arr, new_index.values) arr[0] = "SOMEBIGLONGSTRING" - assert index[0] != "SOMEBIGLONGSTRING" + assert new_index[0] != "SOMEBIGLONGSTRING" # what to do here? # arr = np.array(5.) @@ -570,37 +555,50 @@ def test_constructor_cast(self): with pytest.raises(ValueError, match=msg): Index(["a", "b", "c"], dtype=float) - def test_view_with_args(self): - restricted = ["unicodeIndex", "strIndex", "catIndex", "boolIndex", "empty"] - for i in list(set(self.indices.keys()) - set(restricted)): - ind = self.indices[i] - ind.view("i8") + @pytest.mark.parametrize( + "index", + [ + "datetime", + "float", + "int", + "period", + "range", + "repeats", + "timedelta", + "tuples", + "uint", + ], + indirect=True, + ) + def test_view_with_args(self, index): + index.view("i8") @pytest.mark.parametrize( - "index_type", + "index", [ - "unicodeIndex", - "strIndex", - pytest.param("catIndex", marks=pytest.mark.xfail(reason="gh-25464")), - "boolIndex", + "unicode", + "string", + pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")), + "bool", "empty", ], + indirect=True, ) - def test_view_with_args_object_array_raises(self, index_type): - ind = self.indices[index_type] + def test_view_with_args_object_array_raises(self, index): msg = "Cannot change data-type for object array" with pytest.raises(TypeError, match=msg): - ind.view("i8") + index.view("i8") - def test_astype(self): - casted = self.intIndex.astype("i8") + @pytest.mark.parametrize("index", ["int", "range"], indirect=True) + def test_astype(self, index): + casted = index.astype("i8") # it works! 
casted.get_loc(5) # pass on name - self.intIndex.name = "foobar" - casted = self.intIndex.astype("i8") + index.name = "foobar" + casted = index.astype("i8") assert casted.name == "foobar" def test_equals_object(self): @@ -700,16 +698,17 @@ def test_is_(self): ind2 = Index(arr, copy=False) assert not ind1.is_(ind2) - def test_asof(self): - d = self.dateIndex[0] - assert self.dateIndex.asof(d) == d - assert isna(self.dateIndex.asof(d - timedelta(1))) + @pytest.mark.parametrize("index", ["datetime"], indirect=True) + def test_asof(self, index): + d = index[0] + assert index.asof(d) == d + assert isna(index.asof(d - timedelta(1))) - d = self.dateIndex[-1] - assert self.dateIndex.asof(d + timedelta(1)) == d + d = index[-1] + assert index.asof(d + timedelta(1)) == d - d = self.dateIndex[0].to_pydatetime() - assert isinstance(self.dateIndex.asof(d), Timestamp) + d = index[0].to_pydatetime() + assert isinstance(index.asof(d), Timestamp) def test_asof_datetime_partial(self): index = pd.date_range("2010-01-01", periods=2, freq="m") @@ -731,40 +730,39 @@ def test_nanosecond_index_access(self): expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns") assert first_value == x[Timestamp(expected_ts)] - def test_booleanindex(self): - boolIndex = np.repeat(True, len(self.strIndex)).astype(bool) - boolIndex[5:30:2] = False + def test_booleanindex(self, index): + bool_index = np.repeat(True, len(index)).astype(bool) + bool_index[5:30:2] = False - subIndex = self.strIndex[boolIndex] + sub_index = index[bool_index] - for i, val in enumerate(subIndex): - assert subIndex.get_loc(val) == i + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i - subIndex = self.strIndex[list(boolIndex)] - for i, val in enumerate(subIndex): - assert subIndex.get_loc(val) == i + sub_index = index[list(bool_index)] + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i def test_fancy(self): - sl = self.strIndex[[1, 2, 3]] + index = 
self.create_index() + sl = index[[1, 2, 3]] for i in sl: assert i == sl[sl.get_loc(i)] - @pytest.mark.parametrize("attr", ["strIndex", "intIndex", "floatIndex"]) + @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) @pytest.mark.parametrize("dtype", [np.int_, np.bool_]) - def test_empty_fancy(self, attr, dtype): + def test_empty_fancy(self, index, dtype): empty_arr = np.array([], dtype=dtype) - index = getattr(self, attr) empty_index = index.__class__([]) assert index[[]].identical(empty_index) assert index[empty_arr].identical(empty_index) - @pytest.mark.parametrize("attr", ["strIndex", "intIndex", "floatIndex"]) - def test_empty_fancy_raises(self, attr): + @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) + def test_empty_fancy_raises(self, index): # pd.DatetimeIndex is excluded, because it overrides getitem and should # be tested separately. empty_farr = np.array([], dtype=np.float_) - index = getattr(self, attr) empty_index = index.__class__([]) assert index[[]].identical(empty_index) @@ -774,9 +772,9 @@ def test_empty_fancy_raises(self, attr): index[empty_farr] @pytest.mark.parametrize("sort", [None, False]) - def test_intersection(self, sort): - first = self.strIndex[:20] - second = self.strIndex[:10] + def test_intersection(self, index, sort): + first = index[:20] + second = index[:10] intersect = first.intersection(second, sort=sort) if sort is None: tm.assert_index_equal(intersect, second.sort_values()) @@ -812,10 +810,10 @@ def test_intersection_name_preservation(self, index2, keeps_name, sort): ) @pytest.mark.parametrize("sort", [None, False]) def test_intersection_name_preservation2( - self, first_name, second_name, expected_name, sort + self, index, first_name, second_name, expected_name, sort ): - first = self.strIndex[5:20] - second = self.strIndex[:10] + first = index[5:20] + second = index[:10] first.name = first_name second.name = second_name intersect = first.intersection(second, sort=sort) @@ 
-900,11 +898,10 @@ def test_chained_union(self, sort): tm.assert_index_equal(union, expected) @pytest.mark.parametrize("sort", [None, False]) - def test_union(self, sort): - # TODO: Replace with fixturesult - first = self.strIndex[5:20] - second = self.strIndex[:10] - everything = self.strIndex[:20] + def test_union(self, index, sort): + first = index[5:20] + second = index[:10] + everything = index[:20] union = first.union(second, sort=sort) if sort is None: @@ -965,12 +962,11 @@ def test_union_sort_other_incomparable_true(self): @pytest.mark.parametrize("klass", [np.array, Series, list]) @pytest.mark.parametrize("sort", [None, False]) - def test_union_from_iterables(self, klass, sort): + def test_union_from_iterables(self, index, klass, sort): # GH 10149 - # TODO: Replace with fixturesult - first = self.strIndex[5:20] - second = self.strIndex[:10] - everything = self.strIndex[:20] + first = index[5:20] + second = index[:10] + everything = index[:20] case = klass(second.values) result = first.union(case, sort=sort) @@ -979,9 +975,8 @@ def test_union_from_iterables(self, klass, sort): assert tm.equalContents(result, everything) @pytest.mark.parametrize("sort", [None, False]) - def test_union_identity(self, sort): - # TODO: replace with fixturesult - first = self.strIndex[5:20] + def test_union_identity(self, index, sort): + first = index[5:20] union = first.union(first, sort=sort) # i.e. 
identity is not preserved when sort is True @@ -1021,19 +1016,21 @@ def test_union_name_preservation( @pytest.mark.parametrize("sort", [None, False]) def test_union_dt_as_obj(self, sort): # TODO: Replace with fixturesult - firstCat = self.strIndex.union(self.dateIndex) - secondCat = self.strIndex.union(self.strIndex) + index = self.create_index() + date_index = pd.date_range("2019-01-01", periods=10) + first_cat = index.union(date_index) + second_cat = index.union(index) - if self.dateIndex.dtype == np.object_: - appended = np.append(self.strIndex, self.dateIndex) + if date_index.dtype == np.object_: + appended = np.append(index, date_index) else: - appended = np.append(self.strIndex, self.dateIndex.astype("O")) + appended = np.append(index, date_index.astype("O")) - assert tm.equalContents(firstCat, appended) - assert tm.equalContents(secondCat, self.strIndex) - tm.assert_contains_all(self.strIndex, firstCat) - tm.assert_contains_all(self.strIndex, secondCat) - tm.assert_contains_all(self.dateIndex, firstCat) + assert tm.equalContents(first_cat, appended) + assert tm.equalContents(second_cat, index) + tm.assert_contains_all(index, first_cat) + tm.assert_contains_all(index, second_cat) + tm.assert_contains_all(date_index, first_cat) @pytest.mark.parametrize( "method", ["union", "intersection", "difference", "symmetric_difference"] @@ -1045,11 +1042,9 @@ def test_setops_disallow_true(self, method): with pytest.raises(ValueError, match="The 'sort' keyword only takes"): getattr(idx1, method)(idx2, sort=True) - def test_map_identity_mapping(self): + def test_map_identity_mapping(self, indices): # GH 12766 - # TODO: replace with fixture - for name, cur_index in self.indices.items(): - tm.assert_index_equal(cur_index, cur_index.map(lambda x: x)) + tm.assert_index_equal(indices, indices.map(lambda x: x)) def test_map_with_tuples(self): # GH 12766 @@ -1096,31 +1091,37 @@ def test_map_tseries_indices_accsr_return_index(self): lambda values, index: pd.Series(values, index), 
], ) - def test_map_dictlike(self, mapper): + def test_map_dictlike_simple(self, mapper): # GH 12756 expected = Index(["foo", "bar", "baz"]) index = tm.makeIntIndex(3) result = index.map(mapper(expected.values, index)) tm.assert_index_equal(result, expected) - # TODO: replace with fixture - for name in self.indices.keys(): - if name == "catIndex": - # Tested in test_categorical - continue - elif name == "repeats": - # Cannot map duplicated index - continue - - index = self.indices[name] - expected = Index(np.arange(len(index), 0, -1)) - + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: pd.Series(values, index), + ], + ) + def test_map_dictlike(self, indices, mapper): + # GH 12756 + if isinstance(indices, CategoricalIndex): + # Tested in test_categorical + return + elif not indices.is_unique: + # Cannot map duplicated index + return + + if indices.empty: # to match proper result coercion for uints - if name == "empty": - expected = Index([]) + expected = Index([]) + else: + expected = Index(np.arange(len(indices), 0, -1)) - result = index.map(mapper(expected, index)) - tm.assert_index_equal(result, expected) + result = indices.map(mapper(expected, indices)) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "mapper", @@ -1169,11 +1170,10 @@ def test_append_empty_preserve_name(self, name, expected): @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")]) @pytest.mark.parametrize("sort", [None, False]) - def test_difference_name_preservation(self, second_name, expected, sort): - # TODO: replace with fixturesult - first = self.strIndex[5:20] - second = self.strIndex[:10] - answer = self.strIndex[10:20] + def test_difference_name_preservation(self, index, second_name, expected, sort): + first = index[5:20] + second = index[:10] + answer = index[10:20] first.name = "name" second.name = second_name @@ -1187,8 +1187,8 @@ def 
test_difference_name_preservation(self, second_name, expected, sort): assert result.name == expected @pytest.mark.parametrize("sort", [None, False]) - def test_difference_empty_arg(self, sort): - first = self.strIndex[5:20] + def test_difference_empty_arg(self, index, sort): + first = index[5:20] first.name == "name" result = first.difference([], sort) @@ -1196,8 +1196,8 @@ def test_difference_empty_arg(self, sort): assert result.name == first.name @pytest.mark.parametrize("sort", [None, False]) - def test_difference_identity(self, sort): - first = self.strIndex[5:20] + def test_difference_identity(self, index, sort): + first = index[5:20] first.name == "name" result = first.difference(first, sort) @@ -1205,12 +1205,12 @@ def test_difference_identity(self, sort): assert result.name == first.name @pytest.mark.parametrize("sort", [None, False]) - def test_difference_sort(self, sort): - first = self.strIndex[5:20] - second = self.strIndex[:10] + def test_difference_sort(self, index, sort): + first = index[5:20] + second = index[:10] result = first.difference(second, sort) - expected = self.strIndex[10:20] + expected = index[10:20] if sort is None: expected = expected.sort_values() @@ -1267,7 +1267,7 @@ def test_difference_incomparable_true(self, opname): @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_mi(self, sort): - index1 = MultiIndex.from_tuples(self.tuples) + index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])) index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)]) result = index1.symmetric_difference(index2, sort=sort) expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)]) @@ -1308,73 +1308,78 @@ def test_symmetric_difference_non_index(self, sort): assert result.name == "new_name" @pytest.mark.parametrize("sort", [None, False]) - def test_difference_type(self, sort): + def test_difference_type(self, indices, sort): # GH 20040 # If taking difference of a set and itself, it # needs to preserve 
the type of the index - skip_index_keys = ["repeats"] - for key, index in self.generate_index_types(skip_index_keys): - result = index.difference(index, sort=sort) - expected = index.drop(index) - tm.assert_index_equal(result, expected) + if not indices.is_unique: + return + result = indices.difference(indices, sort=sort) + expected = indices.drop(indices) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize("sort", [None, False]) - def test_intersection_difference(self, sort): + def test_intersection_difference(self, indices, sort): # GH 20040 # Test that the intersection of an index with an # empty index produces the same index as the difference # of an index with itself. Test for all types - skip_index_keys = ["repeats"] - for key, index in self.generate_index_types(skip_index_keys): - inter = index.intersection(index.drop(index)) - diff = index.difference(index, sort=sort) - tm.assert_index_equal(inter, diff) + if not indices.is_unique: + return + inter = indices.intersection(indices.drop(indices)) + diff = indices.difference(indices, sort=sort) + tm.assert_index_equal(inter, diff) @pytest.mark.parametrize( - "attr,expected", + "index, expected", [ - ("strIndex", False), - ("boolIndex", False), - ("catIndex", False), - ("intIndex", True), - ("dateIndex", False), - ("floatIndex", True), + ("string", False), + ("bool", False), + ("categorical", False), + ("int", True), + ("datetime", False), + ("float", True), ], + indirect=["index"], ) - def test_is_numeric(self, attr, expected): - assert getattr(self, attr).is_numeric() == expected + def test_is_numeric(self, index, expected): + assert index.is_numeric() is expected @pytest.mark.parametrize( - "attr,expected", + "index, expected", [ - ("strIndex", True), - ("boolIndex", True), - ("catIndex", False), - ("intIndex", False), - ("dateIndex", False), - ("floatIndex", False), + ("string", True), + ("bool", True), + ("categorical", False), + ("int", False), + ("datetime", False), + ("float", False), ], + 
indirect=["index"], ) - def test_is_object(self, attr, expected): - assert getattr(self, attr).is_object() == expected + def test_is_object(self, index, expected): + assert index.is_object() is expected @pytest.mark.parametrize( - "attr,expected", + "index, expected", [ - ("strIndex", False), - ("boolIndex", False), - ("catIndex", False), - ("intIndex", False), - ("dateIndex", True), - ("floatIndex", False), + ("string", False), + ("bool", False), + ("categorical", False), + ("int", False), + ("datetime", True), + ("float", False), ], + indirect=["index"], ) - def test_is_all_dates(self, attr, expected): - assert getattr(self, attr).is_all_dates == expected + def test_is_all_dates(self, index, expected): + assert index.is_all_dates is expected + + def test_summary(self, indices): + self._check_method_works(Index._summary, indices) - def test_summary(self): - self._check_method_works(Index._summary) - # GH3869 + def test_summary_bug(self): + # GH3869` ind = Index(["{other}%s", "~:{range}:0"], name="A") result = ind._summary() # shouldn't be formatted accidentally. 
@@ -1388,9 +1393,10 @@ def test_summary_deprecated(self): with tm.assert_produces_warning(FutureWarning): ind.summary() - def test_format(self): - self._check_method_works(Index.format) + def test_format(self, indices): + self._check_method_works(Index.format, indices) + def test_format_bug(self): # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't # include us since the default for Timestamp shows these but Index @@ -1402,7 +1408,7 @@ def test_format(self): expected = [str(index[0])] assert formatted == expected - self.strIndex[:0].format() + Index([]).format() @pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]]) def test_format_missing(self, vals, nulls_fixture): @@ -1419,8 +1425,7 @@ def test_format_missing(self, vals, nulls_fixture): def test_format_with_name_time_info(self): # bug I fixed 12/20/2011 - inc = timedelta(hours=4) - dates = Index([dt + inc for dt in self.dateIndex], name="something") + dates = date_range("2011-01-01 04:00:00", periods=10, name="something") formatted = dates.format(name=True) assert formatted[0] == "something" @@ -1438,15 +1443,8 @@ def test_logical_compat(self, op): index = self.create_index() assert getattr(index, op)() == getattr(index.values, op)() - def _check_method_works(self, method): - # TODO: make this a dedicated test with parametrized methods - method(self.empty) - method(self.dateIndex) - method(self.unicodeIndex) - method(self.strIndex) - method(self.intIndex) - method(self.tuples) - method(self.catIndex) + def _check_method_works(self, method, index): + method(index) def test_get_indexer(self): index1 = Index([1, 2, 3, 4, 5]) @@ -1766,38 +1764,37 @@ def test_slice_locs_negative_step(self, in_slice, expected): expected = pd.Index(list(expected)) tm.assert_index_equal(result, expected) - def test_drop_by_str_label(self): - # TODO: Parametrize these after replacing self.strIndex with fixture - n = len(self.strIndex) - drop = self.strIndex[list(range(5, 10))] - dropped = 
self.strIndex.drop(drop) + @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) + def test_drop_by_str_label(self, index): + n = len(index) + drop = index[list(range(5, 10))] + dropped = index.drop(drop) - expected = self.strIndex[list(range(5)) + list(range(10, n))] + expected = index[list(range(5)) + list(range(10, n))] tm.assert_index_equal(dropped, expected) - dropped = self.strIndex.drop(self.strIndex[0]) - expected = self.strIndex[1:] + dropped = index.drop(index[0]) + expected = index[1:] tm.assert_index_equal(dropped, expected) + @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) @pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]]) - def test_drop_by_str_label_raises_missing_keys(self, keys): + def test_drop_by_str_label_raises_missing_keys(self, index, keys): with pytest.raises(KeyError, match=""): - self.strIndex.drop(keys) + index.drop(keys) - def test_drop_by_str_label_errors_ignore(self): - # TODO: Parametrize these after replacing self.strIndex with fixture - - # errors='ignore' - n = len(self.strIndex) - drop = self.strIndex[list(range(5, 10))] + @pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True) + def test_drop_by_str_label_errors_ignore(self, index): + n = len(index) + drop = index[list(range(5, 10))] mixed = drop.tolist() + ["foo"] - dropped = self.strIndex.drop(mixed, errors="ignore") + dropped = index.drop(mixed, errors="ignore") - expected = self.strIndex[list(range(5)) + list(range(10, n))] + expected = index[list(range(5)) + list(range(10, n))] tm.assert_index_equal(dropped, expected) - dropped = self.strIndex.drop(["foo", "bar"], errors="ignore") - expected = self.strIndex[list(range(n))] + dropped = index.drop(["foo", "bar"], errors="ignore") + expected = index[list(range(n))] tm.assert_index_equal(dropped, expected) def test_drop_by_numeric_label_loc(self): @@ -1916,12 +1913,15 @@ def test_set_value_deprecated(self): idx.set_value(arr, idx[1], 80) 
assert arr[1] == 80 - def test_get_value(self): + @pytest.mark.parametrize( + "index", ["string", "int", "datetime", "timedelta"], indirect=True + ) + def test_get_value(self, index): # TODO: Remove function? GH 19728 values = np.random.randn(100) - date = self.dateIndex[67] + value = index[67] - assert_almost_equal(self.dateIndex.get_value(values, date), values[67]) + assert_almost_equal(index.get_value(values, value), values[67]) @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}]) @pytest.mark.parametrize( @@ -2040,8 +2040,8 @@ def test_boolean_cmp(self, values): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")]) - def test_get_level_values(self, name, level): - expected = self.strIndex.copy() + def test_get_level_values(self, index, name, level): + expected = index.copy() if name: expected.name = name @@ -2052,14 +2052,12 @@ def test_slice_keep_name(self): index = Index(["a", "b"], name="asdf") assert index.name == index[1:].name - # instance attributes of the form self.<name>Index - @pytest.mark.parametrize("index_kind", ["unicode", "str", "date", "int", "float"]) - def test_join_self(self, join_type, index_kind): - - res = getattr(self, "{0}Index".format(index_kind)) - - joined = res.join(res, how=join_type) - assert res is joined + @pytest.mark.parametrize( + "index", ["unicode", "string", "datetime", "int", "float"], indirect=True + ) + def test_join_self(self, index, join_type): + joined = index.join(index, how=join_type) + assert index is joined @pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"]) def test_str_attribute(self, method): @@ -2424,10 +2422,11 @@ def test_tab_complete_warning(self, ip): with provisionalcompleter("ignore"): list(ip.Completer.completions("idx.", 4)) - def test_deprecated_contains(self): - for index in self.indices.values(): - with tm.assert_produces_warning(FutureWarning): - index.contains(1) + def 
test_deprecated_contains(self, indices): + # deprecated for all types except IntervalIndex + warning = FutureWarning if not isinstance(indices, pd.IntervalIndex) else None + with tm.assert_produces_warning(warning): + indices.contains(1) class TestMixedIntIndex(Base): @@ -2437,12 +2436,12 @@ class TestMixedIntIndex(Base): _holder = Index - def setup_method(self, method): - self.indices = dict(mixedIndex=Index([0, "a", 1, "b", 2, "c"])) - self.setup_indices() + @pytest.fixture(params=[[0, "a", 1, "b", 2, "c"]], ids=["mixedIndex"]) + def indices(self, request): + return Index(request.param) def create_index(self): - return self.mixedIndex + return Index([0, "a", 1, "b", 2, "c"]) def test_argsort(self): index = self.create_index() @@ -2766,13 +2765,12 @@ def test_ensure_index_mixed_closed_intervals(self): ], ) def test_generated_op_names(opname, indices): - index = indices - if isinstance(index, ABCIndex) and opname == "rsub": + if isinstance(indices, ABCIndex) and opname == "rsub": # pd.Index.__rsub__ does not exist; though the method does exist # for subclasses. 
see GH#19723 return opname = "__{name}__".format(name=opname) - method = getattr(index, opname) + method = getattr(indices, opname) assert method.__name__ == opname diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 67bf9bd20e716..4326c3f8188fc 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -19,9 +19,9 @@ class TestCategoricalIndex(Base): _holder = CategoricalIndex - def setup_method(self, method): - self.indices = dict(catIndex=tm.makeCategoricalIndex(100)) - self.setup_indices() + @pytest.fixture + def indices(self, request): + return tm.makeCategoricalIndex(100) def create_index(self, categories=None, ordered=False): if categories is None: @@ -780,7 +780,7 @@ def test_identical(self): assert ci1.identical(ci1.copy()) assert not ci1.identical(ci2) - def test_ensure_copied_data(self): + def test_ensure_copied_data(self, indices): # gh-12309: Check the "copy" argument of each # Index.__new__ is honored. # @@ -788,13 +788,12 @@ def test_ensure_copied_data(self): # self.value is not an ndarray. 
_base = lambda ar: ar if ar.base is None else ar.base - for index in self.indices.values(): - result = CategoricalIndex(index.values, copy=True) - tm.assert_index_equal(index, result) - assert _base(index.values) is not _base(result.values) + result = CategoricalIndex(indices.values, copy=True) + tm.assert_index_equal(indices, result) + assert _base(indices.values) is not _base(result.values) - result = CategoricalIndex(index.values, copy=False) - assert _base(index.values) is _base(result.values) + result = CategoricalIndex(indices.values, copy=False) + assert _base(indices.values) is _base(result.values) def test_equals_categorical(self): ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 8bc9783694492..e424b3601a4b2 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import re import numpy as np @@ -87,32 +87,42 @@ def test_where(self, klass): result = i.where(klass(cond)) tm.assert_index_equal(result, expected) - def test_insert(self): + def test_insert(self, nulls_fixture): # GH 18295 (test missing) - expected = Float64Index([0, np.nan, 1, 2, 3, 4]) - for na in (np.nan, pd.NaT, None): - result = self.create_index().insert(1, na) - tm.assert_index_equal(result, expected) + index = self.create_index() + expected = Float64Index([index[0], np.nan] + list(index[1:])) + result = index.insert(1, nulls_fixture) + tm.assert_index_equal(result, expected) class TestFloat64Index(Numeric): _holder = Float64Index - def setup_method(self, method): - self.indices = dict( - mixed=Float64Index([1.5, 2, 3, 4, 5]), - float=Float64Index(np.arange(5) * 2.5), - mixed_dec=Float64Index([5, 4, 3, 2, 1.5]), - float_dec=Float64Index(np.arange(4, -1, -1) * 2.5), - ) - self.setup_indices() + @pytest.fixture( + params=[ + [1.5, 2, 3, 4, 5], + [0.0, 
2.5, 5.0, 7.5, 10.0], + [5, 4, 3, 2, 1.5], + [10.0, 7.5, 5.0, 2.5, 0.0], + ], + ids=["mixed", "float", "mixed_dec", "float_dec"], + ) + def indices(self, request): + return Float64Index(request.param) + + @pytest.fixture + def mixed_index(self): + return Float64Index([1.5, 2, 3, 4, 5]) + + @pytest.fixture + def float_index(self): + return Float64Index([0.0, 2.5, 5.0, 7.5, 10.0]) def create_index(self): return Float64Index(np.arange(5, dtype="float64")) - def test_repr_roundtrip(self): - for ind in (self.mixed, self.float): - tm.assert_index_equal(eval(repr(ind)), ind) + def test_repr_roundtrip(self, indices): + tm.assert_index_equal(eval(repr(indices)), indices) def check_is_index(self, i): assert isinstance(i, Index) @@ -176,30 +186,32 @@ def test_constructor_invalid(self): with pytest.raises(TypeError, match=msg): Float64Index([Timestamp("20130101")]) - def test_constructor_coerce(self): + def test_constructor_coerce(self, mixed_index, float_index): - self.check_coerce(self.mixed, Index([1.5, 2, 3, 4, 5])) - self.check_coerce(self.float, Index(np.arange(5) * 2.5)) - self.check_coerce(self.float, Index(np.array(np.arange(5) * 2.5, dtype=object))) + self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) + self.check_coerce(float_index, Index(np.arange(5) * 2.5)) + self.check_coerce( + float_index, Index(np.array(np.arange(5) * 2.5, dtype=object)) + ) - def test_constructor_explicit(self): + def test_constructor_explicit(self, mixed_index, float_index): # these don't auto convert self.check_coerce( - self.float, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False + float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False ) self.check_coerce( - self.mixed, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False + mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False ) - def test_astype(self): + def test_astype(self, mixed_index, float_index): - result = self.float.astype(object) - assert 
result.equals(self.float) - assert self.float.equals(result) + result = float_index.astype(object) + assert result.equals(float_index) + assert float_index.equals(result) self.check_is_index(result) - i = self.mixed.copy() + i = mixed_index.copy() i.name = "foo" result = i.astype(object) assert result.equals(i) @@ -451,11 +463,12 @@ def test_view(self): tm.assert_index_equal(i, self._holder(i_view, name="Foo")) def test_is_monotonic(self): - assert self.index.is_monotonic is True - assert self.index.is_monotonic_increasing is True - assert self.index._is_strictly_monotonic_increasing is True - assert self.index.is_monotonic_decreasing is False - assert self.index._is_strictly_monotonic_decreasing is False + index = self._holder([1, 2, 3, 4]) + assert index.is_monotonic is True + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_decreasing is False index = self._holder([4, 3, 2, 1]) assert index.is_monotonic is False @@ -490,23 +503,22 @@ def test_logical_compat(self): assert idx.any() == idx.values.any() def test_identical(self): - i = Index(self.index.copy()) - assert i.identical(self.index) + index = self.create_index() + i = Index(index.copy()) + assert i.identical(index) same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) - i = self.index.copy(dtype=object) + i = index.copy(dtype=object) i = i.rename("foo") same_values = Index(i, dtype=object) assert same_values.identical(i) - assert not i.identical(self.index) + assert not i.identical(index) assert Index(same_values, name="foo", dtype=object).identical(i) - assert not self.index.copy(dtype=object).identical( - self.index.copy(dtype=self._dtype) - ) + assert not index.copy(dtype=object).identical(index.copy(dtype=self._dtype)) def test_join_non_unique(self): left = Index([4, 4, 3, 3]) @@ -522,23 +534,21 @@ def 
test_join_non_unique(self): exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) tm.assert_numpy_array_equal(ridx, exp_ridx) - @pytest.mark.parametrize("kind", ["outer", "inner", "left", "right"]) - def test_join_self(self, kind): - joined = self.index.join(self.index, how=kind) - assert self.index is joined + def test_join_self(self, join_type): + index = self.create_index() + joined = index.join(index, how=join_type) + assert index is joined def test_union_noncomparable(self): - from datetime import datetime, timedelta - # corner case, non-Int64Index - now = datetime.now() - other = Index([now + timedelta(i) for i in range(4)], dtype=object) - result = self.index.union(other) - expected = Index(np.concatenate((self.index, other))) + index = self.create_index() + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other) + expected = Index(np.concatenate((index, other))) tm.assert_index_equal(result, expected) - result = other.union(self.index) - expected = Index(np.concatenate((other, self.index))) + result = other.union(index) + expected = Index(np.concatenate((other, index))) tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): @@ -557,10 +567,12 @@ def test_cant_or_shouldnt_cast(self): self._holder(data) def test_view_index(self): - self.index.view(Index) + index = self.create_index() + index.view(Index) def test_prevent_casting(self): - result = self.index.astype("O") + index = self.create_index() + result = index.astype("O") assert result.dtype == np.object_ def test_take_preserve_name(self): @@ -604,15 +616,15 @@ class TestInt64Index(NumericInt): _dtype = "int64" _holder = Int64Index - def setup_method(self, method): - self.indices = dict( - index=Int64Index(np.arange(0, 20, 2)), - index_dec=Int64Index(np.arange(19, -1, -1)), - ) - self.setup_indices() + @pytest.fixture( + params=[range(0, 20, 2), range(19, -1, -1)], ids=["index_inc", "index_dec"] + ) + def indices(self, 
request): + return Int64Index(request.param) def create_index(self): - return Int64Index(np.arange(5, dtype="int64")) + # return Int64Index(np.arange(5, dtype="int64")) + return Int64Index(range(0, 20, 2)) def test_constructor(self): # pass list, coerce fine @@ -633,9 +645,9 @@ def test_constructor(self): Int64Index(5) # copy - arr = self.index.values + arr = index.values new_index = Int64Index(arr, copy=True) - tm.assert_index_equal(new_index, self.index) + tm.assert_index_equal(new_index, index) val = arr[0] + 3000 # this should not change index @@ -691,39 +703,42 @@ def test_coerce_list(self): assert isinstance(arr, Index) def test_get_indexer(self): + index = self.create_index() target = Int64Index(np.arange(10)) - indexer = self.index.get_indexer(target) + indexer = index.get_indexer(target) expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) - indexer = self.index.get_indexer(target, method="pad") + indexer = index.get_indexer(target, method="pad") expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = Int64Index(np.arange(10)) - indexer = self.index.get_indexer(target, method="backfill") + indexer = index.get_indexer(target, method="backfill") expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_intersection(self): + index = self.create_index() other = Index([1, 2, 3, 4, 5]) - result = self.index.intersection(other) - expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) + result = index.intersection(other) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) - result = other.intersection(self.index) + result = other.intersection(index) expected = Index( - np.sort(np.asarray(np.intersect1d(self.index.values, other.values))) + 
np.sort(np.asarray(np.intersect1d(index.values, other.values))) ) tm.assert_index_equal(result, expected) def test_join_inner(self): + index = self.create_index() other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic - res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True) + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() @@ -741,9 +756,9 @@ def test_join_inner(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="inner", return_indexers=True) + res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True) - res2 = self.index.intersection(other_mono) + res2 = index.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 6], dtype=np.intp) @@ -754,12 +769,13 @@ def test_join_inner(self): tm.assert_numpy_array_equal(ridx, eridx) def test_join_left(self): + index = self.create_index() other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic - res, lidx, ridx = self.index.join(other, how="left", return_indexers=True) - eres = self.index + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) @@ -768,7 +784,7 @@ def test_join_left(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="left", return_indexers=True) + res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True) eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) assert isinstance(res, Int64Index) tm.assert_index_equal(res, eres) @@ -787,11 +803,12 @@ def test_join_left(self): tm.assert_numpy_array_equal(ridx, eridx) def test_join_right(self): + index = 
self.create_index() other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic - res, lidx, ridx = self.index.join(other, how="right", return_indexers=True) + res, lidx, ridx = index.join(other, how="right", return_indexers=True) eres = other elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) @@ -801,7 +818,7 @@ def test_join_right(self): assert ridx is None # monotonic - res, lidx, ridx = self.index.join(other_mono, how="right", return_indexers=True) + res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True) eres = other_mono elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) assert isinstance(other, Int64Index) @@ -821,40 +838,42 @@ def test_join_right(self): tm.assert_numpy_array_equal(ridx, eridx) def test_join_non_int_index(self): + index = self.create_index() other = Index([3, 6, 7, 8, 10], dtype=object) - outer = self.index.join(other, how="outer") - outer2 = other.join(self.index, how="outer") + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) - inner = self.index.join(other, how="inner") - inner2 = other.join(self.index, how="inner") + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") expected = Index([6, 8, 10]) tm.assert_index_equal(inner, inner2) tm.assert_index_equal(inner, expected) - left = self.index.join(other, how="left") - tm.assert_index_equal(left, self.index.astype(object)) + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) - left2 = other.join(self.index, how="left") + left2 = other.join(index, how="left") tm.assert_index_equal(left2, other) - right = self.index.join(other, how="right") + right = index.join(other, how="right") tm.assert_index_equal(right, other) - right2 = other.join(self.index, how="right") - 
tm.assert_index_equal(right2, self.index.astype(object)) + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) def test_join_outer(self): + index = self.create_index() other = Int64Index([7, 12, 25, 1, 2, 5]) other_mono = Int64Index([1, 2, 5, 7, 12, 25]) # not monotonic # guarantee of sortedness - res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True) - noidx_res = self.index.join(other, how="outer") + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") tm.assert_index_equal(res, noidx_res) eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) @@ -869,8 +888,8 @@ def test_join_outer(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="outer", return_indexers=True) - noidx_res = self.index.join(other_mono, how="outer") + res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True) + noidx_res = index.join(other_mono, how="outer") tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) @@ -888,14 +907,24 @@ class TestUInt64Index(NumericInt): _dtype = "uint64" _holder = UInt64Index - def setup_method(self, method): - vals = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25] - self.indices = dict( - index=UInt64Index(vals), index_dec=UInt64Index(reversed(vals)) - ) - self.setup_indices() + @pytest.fixture( + params=[ + [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25], + [2 ** 63 + 25, 2 ** 63 + 20, 2 ** 63 + 15, 2 ** 63 + 10, 2 ** 63], + ], + ids=["index_inc", "index_dec"], + ) + def indices(self, request): + return UInt64Index(request.param) + + @pytest.fixture + def index_large(self): + # large values used in TestUInt64Index where no compat needed with Int64/Float64 + large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25] + return 
UInt64Index(large) def create_index(self): + # compat with shared Int64/Float64 tests; use index_large for UInt64 only tests return UInt64Index(np.arange(5, dtype="uint64")) def test_constructor(self): @@ -915,42 +944,42 @@ def test_constructor(self): res = Index(np.array([-1, 2 ** 63], dtype=object)) tm.assert_index_equal(res, idx) - def test_get_indexer(self): + def test_get_indexer(self, index_large): target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63) - indexer = self.index.get_indexer(target) + indexer = index_large.get_indexer(target) expected = np.array([0, -1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63) - indexer = self.index.get_indexer(target, method="pad") + indexer = index_large.get_indexer(target, method="pad") expected = np.array([0, 0, 1, 2, 3, 4, 4, 4, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) target = UInt64Index(np.arange(10).astype("uint64") * 5 + 2 ** 63) - indexer = self.index.get_indexer(target, method="backfill") + indexer = index_large.get_indexer(target, method="backfill") expected = np.array([0, 1, 1, 2, 3, 4, -1, -1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) - def test_intersection(self): + def test_intersection(self, index_large): other = Index([2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20]) - result = self.index.intersection(other) - expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) + result = index_large.intersection(other) + expected = Index(np.sort(np.intersect1d(index_large.values, other.values))) tm.assert_index_equal(result, expected) - result = other.intersection(self.index) + result = other.intersection(index_large) expected = Index( - np.sort(np.asarray(np.intersect1d(self.index.values, other.values))) + np.sort(np.asarray(np.intersect1d(index_large.values, other.values))) ) tm.assert_index_equal(result, 
expected) - def test_join_inner(self): + def test_join_inner(self, index_large): other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) other_mono = UInt64Index( 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") ) # not monotonic - res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True) + res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() @@ -968,9 +997,11 @@ def test_join_inner(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="inner", return_indexers=True) + res, lidx, ridx = index_large.join( + other_mono, how="inner", return_indexers=True + ) - res2 = self.index.intersection(other_mono) + res2 = index_large.intersection(other_mono) tm.assert_index_equal(res, res2) elidx = np.array([1, 4], dtype=np.intp) @@ -981,15 +1012,15 @@ def test_join_inner(self): tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) - def test_join_left(self): + def test_join_left(self, index_large): other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) other_mono = UInt64Index( 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") ) # not monotonic - res, lidx, ridx = self.index.join(other, how="left", return_indexers=True) - eres = self.index + res, lidx, ridx = index_large.join(other, how="left", return_indexers=True) + eres = index_large eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) assert isinstance(res, UInt64Index) @@ -998,7 +1029,7 @@ def test_join_left(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="left", return_indexers=True) + res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True) eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) assert isinstance(res, UInt64Index) @@ -1020,14 +1051,14 @@ def 
test_join_left(self): tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) - def test_join_right(self): + def test_join_right(self, index_large): other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) other_mono = UInt64Index( 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") ) # not monotonic - res, lidx, ridx = self.index.join(other, how="right", return_indexers=True) + res, lidx, ridx = index_large.join(other, how="right", return_indexers=True) eres = other elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) @@ -1037,7 +1068,9 @@ def test_join_right(self): assert ridx is None # monotonic - res, lidx, ridx = self.index.join(other_mono, how="right", return_indexers=True) + res, lidx, ridx = index_large.join( + other_mono, how="right", return_indexers=True + ) eres = other_mono elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) @@ -1060,38 +1093,38 @@ def test_join_right(self): tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) - def test_join_non_int_index(self): + def test_join_non_int_index(self, index_large): other = Index( 2 ** 63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object ) - outer = self.index.join(other, how="outer") - outer2 = other.join(self.index, how="outer") + outer = index_large.join(other, how="outer") + outer2 = other.join(index_large, how="outer") expected = Index( 2 ** 63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64") ) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) - inner = self.index.join(other, how="inner") - inner2 = other.join(self.index, how="inner") + inner = index_large.join(other, how="inner") + inner2 = other.join(index_large, how="inner") expected = Index(2 ** 63 + np.array([10, 20], dtype="uint64")) tm.assert_index_equal(inner, inner2) tm.assert_index_equal(inner, expected) - left = self.index.join(other, how="left") - tm.assert_index_equal(left, self.index.astype(object)) + left 
= index_large.join(other, how="left") + tm.assert_index_equal(left, index_large.astype(object)) - left2 = other.join(self.index, how="left") + left2 = other.join(index_large, how="left") tm.assert_index_equal(left2, other) - right = self.index.join(other, how="right") + right = index_large.join(other, how="right") tm.assert_index_equal(right, other) - right2 = other.join(self.index, how="right") - tm.assert_index_equal(right2, self.index.astype(object)) + right2 = other.join(index_large, how="right") + tm.assert_index_equal(right2, index_large.astype(object)) - def test_join_outer(self): + def test_join_outer(self, index_large): other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) other_mono = UInt64Index( 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") @@ -1099,8 +1132,8 @@ def test_join_outer(self): # not monotonic # guarantee of sortedness - res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True) - noidx_res = self.index.join(other, how="outer") + res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True) + noidx_res = index_large.join(other, how="outer") tm.assert_index_equal(res, noidx_res) eres = UInt64Index( @@ -1115,8 +1148,10 @@ def test_join_outer(self): tm.assert_numpy_array_equal(ridx, eridx) # monotonic - res, lidx, ridx = self.index.join(other_mono, how="outer", return_indexers=True) - noidx_res = self.index.join(other_mono, how="outer") + res, lidx, ridx = index_large.join( + other_mono, how="outer", return_indexers=True + ) + noidx_res = index_large.join(other_mono, how="outer") tm.assert_index_equal(res, noidx_res) elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 627c5cc56e010..fa64e1bacb2e5 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta 
import numpy as np import pytest @@ -22,15 +22,18 @@ class TestRangeIndex(Numeric): _holder = RangeIndex _compat_props = ["shape", "ndim", "size"] - def setup_method(self, method): - self.indices = dict( - index=RangeIndex(0, 20, 2, name="foo"), - index_dec=RangeIndex(18, -1, -2, name="bar"), - ) - self.setup_indices() + @pytest.fixture( + params=[ + RangeIndex(start=0, stop=20, step=2, name="foo"), + RangeIndex(start=18, stop=-1, step=-2, name="bar"), + ], + ids=["index_inc", "index_dec"], + ) + def indices(self, request): + return request.param def create_index(self): - return RangeIndex(5) + return RangeIndex(start=0, stop=20, step=2) def test_can_hold_identifiers(self): idx = self.create_index() @@ -38,8 +41,9 @@ def test_can_hold_identifiers(self): assert idx._can_hold_identifiers_and_holds_name(key) is False def test_too_many_names(self): + index = self.create_index() with pytest.raises(ValueError, match="^Length"): - self.index.names = ["roger", "harold"] + index.names = ["roger", "harold"] @pytest.mark.parametrize("name", [None, "foo"]) @pytest.mark.parametrize( @@ -267,7 +271,8 @@ def test_view(self): tm.assert_index_equal(i, i_view) def test_dtype(self): - assert self.index.dtype == np.int64 + index = self.create_index() + assert index.dtype == np.int64 def test_cached_data(self): # GH 26565, GH26617 @@ -326,11 +331,12 @@ def test_cached_data(self): assert isinstance(idx._cached_data, np.ndarray) def test_is_monotonic(self): - assert self.index.is_monotonic is True - assert self.index.is_monotonic_increasing is True - assert self.index.is_monotonic_decreasing is False - assert self.index._is_strictly_monotonic_increasing is True - assert self.index._is_strictly_monotonic_decreasing is False + index = RangeIndex(0, 20, 2) + assert index.is_monotonic is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing 
is False index = RangeIndex(4, 0, -1) assert index.is_monotonic is False @@ -376,43 +382,45 @@ def test_logical_compat(self): assert idx.any() == idx.values.any() def test_identical(self): - i = Index(self.index.copy()) - assert i.identical(self.index) + index = self.create_index() + i = Index(index.copy()) + assert i.identical(index) # we don't allow object dtype for RangeIndex - if isinstance(self.index, RangeIndex): + if isinstance(index, RangeIndex): return same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) - i = self.index.copy(dtype=object) + i = index.copy(dtype=object) i = i.rename("foo") same_values = Index(i, dtype=object) - assert same_values.identical(self.index.copy(dtype=object)) + assert same_values.identical(index.copy(dtype=object)) - assert not i.identical(self.index) + assert not i.identical(index) assert Index(same_values, name="foo", dtype=object).identical(i) - assert not self.index.copy(dtype=object).identical( - self.index.copy(dtype="int64") - ) + assert not index.copy(dtype=object).identical(index.copy(dtype="int64")) def test_get_indexer(self): + index = self.create_index() target = RangeIndex(10) - indexer = self.index.get_indexer(target) + indexer = index.get_indexer(target) expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_get_indexer_pad(self): + index = self.create_index() target = RangeIndex(10) - indexer = self.index.get_indexer(target, method="pad") + indexer = index.get_indexer(target, method="pad") expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) tm.assert_numpy_array_equal(indexer, expected) def test_get_indexer_backfill(self): + index = self.create_index() target = RangeIndex(10) - indexer = self.index.get_indexer(target, method="backfill") + indexer = index.get_indexer(target, method="backfill") expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) 
tm.assert_numpy_array_equal(indexer, expected) @@ -434,10 +442,11 @@ def test_get_indexer_decreasing(self, stop): def test_join_outer(self): # join with Int64Index + index = self.create_index() other = Int64Index(np.arange(25, 14, -1)) - res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True) - noidx_res = self.index.join(other, how="outer") + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") tm.assert_index_equal(res, noidx_res) eres = Int64Index( @@ -461,8 +470,8 @@ def test_join_outer(self): # join with RangeIndex other = RangeIndex(25, 14, -1) - res, lidx, ridx = self.index.join(other, how="outer", return_indexers=True) - noidx_res = self.index.join(other, how="outer") + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") tm.assert_index_equal(res, noidx_res) assert isinstance(res, Int64Index) @@ -473,9 +482,10 @@ def test_join_outer(self): def test_join_inner(self): # Join with non-RangeIndex + index = self.create_index() other = Int64Index(np.arange(25, 14, -1)) - res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True) + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) # no guarantee of sortedness, so sort for comparison purposes ind = res.argsort() @@ -495,7 +505,7 @@ def test_join_inner(self): # Join two RangeIndex other = RangeIndex(25, 14, -1) - res, lidx, ridx = self.index.join(other, how="inner", return_indexers=True) + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) assert isinstance(res, RangeIndex) tm.assert_index_equal(res, eres) @@ -504,10 +514,11 @@ def test_join_inner(self): def test_join_left(self): # Join with Int64Index + index = self.create_index() other = Int64Index(np.arange(25, 14, -1)) - res, lidx, ridx = self.index.join(other, how="left", return_indexers=True) - eres = self.index + res, lidx, ridx = index.join(other, 
how="left", return_indexers=True) + eres = index eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp) assert isinstance(res, RangeIndex) @@ -518,7 +529,7 @@ def test_join_left(self): # Join withRangeIndex other = Int64Index(np.arange(25, 14, -1)) - res, lidx, ridx = self.index.join(other, how="left", return_indexers=True) + res, lidx, ridx = index.join(other, how="left", return_indexers=True) assert isinstance(res, RangeIndex) tm.assert_index_equal(res, eres) @@ -527,9 +538,10 @@ def test_join_left(self): def test_join_right(self): # Join with Int64Index + index = self.create_index() other = Int64Index(np.arange(25, 14, -1)) - res, lidx, ridx = self.index.join(other, how="right", return_indexers=True) + res, lidx, ridx = index.join(other, how="right", return_indexers=True) eres = other elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp) @@ -541,7 +553,7 @@ def test_join_right(self): # Join withRangeIndex other = RangeIndex(25, 14, -1) - res, lidx, ridx = self.index.join(other, how="right", return_indexers=True) + res, lidx, ridx = index.join(other, how="right", return_indexers=True) eres = other assert isinstance(other, RangeIndex) @@ -550,36 +562,38 @@ def test_join_right(self): assert ridx is None def test_join_non_int_index(self): + index = self.create_index() other = Index([3, 6, 7, 8, 10], dtype=object) - outer = self.index.join(other, how="outer") - outer2 = other.join(self.index, how="outer") + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) tm.assert_index_equal(outer, outer2) tm.assert_index_equal(outer, expected) - inner = self.index.join(other, how="inner") - inner2 = other.join(self.index, how="inner") + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") expected = Index([6, 8, 10]) tm.assert_index_equal(inner, inner2) tm.assert_index_equal(inner, expected) - left = self.index.join(other, 
how="left") - tm.assert_index_equal(left, self.index.astype(object)) + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) - left2 = other.join(self.index, how="left") + left2 = other.join(index, how="left") tm.assert_index_equal(left2, other) - right = self.index.join(other, how="right") + right = index.join(other, how="right") tm.assert_index_equal(right, other) - right2 = other.join(self.index, how="right") - tm.assert_index_equal(right2, self.index.astype(object)) + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) def test_join_non_unique(self): + index = self.create_index() other = Index([4, 4, 3, 3]) - res, lidx, ridx = self.index.join(other, return_indexers=True) + res, lidx, ridx = index.join(other, return_indexers=True) eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18]) elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) @@ -589,40 +603,40 @@ def test_join_non_unique(self): tm.assert_numpy_array_equal(lidx, elidx) tm.assert_numpy_array_equal(ridx, eridx) - def test_join_self(self): - kinds = "outer", "inner", "left", "right" - for kind in kinds: - joined = self.index.join(self.index, how=kind) - assert self.index is joined + def test_join_self(self, join_type): + index = self.create_index() + joined = index.join(index, how=join_type) + assert index is joined @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): # intersect with Int64Index + index = self.create_index() other = Index(np.arange(1, 6)) - result = self.index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) - result = other.intersection(self.index, sort=sort) + result = other.intersection(index, sort=sort) expected = Index( - 
np.sort(np.asarray(np.intersect1d(self.index.values, other.values))) + np.sort(np.asarray(np.intersect1d(index.values, other.values))) ) tm.assert_index_equal(result, expected) # intersect with increasing RangeIndex other = RangeIndex(1, 6) - result = self.index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) # intersect with decreasing RangeIndex other = RangeIndex(5, 0, -1) - result = self.index.intersection(other, sort=sort) - expected = Index(np.sort(np.intersect1d(self.index.values, other.values))) + result = index.intersection(other, sort=sort) + expected = Index(np.sort(np.intersect1d(index.values, other.values))) tm.assert_index_equal(result, expected) # reversed (GH 17296) - result = other.intersection(self.index, sort=sort) + result = other.intersection(index, sort=sort) tm.assert_index_equal(result, expected) # GH 17296: intersect two decreasing RangeIndexes @@ -667,17 +681,15 @@ def test_intersection(self, sort): @pytest.mark.parametrize("sort", [False, None]) def test_union_noncomparable(self, sort): - from datetime import datetime, timedelta - # corner case, non-Int64Index - now = datetime.now() - other = Index([now + timedelta(i) for i in range(4)], dtype=object) - result = self.index.union(other, sort=sort) - expected = Index(np.concatenate((self.index, other))) + index = self.create_index() + other = Index([datetime.now() + timedelta(i) for i in range(4)], dtype=object) + result = index.union(other, sort=sort) + expected = Index(np.concatenate((index, other))) tm.assert_index_equal(result, expected) - result = other.union(self.index, sort=sort) - expected = Index(np.concatenate((other, self.index))) + result = other.union(index, sort=sort) + expected = Index(np.concatenate((other, index))) tm.assert_index_equal(result, expected) 
@pytest.fixture( @@ -785,11 +797,13 @@ def test_cant_or_shouldnt_cast(self): with pytest.raises(TypeError): RangeIndex("0", "1", "2") - def test_view_Index(self): - self.index.view(Index) + def test_view_index(self): + index = self.create_index() + index.view(Index) def test_prevent_casting(self): - result = self.index.astype("O") + index = self.create_index() + result = index.astype("O") assert result.dtype == np.object_ def test_take_preserve_name(self): @@ -828,7 +842,8 @@ def test_print_unicode_columns(self): repr(df.columns) # should not raise UnicodeDecodeError def test_repr_roundtrip(self): - tm.assert_index_equal(eval(repr(self.index)), self.index) + index = self.create_index() + tm.assert_index_equal(eval(repr(index)), index) def test_slice_keep_name(self): idx = RangeIndex(1, 2, name="asdf") @@ -859,20 +874,17 @@ def test_explicit_conversions(self): result = a - fidx tm.assert_index_equal(result, expected) - def test_has_duplicates(self): - for ind in self.indices: - if not len(ind): - continue - idx = self.indices[ind] - assert idx.is_unique - assert not idx.has_duplicates + def test_has_duplicates(self, indices): + assert indices.is_unique + assert not indices.has_duplicates def test_extended_gcd(self): - result = self.index._extended_gcd(6, 10) + index = self.create_index() + result = index._extended_gcd(6, 10) assert result[0] == result[1] * 6 + result[2] * 10 assert 2 == result[0] - result = self.index._extended_gcd(10, 6) + result = index._extended_gcd(10, 6) assert 2 == result[1] * 10 + result[2] * 6 assert 2 == result[0] @@ -917,80 +929,71 @@ def test_pickle_compat_construction(self): pass def test_slice_specialised(self): + index = self.create_index() + index.name = "foo" # scalar indexing - res = self.index[1] + res = index[1] expected = 2 assert res == expected - res = self.index[-1] + res = index[-1] expected = 18 assert res == expected # slicing # slice value completion - index = self.index[:] - expected = self.index - 
tm.assert_index_equal(index, expected) + index_slice = index[:] + expected = index + tm.assert_index_equal(index_slice, expected) # positive slice values - index = self.index[7:10:2] + index_slice = index[7:10:2] expected = Index(np.array([14, 18]), name="foo") - tm.assert_index_equal(index, expected) + tm.assert_index_equal(index_slice, expected) # negative slice values - index = self.index[-1:-5:-2] + index_slice = index[-1:-5:-2] expected = Index(np.array([18, 14]), name="foo") - tm.assert_index_equal(index, expected) + tm.assert_index_equal(index_slice, expected) # stop overshoot - index = self.index[2:100:4] + index_slice = index[2:100:4] expected = Index(np.array([4, 12]), name="foo") - tm.assert_index_equal(index, expected) + tm.assert_index_equal(index_slice, expected) # reverse - index = self.index[::-1] - expected = Index(self.index.values[::-1], name="foo") - tm.assert_index_equal(index, expected) + index_slice = index[::-1] + expected = Index(index.values[::-1], name="foo") + tm.assert_index_equal(index_slice, expected) - index = self.index[-8::-1] + index_slice = index[-8::-1] expected = Index(np.array([4, 2, 0]), name="foo") - tm.assert_index_equal(index, expected) + tm.assert_index_equal(index_slice, expected) - index = self.index[-40::-1] + index_slice = index[-40::-1] expected = Index(np.array([], dtype=np.int64), name="foo") - tm.assert_index_equal(index, expected) - - index = self.index[40::-1] - expected = Index(self.index.values[40::-1], name="foo") - tm.assert_index_equal(index, expected) - - index = self.index[10::-1] - expected = Index(self.index.values[::-1], name="foo") - tm.assert_index_equal(index, expected) - - def test_len_specialised(self): - - # make sure that our len is the same as - # np.arange calc - - for step in np.arange(1, 6, 1): + tm.assert_index_equal(index_slice, expected) - arr = np.arange(0, 5, step) - i = RangeIndex(0, 5, step) - assert len(i) == len(arr) + index_slice = index[40::-1] + expected = 
Index(index.values[40::-1], name="foo") + tm.assert_index_equal(index_slice, expected) - i = RangeIndex(5, 0, step) - assert len(i) == 0 + index_slice = index[10::-1] + expected = Index(index.values[::-1], name="foo") + tm.assert_index_equal(index_slice, expected) - for step in np.arange(-6, -1, 1): + @pytest.mark.parametrize("step", set(range(-5, 6)) - {0}) + def test_len_specialised(self, step): + # make sure that our len is the same as np.arange calc + start, stop = (0, 5) if step > 0 else (5, 0) - arr = np.arange(5, 0, step) - i = RangeIndex(5, 0, step) - assert len(i) == len(arr) + arr = np.arange(start, stop, step) + index = RangeIndex(start, stop, step) + assert len(index) == len(arr) - i = RangeIndex(0, 5, step) - assert len(i) == 0 + index = RangeIndex(stop, start, step) + assert len(index) == 0 @pytest.fixture( params=[ diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index b3850f7a4e09e..d5b23653e8a72 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -13,7 +13,7 @@ import pandas as pd from pandas import Float64Index, Int64Index, RangeIndex, UInt64Index from pandas.api.types import pandas_dtype -from pandas.tests.indexes.conftest import indices_list +from pandas.tests.indexes.conftest import indices_dict import pandas.util.testing as tm COMPATIBLE_INCONSISTENT_PAIRS = OrderedDict( @@ -26,15 +26,12 @@ ) -@pytest.fixture( - params=list(it.combinations(indices_list, 2)), - ids=lambda x: type(x[0]).__name__ + type(x[1]).__name__, -) +@pytest.fixture(params=it.combinations(indices_dict, 2), ids="-".join) def index_pair(request): """ Create all combinations of 2 index types. 
""" - return request.param + return indices_dict[request.param[0]], indices_dict[request.param[1]] def test_union_same_types(indices): diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index e790a913fcac2..2ef86ddf8c8bf 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -30,9 +30,9 @@ class TestTimedeltaIndex(DatetimeLike): _holder = TimedeltaIndex - def setup_method(self, method): - self.indices = dict(index=tm.makeTimedeltaIndex(10)) - self.setup_indices() + @pytest.fixture + def indices(self): + return tm.makeTimedeltaIndex(10) def create_index(self): return pd.to_timedelta(range(5), unit="d") + pd.offsets.Hour(1)
The common index tests use `setup_method` to create a `dict` of indexes to test against: https://github.com/pandas-dev/pandas/blob/df2e0813e053cc5bc924b2292ea8918a6b27f0e2/pandas/tests/indexes/test_range.py#L25-L30 This `dict` of indexes is then iterated over within the tests: https://github.com/pandas-dev/pandas/blob/df2e0813e053cc5bc924b2292ea8918a6b27f0e2/pandas/tests/indexes/common.py#L359-L363 The bulk of this PR involves converting `self.indices` into a parametrized fixture of indexes, and adjusting the tests to support this (largely just unindenting). I had to do this conversion for all indexes at once since common test code for all index classes utilizes this pattern, so the diff is fairly large, but it should be relatively simple changes. I also had to make some changes to references to specific indexes as well (e.g. `self.index`, `self.strIndex`, etc.) since the `setup_method` code also directly set each index in the `dict` as a class attribute.
https://api.github.com/repos/pandas-dev/pandas/pulls/28865
2019-10-09T04:50:28Z
2019-10-11T15:14:07Z
2019-10-11T15:14:07Z
2019-12-20T01:03:29Z
TST: Fix xfails for non-box maybe_promote on integer dtypes
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5b13e13bb20ba..a7fdd6759ba95 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -408,9 +408,58 @@ def maybe_promote(dtype, fill_value=np.nan): dtype = np.object_ elif issubclass(dtype.type, np.integer): # upcast to prevent overflow - arr = np.asarray(fill_value) - if arr != arr.astype(dtype): - dtype = arr.dtype + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # np.dtype ordering considers: + # int[n] < int[2*n] + # uint[n] < uint[2*n] + # u?int[n] < object_ + dtype = mst + + elif np.can_cast(fill_value, dtype): + pass + + elif dtype.kind == "u" and mst.kind == "i": + dtype = np.promote_types(dtype, mst) + if dtype.kind == "f": + # Case where we disagree with numpy + dtype = np.dtype(np.object_) + + elif dtype.kind == "i" and mst.kind == "u": + + if fill_value > np.iinfo(np.int64).max: + # object is the only way to represent fill_value and keep + # the range allowed by the given dtype + dtype = np.dtype(np.object_) + + elif mst.itemsize < dtype.itemsize: + pass + + elif dtype.itemsize == mst.itemsize: + # We never cast signed to unsigned because that loses + # parts of the original range, so find the smallest signed + # integer that can hold all of `mst`. + ndt = { + np.int64: np.object_, + np.int32: np.int64, + np.int16: np.int32, + np.int8: np.int16, + }[dtype.type] + dtype = np.dtype(ndt) + + else: + # bump to signed integer dtype that holds all of `mst` range + # Note: we have to use itemsize because some (windows) + # builds don't satisfiy e.g. 
np.uint32 == np.uint32 + ndt = { + 4: np.int64, + 2: np.int32, + 1: np.int16, # TODO: Test for this case + }[mst.itemsize] + dtype = np.dtype(ndt) + + fill_value = dtype.type(fill_value) + elif issubclass(dtype.type, np.floating): # check if we can cast if _check_lossless_cast(fill_value, dtype): diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index e4e5a22ea6ca0..8d10ed26a80fa 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -151,7 +151,17 @@ def _assert_match(result_fill_value, expected_fill_value): # GH#23982/25425 require the same type in addition to equality/NA-ness res_type = type(result_fill_value) ex_type = type(expected_fill_value) - assert res_type == ex_type + if res_type.__name__ == "uint64": + # No idea why, but these (sometimes) do not compare as equal + assert ex_type.__name__ == "uint64" + elif res_type.__name__ == "ulonglong": + # On some builds we get this instead of np.uint64 + # Note: cant check res_type.dtype.itemsize directly on numpy 1.18 + assert res_type(0).itemsize == 8 + assert ex_type == res_type or ex_type == np.uint64 + else: + # On some builds, type comparison fails, e.g. 
np.int32 != np.int32 + assert res_type == ex_type or res_type.__name__ == ex_type.__name__ match_value = result_fill_value == expected_fill_value @@ -275,26 +285,6 @@ def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype, box): expected_dtype = np.dtype(expected_dtype) boxed, box_dtype = box # read from parametrized fixture - if not boxed: - if expected_dtype == object: - pytest.xfail("overflow error") - if expected_dtype == "int32": - pytest.xfail("always upcasts to platform int") - if dtype == "int8" and expected_dtype == "int16": - pytest.xfail("casts to int32 instead of int16") - if ( - issubclass(dtype.type, np.unsignedinteger) - and np.iinfo(dtype).max < fill_value <= np.iinfo("int64").max - ): - pytest.xfail("falsely casts to signed") - if (dtype, expected_dtype) in [ - ("uint8", "int16"), - ("uint32", "int64"), - ] and fill_value != np.iinfo("int32").min - 1: - pytest.xfail("casts to int32 instead of int8/int16") - # this following xfail is "only" a consequence of the - now strictly - # enforced - principle that maybe_promote_with_scalar always casts - pytest.xfail("wrong return type of fill_value") if boxed: if expected_dtype != object: pytest.xfail("falsely casts to object")
Orthogonal to other outstanding maybe_promote PRs. This one required pretty significant changes to the code. Using `np.min_scalar_type` and `np.can_cast` cleans this up a bit, but it is still pretty verbose. AFAICT there is no clear way to make it shorter without significantly sacrificing clarity. In a follow-up I think L410-459 can be refactored out to a helper function. Waiting on that until I figure out the boxed=True cases, which are still troublesome.
https://api.github.com/repos/pandas-dev/pandas/pulls/28864
2019-10-09T03:35:04Z
2019-10-10T12:51:18Z
2019-10-10T12:51:18Z
2019-10-10T13:34:20Z
Vectorized ISO Format
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 2ed85595f7e3a..1693a2e936a1f 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -18,10 +18,15 @@ from pandas._libs.tslibs.ccalendar cimport ( get_day_of_year) from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, - td64_to_tdstruct) + td64_to_tdstruct, NPY_DATETIMEUNIT, NPY_FR_ns) from pandas._libs.tslibs.nattype cimport NPY_NAT +cdef extern from "./src/datetime/np_datetime_strings.h": + int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, + NPY_DATETIMEUNIT base) + + def get_time_micros(ndarray[int64_t] dtindex): """ Return the number of microseconds in the time component of a @@ -43,6 +48,36 @@ def get_time_micros(ndarray[int64_t] dtindex): return micros +@cython.wraparound(False) +@cython.boundscheck(False) +def get_datetime_isoformats(ndarray[int64_t] dtindex) -> ndarray: + """ + Return isoformats for an array of datetimelike objects. 
+ + Parameters + ---------- + dtindex : DatetimeArray + + Returns + ------- + Array of ISO formats + """ + cdef: + Py_ssize_t i, count = len(dtindex) + int64_t val, convert_status + npy_datetimestruct dts + char buf[34] # ns precision with UTC offset max length + + out = np.empty(count, dtype=object) + + for i in range(count): + dt64_to_dtstruct(dtindex[i], &dts); + # TODO: handle bad return + convert_status = make_iso_8601_datetime(&dts, buf, 34, NPY_FR_ns) + out[i] = buf.decode("UTF-8") + + return out + @cython.wraparound(False) @cython.boundscheck(False) def build_field_sarray(const int64_t[:] dtindex): @@ -128,7 +163,6 @@ def get_date_name_field(const int64_t[:] dtindex, object field, object locale=No dt64_to_dtstruct(dtindex[i], &dts) out[i] = names[dts.month].capitalize() - else: raise ValueError("Field {field} not supported".format(field=field)) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0335058a69c63..16b4b9482721c 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -134,9 +134,11 @@ def f(self): return result if field in self._object_ops: - result = fields.get_date_name_field(values, field) - result = self._maybe_mask_results(result, fill_value=None) - + if field == "isoformat": + result = fields.get_datetime_isoformats(values) + else: + result = fields.get_date_name_field(values, field) + result = self._maybe_mask_results(result, fill_value=None) else: result = fields.get_date_field(values, field) result = self._maybe_mask_results( @@ -284,7 +286,7 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps "is_year_end", "is_leap_year", ] - _object_ops = ["weekday_name", "freq", "tz"] + _object_ops = ["weekday_name", "freq", "tz", "isoformat"] _field_ops = [ "year", "month", @@ -1522,7 +1524,13 @@ def date(self): The name of day in a week (ex: Friday)\n\n.. 
deprecated:: 0.23.0 """, ) - + isoformat = _field_accessor( + "isoformat", + "isoformat", + """ + ISO formatted string. + """, + ) dayofyear = _field_accessor( "dayofyear", "doy", diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 00310f4fba7c7..201261cf70287 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -64,6 +64,39 @@ def test_dti_timestamp_fields(self, field): result = getattr(Timestamp(idx[-1]), field) assert result == expected + @pytest.mark.parametrize( + "tz,expected_vals", + [ + ( + "utc", + [ + "2000-01-01T00:00:00.000000000Z", + "2000-01-02T00:00:00.000000000Z", + "2000-01-03T00:00:00.000000000Z", + ], + ), + # "US/Eastern", + # [ + # "2000-01-01T00:00:00.000000000-05:00", + # "2000-01-02T00:00:00.000000000-05:00", + # "2000-01-03T00:00:00.000000000-05:00", + # ], + ], + ) + def test_dti_isoformat_datetimes(self, tz, expected_vals): + dts = pd.date_range(start="2000-01-1", periods=3, freq="D", tz=tz) + result = pd.Series(dts).dt.isoformat + expected = pd.Series(expected_vals) + tm.assert_series_equal(result, expected) + + @pytest.mark.skip + def test_dti_isoformat_timedelts(self): + ... + + @pytest.mark.skip + def test_dti_isoformat_period_raises(self): + ... + def test_dti_timestamp_freq_fields(self): # extra fields from DatetimeIndex like quarter and week idx = tm.makeDateIndex(100)
- [ ] closes #28180 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Needs work but could certainly use some guidance from @jbrockmendel This uses the dt -> ISO string formatting that is deeply nested in the JSON code. It doesn't handle time zones properly (see #12997), doesn't match what you would get by default from `Timestamp.isoformat` (different precision) and doesn't support Timedeltas yet. When Timedeltas are supported this could ease some of the performance issues @cbertinato is seeing in #28595 In any case looking for guidance and thoughts on how to properly implement this, if this is even in the right direction Here's a rough benchmark on performance: ```ipython
https://api.github.com/repos/pandas-dev/pandas/pulls/28863
2019-10-09T00:00:43Z
2019-10-11T12:53:48Z
null
2020-01-16T00:33:48Z
CLN: catch less in pd.io
diff --git a/pandas/io/html.py b/pandas/io/html.py index 490c574463b9b..715f0c08546a3 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -897,7 +897,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs): try: tables = p.parse_tables() - except Exception as caught: + except ValueError as caught: # if `io` is an io-like object, check if it's seekable # and try to rewind it before trying the next parser if hasattr(io, "seekable") and io.seekable(): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 3678e32943b2e..2d8303632e723 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -3271,24 +3271,29 @@ def converter(*date_cols): ) else: try: - result = tools.to_datetime( - date_parser(*date_cols), errors="ignore", cache=cache_dates - ) - if isinstance(result, datetime.datetime): - raise Exception("scalar parser") - return result + parsed_cols = date_parser(*date_cols) + if isinstance(parsed_cols, datetime.datetime): + raise TypeError("scalar parser") except Exception: + # Since `date_parser` is user-provided, we can't guess + # what it might raise. + dcs = parsing._concat_date_cols(date_cols) try: - return tools.to_datetime( - parsing.try_parse_dates( - parsing._concat_date_cols(date_cols), - parser=date_parser, - dayfirst=dayfirst, - ), - errors="ignore", + parsed = parsing.try_parse_dates( + dcs, parser=date_parser, dayfirst=dayfirst ) except Exception: + # Since `date_parser` is user-provided, we can't guess + # what it might raise. return generic_parser(date_parser, *date_cols) + else: + return tools.to_datetime(parsed, errors="ignore") + + else: + result = tools.to_datetime( + parsed_cols, errors="ignore", cache=cache_dates + ) + return result return converter
@bashtage (only tangentially related to this PR) there is an `except Exception` in pandas.io.stata that I'd like to make more specific. Any suggestions?
https://api.github.com/repos/pandas-dev/pandas/pulls/28862
2019-10-08T23:22:12Z
2019-11-12T18:00:24Z
null
2019-11-21T23:07:48Z
TST: Fix 36 maybe_promote xfails wanting np.bytes_ instead of np.object_
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index e4e5a22ea6ca0..b498d589119d2 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -506,25 +506,13 @@ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced, box) fill_dtype = np.dtype(any_numpy_dtype_reduced) boxed, box_dtype = box # read from parametrized fixture - if issubclass(fill_dtype.type, np.bytes_): - if not boxed or box_dtype == object: - pytest.xfail("falsely upcasts to object") - # takes the opinion that bool dtype has no missing value marker - else: - pytest.xfail("wrong missing value marker") - else: - if boxed and box_dtype is None: - pytest.xfail("does not upcast to object") - # create array of given dtype; casts "1" to correct dtype fill_value = np.array([1], dtype=fill_dtype)[0] - # filling bytes with anything but bytes casts to object - expected_dtype = ( - dtype if issubclass(fill_dtype.type, np.bytes_) else np.dtype(object) - ) + # we never use bytes dtype internally, always promote to object + expected_dtype = np.dtype(np.object_) exp_val_for_scalar = fill_value - exp_val_for_array = None if issubclass(fill_dtype.type, np.bytes_) else np.nan + exp_val_for_array = np.nan _check_promote( dtype, @@ -542,13 +530,7 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box) fill_dtype = np.dtype(bytes_dtype) boxed, box_dtype = box # read from parametrized fixture - if issubclass(dtype.type, np.bytes_): - if not boxed or box_dtype == object: - pytest.xfail("falsely upcasts to object") - # takes the opinion that bool dtype has no missing value marker - else: - pytest.xfail("wrong missing value marker") - else: + if not issubclass(dtype.type, np.bytes_): if ( boxed and (box_dtype == "bytes" or box_dtype is None) @@ -562,11 +544,11 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box) # special case for box_dtype (cannot use 
fixture in parametrization) box_dtype = fill_dtype if box_dtype == "bytes" else box_dtype - # filling bytes with anything but bytes casts to object - expected_dtype = dtype if issubclass(dtype.type, np.bytes_) else np.dtype(object) + # we never use bytes dtype internally, always promote to object + expected_dtype = np.dtype(np.object_) # output is not a generic bytes, but corresponds to expected_dtype exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0] - exp_val_for_array = None if issubclass(dtype.type, np.bytes_) else np.nan + exp_val_for_array = np.nan _check_promote( dtype,
@jreback less trivial than some of the others. These changes are based on my understanding that maybe_promote should _never_ be returning np.bytes_ dtype; those cases all become np.object_. That is what maybe_promote currently does, and this updates the tests to expect that behavior.
https://api.github.com/repos/pandas-dev/pandas/pulls/28861
2019-10-08T22:57:01Z
2019-10-11T12:24:27Z
2019-10-11T12:24:27Z
2019-10-11T15:30:07Z
ENH: Add optional argument keep_index to dataframe melt method (merged master onto old PR)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5200ad0ba0d23..4946ce4e13eae 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6312,6 +6312,10 @@ def unstack(self, level=-1, fill_value=None): Name to use for the 'value' column. col_level : int or str, optional If columns are a MultiIndex then use this level to melt. + keep_index : boolean, optional, default False + If True, the original index is reused. + In the resulting MulitIndex the names of the unpivoted columns + are added as an additional level to ensure uniqueness. Returns ------- @@ -6396,6 +6400,7 @@ def melt( var_name=None, value_name="value", col_level=None, + keep_index=False, ): from pandas.core.reshape.melt import melt @@ -6406,6 +6411,7 @@ def melt( var_name=var_name, value_name=value_name, col_level=col_level, + keep_index=keep_index, ) # ---------------------------------------------------------------------- diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 6f2e264f1a4d0..c01f15f2682bd 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -27,6 +27,7 @@ def melt( var_name=None, value_name="value", col_level=None, + keep_index=False, ): # TODO: what about the existing index? 
# If multiindex, gather names of columns on all level for checking presence @@ -116,7 +117,23 @@ def melt( # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) - return frame._constructor(mdata, columns=mcolumns) + result = frame._constructor(mdata, columns=mcolumns) + + if keep_index: + orig_index_values = list(np.tile(frame.index.get_values(), K)) + + if len(frame.index.names) == len(set(frame.index.names)): + orig_index_names = frame.index.names + else: + orig_index_names = [ + "original_index_{i}".format(i=i) for i in range(len(frame.index.names)) + ] + + result[orig_index_names] = frame._constructor(orig_index_values) + + result = result.set_index(orig_index_names + list(var_name)) + + return result def lreshape(data, groups, dropna=True, label=None):
This PR merges master onto @NiklasKeck's PR branch (#17440) to add an optional argument `keep_index` to `pd.melt`. There is quite a bit of discussion between the following 2 PRs and issues: Index gets lost when DataFrame melt method is used #17440 ENH: Add optional argument keep_index to dataframe melt method #17459 Melt enhance #17677 Please let me know if additional things need to be done to complete this PR ---- Setting keep_index to True will reuse the original DataFrame index + names of melted columns as additional level. closes issue #17440 (cherry picked from commit 0c64bf0e5f145781c0f74fb93ffcd63a6d964bd9) - [x] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28859
2019-10-08T21:42:43Z
2019-12-09T18:05:32Z
null
2019-12-09T18:05:32Z
DOC: Fix missing periods and non capitalized summary beginnings (#27977)
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 697e97e518b13..32dcc86faa7e8 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1710,7 +1710,7 @@ cdef class _Period: def asfreq(self, freq, how='E'): """ Convert Period to desired frequency, either at the start or end of the - interval + interval. Parameters ---------- @@ -1777,7 +1777,7 @@ cdef class _Period: def to_timestamp(self, freq=None, how='start', tz=None): """ Return the Timestamp representation of the Period at the target - frequency at the specified end (how) of the Period + frequency at the specified end (how) of the Period. Parameters ---------- @@ -2380,7 +2380,7 @@ cdef class _Period: class Period(_Period): """ - Represents a period of time + Represents a period of time. Parameters ---------- diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f9cb35eb79ae3..3d267b0114695 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1344,7 +1344,7 @@ class Timedelta(_Timedelta): def floor(self, freq): """ - return a new Timedelta floored to this resolution. + Return a new Timedelta floored to this resolution. Parameters ---------- @@ -1355,7 +1355,7 @@ class Timedelta(_Timedelta): def ceil(self, freq): """ - return a new Timedelta ceiled to this resolution. + Return a new Timedelta ceiled to this resolution. Parameters ---------- diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 6dd0b116b3b0d..4039cc91fb554 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -975,7 +975,7 @@ def length(self): @property def mid(self): """ - Return the midpoint of each Interval in the IntervalArray as an Index + Return the midpoint of each Interval in the IntervalArray as an Index. 
""" try: return 0.5 * (self.left + self.right) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9c4746f4d68e3..6bac3fe426f2d 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -849,7 +849,7 @@ def set_uuid(self, uuid): def set_caption(self, caption): """ - Set the caption on a Styler + Set the caption on a Styler. Parameters ---------- diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 74ce60c6116a9..426ca9632af29 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -8,7 +8,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ - Helper function to convert DataFrame and Series to matplotlib.table + Helper function to convert DataFrame and Series to matplotlib.table. Parameters ---------- @@ -32,7 +32,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): def register(explicit=True): """ - Register Pandas Formatters and Converters with matplotlib + Register Pandas Formatters and Converters with matplotlib. This function modifies the global ``matplotlib.units.registry`` dictionary. Pandas adds custom converters for @@ -54,7 +54,7 @@ def register(explicit=True): def deregister(): """ - Remove pandas' formatters and converters + Remove pandas' formatters and converters. Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before
- [ ] closes #27977 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Fixed for the following functions `pandas.io.formats.style.Styler.set_caption pandas.plotting.table pandas.plotting.register_matplotlib_converters pandas.plotting.deregister_matplotlib_converters pandas.arrays.IntervalArray.mid pandas.Timedelta.ceil pandas.Timedelta.floor pandas.Period pandas.Period.asfreq pandas.Period.to_timestamp`
https://api.github.com/repos/pandas-dev/pandas/pulls/28858
2019-10-08T21:26:02Z
2019-10-09T07:37:50Z
2019-10-09T07:37:50Z
2019-10-09T07:38:05Z
CLN: assorted cleanups, mostly post-black fixups
diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index ec3dd7a48a89f..a9e45cad22d27 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -67,7 +67,7 @@ class SeriesConstructors: def setup(self, data_fmt, with_index, dtype): if data_fmt in (gen_of_str, gen_of_tuples) and with_index: raise NotImplementedError( - "Series constructors do not support " "using generators with indexes" + "Series constructors do not support using generators with indexes" ) N = 10 ** 4 if dtype == "float": diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index 06a181875aaa8..cbab9fdc9c0ba 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -27,7 +27,7 @@ def time_add(self, engine, threads): def time_and(self, engine, threads): pd.eval( - "(self.df > 0) & (self.df2 > 0) & " "(self.df3 > 0) & (self.df4 > 0)", + "(self.df > 0) & (self.df2 > 0) & (self.df3 > 0) & (self.df4 > 0)", engine=engine, ) diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py index 8ec04a2087f1b..b78dc63d17130 100644 --- a/asv_bench/benchmarks/io/hdf.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -88,11 +88,11 @@ def time_write_store_table_dc(self): def time_query_store_table_wide(self): self.store.select( - "table_wide", where="index > self.start_wide and " "index < self.stop_wide" + "table_wide", where="index > self.start_wide and index < self.stop_wide" ) def time_query_store_table(self): - self.store.select("table", where="index > self.start and " "index < self.stop") + self.store.select("table", where="index > self.start and index < self.stop") def time_store_repr(self): repr(self.store) diff --git a/doc/source/conf.py b/doc/source/conf.py index 5e2a2db20b53c..34faf183db1c2 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -628,11 +628,11 @@ def linkcode_resolve(domain, info): fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__)) if "+" in pandas.__version__: - return 
"http://github.com/pandas-dev/pandas/blob/master/pandas/" "{}{}".format( + return "http://github.com/pandas-dev/pandas/blob/master/pandas/{}{}".format( fn, linespec ) else: - return "http://github.com/pandas-dev/pandas/blob/" "v{}/pandas/{}{}".format( + return "http://github.com/pandas-dev/pandas/blob/v{}/pandas/{}{}".format( pandas.__version__, fn, linespec ) diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index da75e2c49ae10..ea52736cb11a7 100644 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -256,7 +256,7 @@ Timezones Numeric ^^^^^^^ - Bug in :meth:`DataFrame.quantile` with zero-column :class:`DataFrame` incorrectly raising (:issue:`23925`) -- :class:`DataFrame` inequality comparisons with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`) +- :class:`DataFrame` flex inequality comparisons methods (:meth:`DataFrame.lt`, :meth:`DataFrame.le`, :meth:`DataFrame.gt`, :meth: `DataFrame.ge`) with object-dtype and ``complex`` entries failing to raise ``TypeError`` like their :class:`Series` counterparts (:issue:`28079`) - Bug in :class:`DataFrame` logical operations (`&`, `|`, `^`) not matching :class:`Series` behavior by filling NA values (:issue:`28741`) - diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index a7d6d19bbc80d..34eb9412451c5 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -170,9 +170,9 @@ cdef class Reducer: PyArray_SETITEM(result, PyArray_ITER_DATA(it), res) chunk.data = chunk.data + self.increment PyArray_ITER_NEXT(it) - except Exception, e: - if hasattr(e, 'args'): - e.args = e.args + (i,) + except Exception as err: + if hasattr(err, 'args'): + err.args = err.args + (i,) raise finally: # so we don't free the wrong memory diff --git a/pandas/_version.py b/pandas/_version.py index 4f5bdf59a99d5..0cdedf3da3ea7 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -249,7 
+249,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): - fmt = "tag '{full_tag}' doesn't start with prefix " "'{tag_prefix}'" + fmt = "tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" msg = fmt.format(full_tag=full_tag, tag_prefix=tag_prefix) if verbose: print(msg) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 714423de34222..605d179e7c652 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -341,13 +341,15 @@ def apply_series_generator(self): for i, v in enumerate(series_gen): results[i] = self.f(v) keys.append(v.name) - except Exception as e: - if hasattr(e, "args"): + except Exception as err: + if hasattr(err, "args"): # make sure i is defined if i is not None: k = res_index[i] - e.args = e.args + ("occurred at index %s" % pprint_thing(k),) + err.args = err.args + ( + "occurred at index %s" % pprint_thing(k), + ) raise self.results = results diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 7cc9dc11a8ccc..eb57d703cd4d5 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -672,7 +672,7 @@ def _read_next_page(self): return True elif len(self._cached_page) != self._page_length: self.close() - msg = "failed to read complete page from file " "(read {:d} of {:d} bytes)" + msg = "failed to read complete page from file (read {:d} of {:d} bytes)" raise ValueError(msg.format(len(self._cached_page), self._page_length)) self._read_page_header() diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 966a18e11a620..d7b0839ec62ea 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -685,7 +685,7 @@ def _get_call_args(backend_name, data, args, kwargs): else: raise TypeError( ( - "Called plot accessor for type {}, expected " "Series or DataFrame" + "Called plot accessor for type {}, expected Series or DataFrame" ).format(type(data).__name__) ) @@ -740,7 +740,7 @@ 
def __call__(self, *args, **kwargs): return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs) else: raise ValueError( - ("plot kind {} can only be used for " "data frames").format(kind) + ("plot kind {} can only be used for data frames").format(kind) ) elif kind in self._series_kinds: if isinstance(data, ABCDataFrame): diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 99035013092cc..eed328131da92 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -331,9 +331,7 @@ def plot_group(keys, values, ax): if return_type is None: return_type = "axes" if layout is not None: - raise ValueError( - "The 'layout' keyword is not supported when " "'by' is None" - ) + raise ValueError("The 'layout' keyword is not supported when 'by' is None") if ax is None: rc = {"figure.figsize": figsize} if figsize is not None else {} diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 82c5ba7f0317d..a729951b3d7db 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -230,7 +230,7 @@ def _validate_color_args(self): "color" in self.kwds or "colors" in self.kwds ) and self.colormap is not None: warnings.warn( - "'color' and 'colormap' cannot be used " "simultaneously. Using 'color'" + "'color' and 'colormap' cannot be used simultaneously. Using 'color'" ) if "color" in self.kwds and self.style is not None: diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index 5213e09f14067..f95ff2578d882 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -184,7 +184,7 @@ def _grouped_plot( if figsize == "default": # allowed to specify mpl default with 'default' warnings.warn( - "figsize='default' is deprecated. Specify figure " "size by tuple instead", + "figsize='default' is deprecated. 
Specify figure size by tuple instead", FutureWarning, stacklevel=5, ) @@ -298,9 +298,7 @@ def hist_series( if by is None: if kwds.get("layout", None) is not None: - raise ValueError( - "The 'layout' keyword is not supported when " "'by' is None" - ) + raise ValueError("The 'layout' keyword is not supported when 'by' is None") # hack until the plotting interface is a bit more unified fig = kwds.pop( "figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize) @@ -394,7 +392,7 @@ def hist_frame( naxes = len(data.columns) if naxes == 0: - raise ValueError("hist method requires numerical columns, " "nothing to plot.") + raise ValueError("hist method requires numerical columns, nothing to plot.") fig, axes = _subplots( naxes=naxes, diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index e1bba5856e271..927b9cf4e392a 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -25,7 +25,7 @@ def _get_standard_colors( elif color is not None: if colormap is not None: warnings.warn( - "'color' and 'colormap' cannot be used " "simultaneously. Using 'color'" + "'color' and 'colormap' cannot be used simultaneously. 
Using 'color'" ) colors = list(color) if is_list_like(color) else color else: diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index eddc9b4cd21bd..caa0167c06389 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -188,8 +188,7 @@ def _subplots( ax = _flatten(ax) if layout is not None: warnings.warn( - "When passing multiple axes, layout keyword is " "ignored", - UserWarning, + "When passing multiple axes, layout keyword is ignored", UserWarning ) if sharex or sharey: warnings.warn( diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 1045b72f0aa6e..f35707de189dc 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1233,8 +1233,8 @@ class ErrorThread(threading.Thread): def run(self): try: super().run() - except Exception as e: - self.err = e + except Exception as err: + self.err = err else: self.err = None diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index 5692404205012..f42c507e51511 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -65,7 +65,7 @@ def test_agg_function_support(self, arg): df = pd.DataFrame({"A": np.arange(5)}) roll = df.rolling(2, win_type="triang") - msg = "'{arg}' is not a valid function for " "'Window' object".format(arg=arg) + msg = "'{arg}' is not a valid function for 'Window' object".format(arg=arg) with pytest.raises(AttributeError, match=msg): roll.agg(arg) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 81d8869dd7ba0..84b00d7f4907f 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -658,9 +658,7 @@ def __init__(self, start="09:00", end="17:00", offset=timedelta(0)): # Validation of input if len(start) != len(end): - raise ValueError( - "number of starting time and ending time " "must be the same" - ) + raise ValueError("number of starting time and ending time must be the 
same") num_openings = len(start) # sort starting and ending time by starting time @@ -2242,7 +2240,7 @@ def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code): variation = "last" else: raise ValueError( - "Unable to parse varion_code: " "{code}".format(code=varion_code) + "Unable to parse varion_code: {code}".format(code=varion_code) ) startingMonth = ccalendar.MONTH_TO_CAL_NUM[startingMonth_code] @@ -2557,7 +2555,7 @@ def __init__(self, n=1, normalize=False): BaseOffset.__init__(self, n, normalize) if normalize: raise ValueError( - "Tick offset with `normalize=True` are not " "allowed." + "Tick offset with `normalize=True` are not allowed." ) # GH#21427 __gt__ = _tick_comp(operator.gt) diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 8a25e511b5fc4..ebc015c820c14 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -171,7 +171,7 @@ def deprecate_kwarg( if mapping is not None and not hasattr(mapping, "get") and not callable(mapping): raise TypeError( - "mapping from old to new argument values " "must be dict or callable!" + "mapping from old to new argument values must be dict or callable!" 
) def _deprecate_kwarg(func: F) -> F: @@ -214,7 +214,7 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]: warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name) is not None: msg = ( - "Can only specify '{old_name}' or '{new_name}', " "not both" + "Can only specify '{old_name}' or '{new_name}', not both" ).format(old_name=old_arg_name, new_name=new_arg_name) raise TypeError(msg) else: diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py index 953c8a43a21b8..4f2cbd4314b8e 100644 --- a/pandas/util/_exceptions.py +++ b/pandas/util/_exceptions.py @@ -6,11 +6,11 @@ def rewrite_exception(old_name, new_name): """Rewrite the message of an exception.""" try: yield - except Exception as e: - msg = e.args[0] + except Exception as err: + msg = err.args[0] msg = msg.replace(old_name, new_name) args = (msg,) - if len(e.args) > 1: - args = args + e.args[1:] - e.args = args + if len(err.args) > 1: + args = args + err.args[1:] + err.args = args raise diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index 21d09c06940ca..25795859d8018 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -139,7 +139,7 @@ def main(): "--json", metavar="FILE", nargs=1, - help="Save output as JSON into file, pass in " "'-' to output to stdout", + help="Save output as JSON into file, pass in '-' to output to stdout", ) (options, args) = parser.parse_args() diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index c9fd426f68b48..b516c3d78a11e 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -185,7 +185,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: ) skip_if_not_us_locale = pytest.mark.skipif( _skip_if_not_us_locale(), - reason="Specific locale is set " "{lang}".format(lang=locale.getlocale()[0]), + reason="Specific locale is set {lang}".format(lang=locale.getlocale()[0]), ) skip_if_no_scipy = 
pytest.mark.skipif( _skip_if_no_scipy(), reason="Missing SciPy requirement" diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index f5a472596f58f..0eaf46d563163 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -289,7 +289,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): # First fill with explicit values provided by the user... if arg_name in kwargs: if args: - msg = "{} got multiple values for argument " "'{}'".format( + msg = "{} got multiple values for argument '{}'".format( method_name, arg_name ) raise TypeError(msg) @@ -318,7 +318,7 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): elif len(args) == 2: if "axis" in kwargs: # Unambiguously wrong - msg = "Cannot specify both 'axis' and any of 'index' " "or 'columns'" + msg = "Cannot specify both 'axis' and any of 'index' or 'columns'" raise TypeError(msg) msg = ( diff --git a/pandas/util/testing.py b/pandas/util/testing.py index a34fdee227afc..c8b41a87baa9d 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1175,7 +1175,7 @@ def assert_series_equal( # vs Timestamp) but will compare equal if not Index(left.values).equals(Index(right.values)): msg = ( - "[datetimelike_compat=True] {left} is not equal to " "{right}." + "[datetimelike_compat=True] {left} is not equal to {right}." 
).format(left=left.values, right=right.values) raise AssertionError(msg) else: @@ -2363,26 +2363,26 @@ def wrapper(*args, **kwargs): skip() try: return t(*args, **kwargs) - except Exception as e: - errno = getattr(e, "errno", None) + except Exception as err: + errno = getattr(err, "errno", None) if not errno and hasattr(errno, "reason"): - errno = getattr(e.reason, "errno", None) + errno = getattr(err.reason, "errno", None) if errno in skip_errnos: skip( "Skipping test due to known errno" - " and error {error}".format(error=e) + " and error {error}".format(error=err) ) - e_str = str(e) + e_str = str(err) if any(m.lower() in e_str.lower() for m in _skip_on_messages): skip( "Skipping test because exception " - "message is known and error {error}".format(error=e) + "message is known and error {error}".format(error=err) ) - if not isinstance(e, error_classes): + if not isinstance(err, error_classes): raise if raise_on_error or can_connect(url, error_classes): @@ -2390,7 +2390,7 @@ def wrapper(*args, **kwargs): else: skip( "Skipping test due to lack of connectivity" - " and error {error}".format(error=e) + " and error {error}".format(error=err) ) return wrapper diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py index 95a892b822cff..5e1a169dbfc3f 100755 --- a/scripts/find_commits_touching_func.py +++ b/scripts/find_commits_touching_func.py @@ -46,14 +46,14 @@ "--dir-masks", metavar="d_re(,d_re)*", default=[], - help="comma separated list of regexes to match base " "path against", + help="comma separated list of regexes to match base path against", ) argparser.add_argument( "-p", "--path-masks", metavar="p_re(,p_re)*", default=[], - help="comma separated list of regexes to match full " "file path against", + help="comma separated list of regexes to match full file path against", ) argparser.add_argument( "-y", @@ -195,7 +195,7 @@ def sorter(i): return hits[i].path, d print( - ("\nThese commits touched the %s method in these files " 
"on these dates:\n") + ("\nThese commits touched the %s method in these files on these dates:\n") % args.funcname ) for i in sorted(range(len(hits)), key=sorter): diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index 85e5bf239cbfa..f1b1d9d8678bb 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -1029,7 +1029,7 @@ def test_bad_generic_functions(self, capsys, func): ( "BadReturns", "no_capitalization", - ("Return value description should start with a capital " "letter",), + ("Return value description should start with a capital letter",), ), ( "BadReturns", diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 401eaf8ff5ed5..d363e7108fff3 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -91,7 +91,7 @@ "whitespace only", "GL06": 'Found unknown section "{section}". Allowed sections are: ' "{allowed_sections}", - "GL07": "Sections are in the wrong order. Correct order is: " "{correct_sections}", + "GL07": "Sections are in the wrong order. 
Correct order is: {correct_sections}", "GL08": "The object does not have a docstring", "GL09": "Deprecation warning should precede extended summary", "GL10": "reST directives {directives} must be followed by two colons", diff --git a/setup.py b/setup.py index 7040147c2b741..04aedcb101e25 100755 --- a/setup.py +++ b/setup.py @@ -79,7 +79,7 @@ def is_platform_mac(): except ImportError: import tempita except ImportError: - raise ImportError("Building pandas requires Tempita: " "pip install Tempita") + raise ImportError("Building pandas requires Tempita: pip install Tempita") _pxi_dep_template = { @@ -142,9 +142,7 @@ def build_extensions(self): _build_ext.build_extensions(self) -DESCRIPTION = ( - "Powerful data structures for data analysis, time series, " "and statistics" -) +DESCRIPTION = "Powerful data structures for data analysis, time series, and statistics" LONG_DESCRIPTION = """ **pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with structured (tabular, multidimensional,
Mostly fixing extra " " introduced by black (will take a look at the issue tracker there to see if that can be fixed once and for all). Also change `except Foo as e:` to `except Foo as err` and remove one `except Exception` in a docs file. Clarified a whatsnew note that @jorisvandenbossche asked for a while back.
https://api.github.com/repos/pandas-dev/pandas/pulls/28857
2019-10-08T20:40:03Z
2019-10-11T18:15:35Z
2019-10-11T18:15:35Z
2019-10-11T18:35:24Z
TST: un-xfail 22 maybe_promote tests
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index da2b4c28a02a5..5fd1ceb7b0027 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -515,14 +515,6 @@ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced, box) else: if boxed and box_dtype is None: pytest.xfail("does not upcast to object") - if ( - is_integer_dtype(fill_dtype) - or is_float_dtype(fill_dtype) - or is_complex_dtype(fill_dtype) - or is_object_dtype(fill_dtype) - or is_timedelta64_dtype(fill_dtype) - ) and not boxed: - pytest.xfail("does not upcast to object") # create array of given dtype; casts "1" to correct dtype fill_value = np.array([1], dtype=fill_dtype)[0] @@ -557,15 +549,12 @@ def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype, box) else: pytest.xfail("wrong missing value marker") else: - pass if ( boxed and (box_dtype == "bytes" or box_dtype is None) and not (is_string_dtype(dtype) or dtype == bool) ): pytest.xfail("does not upcast to object") - if not boxed and is_datetime_or_timedelta_dtype(dtype): - pytest.xfail("raises error") # create array of given dtype fill_value = b"abc"
orthogonal to other outstanding PRs in this file
https://api.github.com/repos/pandas-dev/pandas/pulls/28856
2019-10-08T18:48:11Z
2019-10-08T20:58:00Z
2019-10-08T20:58:00Z
2019-10-08T21:07:44Z
remove doc note about apply applying a function to the first element …
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1a19910a0957c..5200ad0ba0d23 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6729,14 +6729,6 @@ def apply( DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. - Notes - ----- - In the current implementation apply calls `func` twice on the - first column/row to decide whether it can take a fast or slow - code path. This can lead to unexpected behavior if `func` has - side-effects, as they will take effect twice for the first - column/row. - Examples --------
…twice - [x] closes #28827 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28854
2019-10-08T18:35:16Z
2019-10-08T20:56:32Z
2019-10-08T20:56:32Z
2019-10-08T20:56:40Z
CLN: dont catch Exception on reindex_multi
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a135f567fe6f4..f77d543193e74 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4555,10 +4555,7 @@ def reindex(self, *args, **kwargs): # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): - try: - return self._reindex_multi(axes, copy, fill_value) - except Exception: - pass + return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( @@ -9065,7 +9062,6 @@ def _where( # try to not change dtype at first (if try_quick) if try_quick: - new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other
@toobaz any guesses what this might have been intended to catch?
https://api.github.com/repos/pandas-dev/pandas/pulls/28853
2019-10-08T18:20:00Z
2019-10-11T15:50:18Z
2019-10-11T15:50:18Z
2019-10-11T15:54:38Z
TST: Fix not-boxed maybe_promote test
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 4435b2518e90b..b439a3e2dfbc8 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -359,6 +359,8 @@ def maybe_promote(dtype, fill_value=np.nan): if isinstance(fill_value, datetime) and fill_value.tzinfo is not None: # Trying to insert tzaware into tznaive, have to cast to object dtype = np.dtype(np.object_) + elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)): + dtype = np.dtype(np.object_) else: try: fill_value = tslibs.Timestamp(fill_value).to_datetime64() diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index da2b4c28a02a5..2252e6c7b3dc9 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -603,8 +603,6 @@ def test_maybe_promote_datetime64_with_any( else: if boxed and box_dtype is None: pytest.xfail("does not upcast to object") - if not boxed: - pytest.xfail("does not upcast to object or raises") # create array of given dtype; casts "1" to correct dtype fill_value = np.array([1], dtype=fill_dtype)[0]
Along with #28833 this gets close to finishing off the not-boxed cases
https://api.github.com/repos/pandas-dev/pandas/pulls/28852
2019-10-08T17:56:04Z
2019-10-08T21:21:13Z
2019-10-08T21:21:13Z
2019-10-08T21:33:31Z
TST: un-xfail 1 passing maybe_promote test
diff --git a/pandas/tests/dtypes/cast/test_promote.py b/pandas/tests/dtypes/cast/test_promote.py index da2b4c28a02a5..45dbdf72209b6 100644 --- a/pandas/tests/dtypes/cast/test_promote.py +++ b/pandas/tests/dtypes/cast/test_promote.py @@ -1038,14 +1038,7 @@ def test_maybe_promote_any_numpy_dtype_with_na( dtype = np.dtype(any_numpy_dtype_reduced) boxed, box_dtype = box # read from parametrized fixture - if ( - dtype == bytes - and not boxed - and fill_value is not None - and fill_value is not NaT - ): - pytest.xfail("does not upcast to object") - elif is_integer_dtype(dtype) and fill_value is not NaT: + if is_integer_dtype(dtype) and fill_value is not NaT: # integer + other missing value (np.nan / None) casts to float expected_dtype = np.float64 exp_val_for_scalar = np.nan
https://api.github.com/repos/pandas-dev/pandas/pulls/28850
2019-10-08T17:05:03Z
2019-10-08T20:55:02Z
2019-10-08T20:55:02Z
2019-10-08T21:06:02Z
[MINOR] Fix formatting string in _ODFReader.get_sheet_by_name
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 3be36663bac79..1b22ac953a2a8 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -60,7 +60,7 @@ def get_sheet_by_name(self, name: str): if table.getAttribute("name") == name: return table - raise ValueError("sheet {name} not found".format(name)) + raise ValueError("sheet {name} not found".format(name=name)) def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: """Parse an ODF Table into a list of lists
This PR fixes minor problem with `_ODFReader.get_sheet_by_name` method. If `name` is not found, it attempts to raise a `ValueError` with formatted string that expects keyword argument `name`, however `names` is passed as a positional argument. In effect it will throw unexpected `KeyError`. Steps to reproduce the problem: ```python from odf.opendocument import OpenDocumentSpreadsheet import tempfile path = tempfile.mktemp() OpenDocumentSpreadsheet().save(path) pd.read_excel(path, engine="odf", sheet_name="foo") # Traceback (most recent call last): ... # KeyError: 'name' ``` Expected behavior ```python # Traceback (most recent call last): # ... # ValueError: sheet foo not found ``` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/28849
2019-10-08T16:41:00Z
2019-10-08T16:49:33Z
null
2019-10-08T17:51:40Z